├── .ackrc ├── .editorconfig ├── .envrc ├── .github └── workflows │ └── rust.yml ├── .gitignore ├── .hooks ├── install.sh └── pre-commit ├── .travis.yml ├── Cargo.lock ├── Cargo.toml ├── LICENSE ├── README.md ├── RELEASE-NOTES.md ├── TODO.md ├── benches └── benches.rs ├── contrib ├── check-api-stablity.sh ├── electrs.service └── popular-scripts.txt ├── doc ├── schema.md └── usage.md ├── electrs_macros ├── Cargo.toml └── src │ └── lib.rs ├── flake.lock ├── flake.nix ├── rocksdb-overlay.nix ├── rust-toolchain.toml ├── scripts ├── local-electrum.bash └── run.sh ├── src ├── app.rs ├── bin │ ├── electrs.rs │ ├── popular-scripts.rs │ └── tx-fingerprint-stats.rs ├── chain.rs ├── config.rs ├── daemon.rs ├── electrum │ ├── client.rs │ ├── discovery.rs │ ├── discovery │ │ └── default_servers.rs │ ├── mod.rs │ └── server.rs ├── elements │ ├── asset.rs │ ├── mod.rs │ ├── peg.rs │ └── registry.rs ├── errors.rs ├── lib.rs ├── metrics.rs ├── new_index │ ├── db.rs │ ├── fetch.rs │ ├── mempool.rs │ ├── mod.rs │ ├── precache.rs │ ├── query.rs │ ├── schema.rs │ └── zmq.rs ├── otlp_trace.rs ├── rest.rs ├── signal.rs └── util │ ├── bincode.rs │ ├── block.rs │ ├── electrum_merkle.rs │ ├── fees.rs │ ├── mod.rs │ ├── script.rs │ └── transaction.rs ├── tests ├── common.rs ├── electrum.rs └── rest.rs └── tools ├── addr.py ├── client.py ├── mempool.py └── xpub.py /.ackrc: -------------------------------------------------------------------------------- 1 | --ignore-dir=target 2 | --ignore-dir=db 3 | -------------------------------------------------------------------------------- /.editorconfig: -------------------------------------------------------------------------------- 1 | # see https://editorconfig.org for more options, and setup instructions for your editor 2 | 3 | [*] 4 | indent_style = space 5 | indent_size = 4 6 | 7 | -------------------------------------------------------------------------------- /.envrc: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if ! has nix_direnv_version || !
nix_direnv_version 2.2.1; then 4 | source_url "https://raw.githubusercontent.com/nix-community/nix-direnv/2.2.1/direnvrc" "sha256-zelF0vLbEl5uaqrfIzbgNzJWGmLzCmYAkInj/LNxvKs=" 5 | fi 6 | 7 | watch_file rust-toolchain.toml 8 | use flake 9 | -------------------------------------------------------------------------------- /.github/workflows/rust.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | 3 | on: 4 | push: 5 | branches: 6 | - new-index 7 | pull_request: {} 8 | 9 | env: 10 | CARGO_TERM_COLOR: always 11 | 12 | jobs: 13 | check: 14 | runs-on: ubuntu-22.04 15 | steps: 16 | - uses: actions/checkout@v4 17 | - uses: dtolnay/rust-toolchain@1.75.0 18 | - uses: Swatinem/rust-cache@v2 19 | - name: Run cargo check --all-targets 20 | run: cargo check --all-targets 21 | 22 | test: 23 | runs-on: ubuntu-22.04 24 | steps: 25 | - run: sudo apt-get update && sudo apt-get install libfuse2 26 | - uses: actions/checkout@v4 27 | - uses: dtolnay/rust-toolchain@1.75.0 28 | - uses: Swatinem/rust-cache@v2 29 | - name: Run tests (Bitcoin mode, REST+Electrum) 30 | run: RUST_LOG=debug cargo test 31 | 32 | test-electrum-raw: 33 | runs-on: ubuntu-22.04 34 | steps: 35 | - uses: actions/checkout@v4 36 | - uses: dtolnay/rust-toolchain@1.75.0 37 | - uses: Swatinem/rust-cache@v2 38 | - name: Run test test_electrum_raw 39 | run: RUST_LOG=debug cargo test -- --include-ignored test_electrum_raw 40 | 41 | test-liquid: 42 | runs-on: ubuntu-22.04 43 | steps: 44 | - uses: actions/checkout@v4 45 | - uses: dtolnay/rust-toolchain@1.75.0 46 | - uses: Swatinem/rust-cache@v2 47 | - name: Run tests (Liquid mode, REST) 48 | run: RUST_LOG=debug cargo test --features liquid 49 | 50 | nix: 51 | runs-on: ubuntu-latest 52 | steps: 53 | - uses: actions/checkout@v4 54 | - uses: DeterminateSystems/nix-installer-action@main 55 | - run: nix build . 56 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | target 2 | *db/ 3 | *.log 4 | *.sublime* 5 | *~ 6 | *.pyc 7 | result 8 | .direnv 9 | -------------------------------------------------------------------------------- /.hooks/install.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | cd `dirname $0`/../.git/hooks/ 4 | ln -s ../../.hooks/pre-commit 5 | -------------------------------------------------------------------------------- /.hooks/pre-commit: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | CARGO_FMT="cargo +stable fmt --all" 4 | 5 | $CARGO_FMT --version &>/dev/null 6 | if [ $? != 0 ]; then 7 | printf "[pre_commit] \033[0;31merror\033[0m: \"$CARGO_FMT\" not available?\n" 8 | exit 1 9 | fi 10 | 11 | $CARGO_FMT -- --check 12 | result=$? 
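# capture rustfmt's exit status; it is reported below and propagated as the hook's exit code, so a badly formatted tree aborts the commit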
13 | 14 | printf "[pre_commit] $CARGO_FMT → " 15 | if [ $result != 0 ]; then 16 | printf "\033[0;31merror\033[0m \n" 17 | else 18 | printf "\033[0;32mOK\033[0m \n" 19 | fi 20 | 21 | exit $result 22 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: rust 2 | 3 | rust: 4 | - stable 5 | 6 | cache: cargo 7 | 8 | before_script: 9 | - rustup component add rustfmt-preview 10 | 11 | script: 12 | - cargo fmt --all -- --check 13 | - cargo check --all 14 | - cargo build --all 15 | - cargo test --all 16 | - cargo build --features "liquid" --all 17 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | workspace = { members = ["electrs_macros"] } 2 | 3 | [package] 4 | name = "electrs" 5 | version = "0.4.1" 6 | authors = ["Roman Zeyde "] 7 | description = "An efficient re-implementation of Electrum Server in Rust" 8 | license = "MIT" 9 | homepage = "https://github.com/romanz/electrs" 10 | repository = "https://github.com/romanz/electrs" 11 | keywords = ["bitcoin", "electrum", "server", "index", "database"] 12 | documentation = "https://docs.rs/electrs/" 13 | readme = "README.md" 14 | edition = "2018" 15 | default-run = "electrs" 16 | 17 | [features] 18 | liquid = ["elements"] 19 | electrum-discovery = ["electrum-client"] 20 | bench = [] 21 | otlp-tracing = [ 22 | "tracing", 23 | "tracing-subscriber", 24 | "opentelemetry", 25 | "tracing-opentelemetry", 26 | "opentelemetry-otlp", 27 | "opentelemetry-semantic-conventions", 28 | "electrs_macros/otlp-tracing" 29 | ] 30 | 31 | [dependencies] 32 | arraydeque = "0.5.1" 33 | arrayref = "0.3.6" 34 | base64 = "0.22" 35 | bincode = "1.3.1" 36 | bitcoin = { version = "0.32", features = ["serde"] } 37 | clap = "2.33.3" 38 | crossbeam-channel = "0.5.0" 39 | dirs = "5.0.1" 40 | elements = { version = "0.25", features = ["serde"], optional = true } 41 | error-chain = "0.12.4" 42 | glob = "0.3" 43 | itertools = "0.12" 44 | lazy_static = "1.3.0" 45 | libc = "0.2.81" 46 | log = "0.4.11" 47 | socket2 = { version = "0.5.3", features = ["all"] } 48 | num_cpus = "1.12.0" 49 | page_size = "0.6.0" 50 | prometheus = "0.13" 51 | rayon = "1.5.0" 52 | rocksdb = "0.21" 53 | rust-crypto = "0.2" 54 | serde = "1.0.118" 55 | serde_derive = "1.0.118" 56 | serde_json = "1.0.60" 57 | signal-hook = "0.3" 58 | stderrlog = "0.6" 59 | sysconf = ">=0.3.4" 60 | time = { version = "0.3", features = ["formatting"] } 61 | tiny_http = "0.12.0" 62 | url = "2.2.0" 63 | hyper = "0.14" 64 | hyperlocal = "0.8" 65 | # close to the same tokio version as depended on by hyper v0.14 and hyperlocal 0.8 -- things can go awry if they mismatch 66 | tokio = { version = "1", features = ["sync", "macros", "rt-multi-thread", "rt"] } 67 | opentelemetry = { version = "0.20.0", features = ["rt-tokio"], optional = true } 68 | tracing-opentelemetry = { version = "0.21.0", optional = true } 69 | opentelemetry-otlp = { version = "0.13.0", default-features = false, features = ["http-proto", "reqwest-client"], optional = true } 70 | tracing-subscriber = { version = "0.3.17", default-features = false, features = ["env-filter", "fmt"], optional = true } 71 | opentelemetry-semantic-conventions = { version = "0.12.0", optional = true } 72 | tracing = { version = "0.1.40", default-features = false, features = ["attributes"], optional = true } 73 | 74 | # optional dependencies
for electrum-discovery 75 | electrum-client = { version = "0.8", optional = true } 76 | zmq = "0.10.0" 77 | electrs_macros = { path = "electrs_macros", default-features = false } 78 | 79 | [dev-dependencies] 80 | bitcoind = { version = "0.36", features = ["25_0"] } 81 | elementsd = { version = "0.11", features = ["22_1_1"] } 82 | electrumd = { version = "0.1.0", features = ["4_5_4"] } 83 | ureq = { version = "2.9", default-features = false, features = ["json"] } 84 | tempfile = "3.10" 85 | criterion = { version = "0.4", features = ["html_reports"] } 86 | bitcoin-test-data = { version = "*" } 87 | 88 | [[bench]] 89 | name = "benches" 90 | harness = false 91 | required-features = ["bench"] 92 | 93 | 94 | [profile.release] 95 | lto = true 96 | panic = 'abort' 97 | codegen-units = 1 98 | 99 | [patch.crates-io.electrum-client] 100 | git = "https://github.com/Blockstream/rust-electrum-client" 101 | rev = "d3792352992a539afffbe11501d1aff9fd5b919d" # add-peer branch 102 | 103 | # not yet published on crates.io 104 | [patch.crates-io.electrumd] 105 | git = "https://github.com/shesek/electrumd" 106 | rev = "b35d9db285d932cb3c2296beab65e571a2506349" 107 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (C) 2018, Roman Zeyde. 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy 4 | of this software and associated documentation files (the "Software"), to deal 5 | in the Software without restriction, including without limitation the rights 6 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | copies of the Software, and to permit persons to whom the Software is 8 | furnished to do so, subject to the following conditions: 9 | 10 | The above copyright notice and this permission notice shall be included in 11 | all copies or substantial portions of the Software. 12 | 13 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 19 | THE SOFTWARE. 20 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Esplora - Electrs backend API 2 | 3 | A block chain index engine and HTTP API written in Rust based on [romanz/electrs](https://github.com/romanz/electrs). 4 | 5 | Used as the backend for the [Esplora block explorer](https://github.com/Blockstream/esplora) powering [blockstream.info](https://blockstream.info/). 6 | 7 | API documentation [is available here](https://github.com/blockstream/esplora/blob/master/API.md). 8 | 9 | Documentation for the database schema and indexing process [is available here](doc/schema.md). 
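For a quick smoke test against a running instance (a hypothetical session: it assumes the default `--http-addr` of `127.0.0.1:3000` described under CLI options below, with `$txid` and `$address` standing in for real values; the endpoints themselves are described in the API documentation linked above):

```bash
# hash of the current chain tip
curl -s http://127.0.0.1:3000/blocks/tip/hash

# transaction details and address UTXOs, as JSON
curl -s http://127.0.0.1:3000/tx/$txid | jq .
curl -s http://127.0.0.1:3000/address/$address/utxo | jq .
```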
10 | 11 | ### Installing & indexing 12 | 13 | Install Rust, Bitcoin Core (no `txindex` needed) and the `clang` and `cmake` packages, increase the maximum number of open files with `ulimit -n 100000`, and then: 14 | 15 | ```bash 16 | $ git clone https://github.com/blockstream/electrs && cd electrs 17 | $ git checkout new-index 18 | $ cargo run --release --bin electrs -- -vvvv --daemon-dir ~/.bitcoin 19 | 20 | # Or for liquid: 21 | $ cargo run --features liquid --release --bin electrs -- -vvvv --network liquid --daemon-dir ~/.liquid 22 | ``` 23 | 24 | See [electrs's original documentation](https://github.com/romanz/electrs/blob/master/doc/usage.md) for more detailed instructions. 25 | Note that our indexes are incompatible with electrs's and have to be created separately. 26 | 27 | The indexes require 610GB of storage after running compaction (as of June 2020), but you'll need to have 28 | free space of about double that available during the index compaction process. 29 | Creating the indexes should take a few hours on a beefy machine with an SSD. 30 | 31 | To deploy with Docker, follow the [instructions here](https://github.com/Blockstream/esplora#how-to-build-the-docker-image). 32 | 33 | ### Light mode 34 | 35 | For personal or low-volume use, you may set `--lightmode` to reduce disk storage requirements 36 | by roughly 50% at the cost of slower and more expensive lookups. 37 | 38 | With this option set, raw transactions and metadata associated with blocks will not be kept in rocksdb 39 | (the `T`, `X` and `M` indexes), 40 | but instead queried from bitcoind on demand. 41 | 42 | ### Notable changes from Electrs: 43 | 44 | - HTTP REST API in addition to the Electrum JSON-RPC protocol, with extended transaction information 45 | (previous outputs, spending transactions, script asm and more). 46 | 47 | - Extended indexes and database storage for improved performance under high load: 48 | 49 | - A full transaction store mapping txids to raw transactions is kept in the database under the prefix `t`. 50 | - An index of all spendable transaction outputs is kept under the prefix `O`. 51 | - An index of all addresses (encoded as string) is kept under the prefix `a` to enable by-prefix address search. 52 | - A map of blockhash to txids is kept in the database under the prefix `X`. 53 | - Block stats metadata (number of transactions, size and weight) is kept in the database under the prefix `M`. 54 | 55 | With these new indexes, bitcoind is no longer queried to serve user requests and is only polled 56 | periodically for new blocks and for syncing the mempool. 57 | 58 | - Support for Liquid and other Elements-based networks, including CT, peg-in/out and multi-asset. 59 | (requires enabling the `liquid` feature flag using `--features liquid`) 60 | 61 | ### CLI options 62 | 63 | In addition to electrs's original configuration options, a few new options are also available: 64 | 65 | - `--http-addr <addr:port>` - HTTP server address/port to listen on (default: `127.0.0.1:3000`). 66 | - `--lightmode` - enable light mode (see above) 67 | - `--cors <origins>` - origins allowed to make cross-site requests (optional, defaults to none). 68 | - `--address-search` - enables the by-prefix address search index. 69 | - `--index-unspendables` - enables indexing of provably unspendable outputs. 70 | - `--utxos-limit <num>` - maximum number of utxos to return per address. 71 | - `--electrum-txs-limit <num>` - maximum number of txs to return per address in the electrum server (does not apply to the HTTP API).
72 | - `--electrum-banner <banner>` - welcome banner text for the electrum server. 73 | 74 | Additional options with the `liquid` feature: 75 | - `--parent-network <network>` - the parent network this chain is pegged to. 76 | 77 | Additional options with the `electrum-discovery` feature: 78 | - `--electrum-hosts <hosts>` - a JSON map of the public hosts where the electrum server is reachable, in the [`server.features` format](https://electrumx.readthedocs.io/en/latest/protocol-methods.html#server.features). 79 | - `--electrum-announce` - announce the electrum server on the electrum p2p server discovery network. 80 | 81 | See `$ cargo run --release --bin electrs -- --help` for the full list of options. 82 | 83 | ## License 84 | 85 | MIT 86 | -------------------------------------------------------------------------------- /RELEASE-NOTES.md: -------------------------------------------------------------------------------- 1 | # 0.4.1 (14 Oct 2018) 2 | 3 | * Don't run full compaction after initial import is over (when using JSONRPC) 4 | 5 | # 0.4.0 (22 Sep 2018) 6 | 7 | * Optimize for low-memory systems by using different RocksDB settings 8 | * Rename `--skip_bulk_import` flag to `--jsonrpc-import` 9 | 10 | # 0.3.2 (14 Sep 2018) 11 | 12 | * Optimize block headers processing during startup 13 | * Handle TCP disconnections during long RPCs 14 | * Use # of CPUs for bulk indexing threads 15 | * Update rust-bitcoin to 0.14 16 | 17 | 18 | # 0.3.1 (20 Aug 2018) 19 | 20 | * Reconnect to bitcoind only on transient errors 21 | * Poll mempool after transaction broadcasting 22 | 23 | # 0.3.0 (14 Aug 2018) 24 | 25 | * Optimize for low-memory systems 26 | * Improve compaction performance 27 | * Handle disconnections from bitcoind by retrying 28 | * Make `blk*.dat` ingestion more robust 29 | * Support regtest network 30 | * Support more Electrum RPC methods 31 | * Export more Prometheus metrics (CPU, RAM, file descriptors) 32 | * Add `scripts/run.sh` for building and running `electrs` 33 | * Add some Python tools (as API usage examples) 34 | * Change default Prometheus monitoring ports 35 | 36 | # 0.2.0 (14 Jul 2018) 37 | 38 | * Allow specifying custom bitcoind data directory 39 | * Allow specifying JSONRPC cookie from commandline 40 | * Improve initial bulk indexing performance 41 | * Support 32-bit systems 42 | 43 | # 0.1.0 (2 Jul 2018) 44 | 45 | * Announcement: https://lists.linuxfoundation.org/pipermail/bitcoin-dev/2018-July/016190.html 46 | * Published to https://crates.io/electrs and https://docs.rs/electrs 47 | -------------------------------------------------------------------------------- /TODO.md: -------------------------------------------------------------------------------- 1 | # Electrum 2 | 3 | * Snapshot DB after successful indexing - and run queries on the latest snapshot 4 | * Update height to -1 for txns with any [unconfirmed input](https://electrumx.readthedocs.io/en/latest/protocol-basics.html#status) 5 | 6 | # Rust 7 | 8 | * Use [bytes](https://carllerche.github.io/bytes/bytes/index.html) instead of `Vec<u8>` when possible 9 | * Use generators instead of vectors 10 | * Use proper HTTP parser for JSONRPC replies over persistent connection 11 | 12 | # Performance 13 | 14 | * Consider https://github.com/facebook/rocksdb/wiki/RocksDB-Tuning-Guide#difference-of-spinning-disk 15 | -------------------------------------------------------------------------------- /benches/benches.rs: -------------------------------------------------------------------------------- 1 | use
bitcoin::{consensus::Decodable, Block}; 2 | use criterion::{black_box, criterion_group, criterion_main, Criterion}; 3 | use electrs::new_index::schema::bench::*; 4 | 5 | fn criterion_benchmark(c: &mut Criterion) { 6 | c.bench_function("add_blocks", |b| { 7 | let block_bytes = bitcoin_test_data::blocks::mainnet_702861(); 8 | let block = Block::consensus_decode(&mut &block_bytes[..]).unwrap(); 9 | let data = Data::new(block); 10 | // TODO use iter_batched to avoid measuring cloning inputs 11 | 12 | b.iter(move || black_box(add_blocks(&data))) 13 | }); 14 | } 15 | 16 | criterion_group!(benches, criterion_benchmark); 17 | criterion_main!(benches); 18 | -------------------------------------------------------------------------------- /contrib/check-api-stablity.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -eo pipefail 4 | shopt -s expand_aliases 5 | 6 | # A simple script for checking HTTP API stability by comparing the responses 7 | # between two running electrs instances connected to a regtest node backend. 8 | 9 | : ${NODE_DIR?missing NODE_DIR} # for bitcoind/elementsd 10 | : ${ELECTRS1_URL?missing ELECTRS1_URL} 11 | : ${ELECTRS2_URL?missing ELECTRS2_URL} 12 | # Set ELEMENTS_CHAIN for Elements-based chains (e.g. to 'elementsregtest') 13 | 14 | alias cli="$([[ -z $ELEMENTS_CHAIN ]] && echo bitcoin-cli -regtest || echo elements-cli -chain=$ELEMENTS_CHAIN) -datadir=$NODE_DIR" 15 | 16 | check() { 17 | echo "Checking GET $1 ..." 18 | local res1=$(curl -f -s "$ELECTRS1_URL$1" || echo "Request to ELECTRS1 failed") 19 | local res2=$(curl -f -s "$ELECTRS2_URL$1" || echo "Request to ELECTRS2 failed") 20 | { if [[ "$res1" = "{"* || "$res1" = "["* ]]; then 21 | # Use `jq` for canonicalized ordering and to display a diff of beautified JSON 22 | local sort_arr='walk(if type == "array" then sort else .
end)' 23 | diff -u1 <(jq --sort-keys "$sort_arr" <<< $res1) <(jq --sort-keys "$sort_arr" <<< $res2) 24 | else 25 | diff -u1 <(echo "$res1") <(echo "$res2") 26 | fi } && echo OK || echo No match 27 | } 28 | 29 | sync() { pkill -USR1 electrs; sleep 1; } 30 | 31 | # Ensure both electrs instances are connected to the same node backend 32 | check /blocks/tip/hash 33 | 34 | # Send an unconfirmed transaction 35 | address=$(cli getnewaddress) 36 | txid=$(cli sendtoaddress $address 1.234) 37 | sync 38 | check /address/$address 39 | check /address/$address/txs 40 | check /address/$address/utxo 41 | check /tx/$txid 42 | check /mempool 43 | check /mempool/recent 44 | 45 | # Mine a block confirming the transaction 46 | blockhash=$(cli -generate 1 | jq -r .blocks[0]) 47 | sync 48 | check /block/$blockhash 49 | check /block/$blockhash/txs 50 | check /blocks 51 | check /address/$address 52 | check /address/$address/txs 53 | check /address/$address/utxo 54 | check /tx/$txid 55 | 56 | # Elements-only tests 57 | if [[ -n $ELEMENTS_CHAIN ]]; then 58 | # Test non-confidential transaction 59 | uc_address=$(cli getaddressinfo $address | jq -r .unconfidential) 60 | uc_txid=$(cli sendtoaddress $uc_address 5.678) 61 | sync 62 | check /address/$uc_address 63 | check /tx/$uc_txid 64 | 65 | # Test asset issuance (blinded w/o contract hash & unblinded w/ contract hash) 66 | asset1=$(cli issueasset 10 20 true) 67 | asset2=$(cli issueasset 30 40 false 3333333333333333333333333333333333333333333333333333333333333333) 68 | sync 69 | check_asset() { 70 | check /asset/$(jq -r .asset <<< $1) 71 | check /tx/$(jq -r .txid <<< $1) # issuance tx 72 | } 73 | check_asset "$asset1" 74 | check_asset "$asset2" 75 | cli -generate 1 > /dev/null 76 | sync 77 | check_asset "$asset1" 78 | check_asset "$asset2" 79 | 80 | # Test transactions transferring an issued asset (confidential & non-confidential) 81 | asset1_id=$(jq -r .asset <<< $asset1) 82 | asset2_id=$(jq -r .asset <<< $asset2) 83 | i_txid=$(cli -named sendtoaddress address=$address amount=0.987 assetlabel=$asset1_id) 84 | i_uc_txid=$(cli -named sendtoaddress address=$uc_address amount=0.654 assetlabel=$asset2_id) 85 | sync 86 | check /tx/$i_txid 87 | check /tx/$i_uc_txid 88 | check /address/$uc_address 89 | check /address/$uc_address/utxo 90 | 91 | # Test issuance with no reissuance tokens 92 | check_asset "$(cli issueasset 10 0 false && sync)" 93 | fi -------------------------------------------------------------------------------- /contrib/electrs.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Electrum Rust Server 3 | 4 | [Service] 5 | Type=simple 6 | ExecStart=/path/to/electrs/target/release/electrs -vvvv --db-dir /path/to/electrs/db/ 7 | Restart=on-failure 8 | RestartSec=60 9 | Environment="RUST_BACKTRACE=1" 10 | 11 | # Hardening measures 12 | PrivateTmp=true 13 | ProtectSystem=full 14 | NoNewPrivileges=true 15 | MemoryDenyWriteExecute=true 16 | 17 | [Install] 18 | WantedBy=multi-user.target 19 | -------------------------------------------------------------------------------- /doc/schema.md: -------------------------------------------------------------------------------- 1 | # Index Schema 2 | 3 | The index is stored as three RocksDB databases: 4 | 5 | - `txstore` 6 | - `history` 7 | - `cache` 8 | 9 | ### Indexing process 10 | 11 | The indexing is done in two phases, each of which can run concurrently within itself.
12 | The first phase populates the `txstore` database, the second phase populates the `history` database. 13 | 14 | NOTE: in order to construct the history rows for spending inputs in phase #2, we rely on the transactions having been processed in phase #1, so they can be looked up efficiently (using parallel point lookups). 15 | 16 | After the indexing is completed, both funding and spending are indexed as independent rows under `H{scripthash}`, so that they can be queried in-order in one go. 17 | 18 | ### `txstore` 19 | 20 | Each block results in the following new rows: 21 | 22 | * `"B{blockhash}" → "{header}"` 23 | 24 | * `"X{blockhash}" → "{txids}"` (list of txids included in the block) 25 | 26 | * `"M{blockhash}" → "{metadata}"` (block weight, size and number of txs) 27 | 28 | * `"D{blockhash}" → ""` (signifies the block is done processing) 29 | 30 | Each transaction results in the following new rows: 31 | 32 | * `"T{txid}" → "{serialized-transaction}"` 33 | 34 | * `"C{txid}{confirmed-blockhash}" → ""` (a list of blockhashes where `txid` was seen to be confirmed) 35 | 36 | Each output results in the following new row: 37 | 38 | * `"O{txid}{vout}" → "{scriptpubkey}{value}"` 39 | 40 | When the indexer is synced up to the tip of the chain, the hash of the tip is saved as follows: 41 | 42 | * `"t" → "{blockhash}"` 43 | 44 | ### `history` 45 | 46 | Each funding output (except for provably unspendable ones when `--index-unspendables` is not enabled) results in the following new rows (`H` is for history, `F` is for funding): 47 | 48 | * `"H{funding-scripthash}{funding-height}F{funding-txid:vout}{value}" → ""` 49 | * `"a{funding-address-str}" → ""` (for prefix address search, only saved when `--address-search` is enabled) 50 | 51 | Each spending input (except the coinbase) results in the following new rows (`S` is for spending): 52 | 53 | * `"H{funding-scripthash}{spending-height}S{spending-txid:vin}{funding-txid:vout}{value}" → ""` 54 | 55 | * `"S{funding-txid:vout}{spending-txid:vin}" → ""` 56 | 57 | #### Elements only 58 | 59 | Asset (re)issuances result in the following new rows (only for user-issued assets): 60 | 61 | * `"i{asset-id}" → "{issuing-txid:vin}{prev-txid:vout}{issuance}{reissuance_token}"` 62 | * `"I{asset-id}{issuance-height}I{issuing-txid:vin}{is_reissuance}{amount}{tokens}" → ""` 63 | 64 | Peg-ins and peg-outs result in the following new rows (only for the native asset, typically L-BTC): 65 | 66 | * `"I{asset-id}{pegin-height}F{pegin-txid:vin}{value}" → ""` 67 | * `"I{asset-id}{pegout-height}F{pegout-txid:vout}{value}" → ""` 68 | 69 | Every burn (unspendable output) results in the following new row (both user-issued and native): 70 | 71 | * `"I{asset-id}{burn-height}F{burning-txid:vout}{value}" → ""` 72 | 73 | ### `cache` 74 | 75 | Holds a cache for aggregated stats and unspent TXOs of scripthashes. 76 | 77 | The cache is created on-demand, the first time the scripthash is requested by a user. 78 | 79 | The cached data is kept next to the `blockhash` the cache is up-to-date for. 80 | When requesting data, the cache is updated with the new history rows added since the `blockhash`. 81 | If the `blockhash` was since orphaned, the cache is removed and re-computed.
82 | 83 | * `"A{scripthash}" → "{stats}{blockhash}"` (where `stats` is composed of `tx_count`, `funded_txo_{count,sum}` and `spent_txo_{count,sum}`) 84 | 85 | * `"U{scripthash}" → "{utxo}{blockhash}"` (where `utxo` is a set of `(txid,vout)` outpoints) 86 | 87 | #### Elements only: 88 | 89 | Stats for issued assets: 90 | * `"z{asset-id}" → "{issued_stats}{blockhash}"` (where `issued_stats` is composed of `tx_count`, `issuance_count`, `issued_amount`, `burned_amount`, `has_blinded_issuances`, `reissuance_tokens`, `burned_reissuance_tokens`) 91 | 92 | Stats for the native asset: 93 | * `"z{issued-asset}" → "{native_stats}{blockhash}"` (where `native_stats` is composed of `tx_count`, `peg_in_count`, `peg_in_amount`, `peg_out_count`, `peg_out_amount`, `burn_count` and `burn_amount`) 94 | -------------------------------------------------------------------------------- /doc/usage.md: -------------------------------------------------------------------------------- 1 | ## Installation 2 | 3 | Install [latest Rust](https://rustup.rs/) (1.31+), 4 | [latest Bitcoin Core](https://bitcoincore.org/en/download/) (0.16+) 5 | and [latest Electrum wallet](https://electrum.org/#download) (3.2+). 6 | 7 | Also, install the following packages (on Debian): 8 | ```bash 9 | $ sudo apt update 10 | $ sudo apt install clang cmake # for building 'rust-rocksdb' 11 | ``` 12 | 13 | ## Build 14 | 15 | First build should take ~20 minutes: 16 | ```bash 17 | $ cargo build --release 18 | ``` 19 | 20 | 21 | ## Bitcoind configuration 22 | 23 | Allow the Bitcoin daemon to sync before starting the Electrum server: 24 | ```bash 25 | $ bitcoind -server=1 -txindex=0 -prune=0 26 | ``` 27 | 28 | If you are using `-rpcuser=USER` and `-rpcpassword=PASSWORD` for authentication, please use the `--cookie="USER:PASSWORD"` command-line flag. 29 | Otherwise, [`~/.bitcoin/.cookie`](https://github.com/bitcoin/bitcoin/blob/0212187fc624ea4a02fc99bc57ebd413499a9ee1/contrib/debian/examples/bitcoin.conf#L70-L72) will be read, allowing this server to use the bitcoind JSONRPC interface.
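For example (a sketch: `USER:PASSWORD` and the `~/.bitcoin` path are placeholders for your own setup), the two authentication modes look like this:

```bash
# explicit credentials, matching bitcoind's -rpcuser/-rpcpassword:
$ cargo run --release -- --cookie="USER:PASSWORD"

# or omit --cookie and let the server read the cookie file that bitcoind
# regenerates on every startup:
$ ls ~/.bitcoin/.cookie
```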
30 | 31 | ## Usage 32 | 33 | First index sync should take ~1.5 hours: 34 | ```bash 35 | $ cargo run --release -- -vvv --timestamp --db-dir ./db [--cookie="USER:PASSWORD"] 36 | 2018-08-17T18:27:42 - INFO - NetworkInfo { version: 179900, subversion: "/Satoshi:0.17.99/" } 37 | 2018-08-17T18:27:42 - INFO - BlockchainInfo { chain: "main", blocks: 537204, headers: 537204, bestblockhash: "0000000000000000002956768ca9421a8ddf4e53b1d81e429bd0125a383e3636", pruned: false, initialblockdownload: false } 38 | 2018-08-17T18:27:42 - DEBUG - opening DB at "./db/mainnet" 39 | 2018-08-17T18:27:42 - DEBUG - full compaction marker: None 40 | 2018-08-17T18:27:42 - INFO - listing block files at "/home/user/.bitcoin/blocks/blk*.dat" 41 | 2018-08-17T18:27:42 - INFO - indexing 1348 blk*.dat files 42 | 2018-08-17T18:27:42 - DEBUG - found 0 indexed blocks 43 | 2018-08-17T18:27:55 - DEBUG - applying 537205 new headers from height 0 44 | 2018-08-17T19:31:01 - DEBUG - no more blocks to index 45 | 2018-08-17T19:31:03 - DEBUG - no more blocks to index 46 | 2018-08-17T19:31:03 - DEBUG - last indexed block: best=0000000000000000002956768ca9421a8ddf4e53b1d81e429bd0125a383e3636 height=537204 @ 2018-08-17T15:24:02Z 47 | 2018-08-17T19:31:05 - DEBUG - opening DB at "./db/mainnet" 48 | 2018-08-17T19:31:06 - INFO - starting full compaction 49 | 2018-08-17T19:58:19 - INFO - finished full compaction 50 | 2018-08-17T19:58:19 - INFO - enabling auto-compactions 51 | 2018-08-17T19:58:19 - DEBUG - opening DB at "./db/mainnet" 52 | 2018-08-17T19:58:26 - DEBUG - applying 537205 new headers from height 0 53 | 2018-08-17T19:58:27 - DEBUG - downloading new block headers (537205 already indexed) from 000000000000000000150d26fcc38b8c3b71ae074028d1d50949ef5aa429da00 54 | 2018-08-17T19:58:27 - INFO - best=000000000000000000150d26fcc38b8c3b71ae074028d1d50949ef5aa429da00 height=537218 @ 2018-08-17T16:57:50Z (14 left to index) 55 | 2018-08-17T19:58:28 - DEBUG - applying 14 new headers from height 537205 56 | 2018-08-17T19:58:29 - INFO - RPC server running on 127.0.0.1:50001 57 | ``` 58 | 59 | The index database is stored here: 60 | ```bash 61 | $ du db/ 62 | 38G db/mainnet/ 63 | ``` 64 | 65 | ## Electrum client 66 | ```bash 67 | # Connect only to the local server, for better privacy 68 | $ ./scripts/local-electrum.bash 69 | + ADDR=127.0.0.1 70 | + PORT=50001 71 | + PROTOCOL=t 72 | + electrum --oneserver --server=127.0.0.1:50001:t 73 | 74 | ``` 75 | 76 | In order to use a secure connection, a TLS-terminating proxy (e.g. [hitch](https://github.com/varnish/hitch)) is recommended: 77 | ```bash 78 | $ hitch --backend=[127.0.0.1]:50001 --frontend=[127.0.0.1]:50002 pem_file 79 | $ electrum --oneserver --server=127.0.0.1:50002:s 80 | ``` 81 | 82 | ## Docker 83 | ```bash 84 | $ docker build -t electrs-app .
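# --network host lets the container reach bitcoind's RPC port directly;
# the bitcoind datadir is mounted read-only and the index db lands in $PWD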
85 | $ docker run --network host \ 86 | --volume /home/roman/.bitcoin:/home/user/.bitcoin:ro \ 87 | --volume $PWD:/home/user \ 88 | --rm -i -t electrs-app 89 | ``` 90 | 91 | ## Monitoring 92 | 93 | Indexing and serving metrics are exported via [Prometheus](https://github.com/pingcap/rust-prometheus): 94 | 95 | ```bash 96 | $ sudo apt install prometheus 97 | $ echo " 98 | scrape_configs: 99 | - job_name: electrs 100 | static_configs: 101 | - targets: ['localhost:4224'] 102 | " | sudo tee -a /etc/prometheus/prometheus.yml 103 | $ sudo systemctl restart prometheus 104 | $ firefox 'http://localhost:9090/graph?g0.range_input=1h&g0.expr=index_height&g0.tab=0' 105 | ``` 106 | -------------------------------------------------------------------------------- /electrs_macros/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "electrs_macros" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | [lib] 7 | proc-macro = true 8 | 9 | [features] 10 | otlp-tracing = [] 11 | 12 | [dependencies] 13 | syn = "2.0" 14 | quote = "1.0" 15 | proc-macro2 = "1.0" 16 | -------------------------------------------------------------------------------- /electrs_macros/src/lib.rs: -------------------------------------------------------------------------------- 1 | use proc_macro::TokenStream; 2 | 3 | 4 | #[proc_macro_attribute] 5 | #[cfg(feature = "otlp-tracing")] 6 | pub fn trace(attr: TokenStream, item: TokenStream) -> TokenStream { 7 | use quote::quote; 8 | use syn::{parse_macro_input, ItemFn}; 9 | 10 | let additional_fields = if !attr.is_empty() { 11 | let attr_tokens: proc_macro2::TokenStream = attr.into(); 12 | quote! {, #attr_tokens } 13 | } else { 14 | quote! {} 15 | }; 16 | 17 | let function = parse_macro_input!(item as ItemFn); 18 | 19 | let fields_tokens = quote! { 20 | fields(module = module_path!(), file = file!(), line = line!() #additional_fields) 21 | }; 22 | 23 | let expanded = quote! 
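// emit the instrumented function: tracing's #[instrument] wraps the original item, skipping all arguments and attaching the module/file/line fields assembled above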
{ 24 | #[tracing::instrument(skip_all, #fields_tokens)] 25 | #function 26 | }; 27 | 28 | expanded.into() 29 | } 30 | 31 | #[proc_macro_attribute] 32 | #[cfg(not(feature = "otlp-tracing"))] 33 | pub fn trace(_attr: TokenStream, item: TokenStream) -> TokenStream { 34 | item 35 | } -------------------------------------------------------------------------------- /flake.lock: -------------------------------------------------------------------------------- 1 | { 2 | "nodes": { 3 | "crane": { 4 | "inputs": { 5 | "nixpkgs": [ 6 | "nixpkgs" 7 | ] 8 | }, 9 | "locked": { 10 | "lastModified": 1711586303, 11 | "narHash": "sha256-iZDHWTqQj6z6ccqTSEOPOxQ8KMFAemInUObN2R9vHSs=", 12 | "owner": "ipetkov", 13 | "repo": "crane", 14 | "rev": "a329cd00398379c62e76fc3b8d4ec2934260d636", 15 | "type": "github" 16 | }, 17 | "original": { 18 | "owner": "ipetkov", 19 | "repo": "crane", 20 | "type": "github" 21 | } 22 | }, 23 | "flake-utils": { 24 | "inputs": { 25 | "systems": "systems" 26 | }, 27 | "locked": { 28 | "lastModified": 1710146030, 29 | "narHash": "sha256-SZ5L6eA7HJ/nmkzGG7/ISclqe6oZdOZTNoesiInkXPQ=", 30 | "owner": "numtide", 31 | "repo": "flake-utils", 32 | "rev": "b1d9ab70662946ef0850d488da1c9019f3a9752a", 33 | "type": "github" 34 | }, 35 | "original": { 36 | "owner": "numtide", 37 | "repo": "flake-utils", 38 | "type": "github" 39 | } 40 | }, 41 | "nixpkgs": { 42 | "locked": { 43 | "lastModified": 1711523803, 44 | "narHash": "sha256-UKcYiHWHQynzj6CN/vTcix4yd1eCu1uFdsuarupdCQQ=", 45 | "owner": "NixOS", 46 | "repo": "nixpkgs", 47 | "rev": "2726f127c15a4cc9810843b96cad73c7eb39e443", 48 | "type": "github" 49 | }, 50 | "original": { 51 | "owner": "NixOS", 52 | "ref": "nixos-unstable", 53 | "repo": "nixpkgs", 54 | "type": "github" 55 | } 56 | }, 57 | "root": { 58 | "inputs": { 59 | "crane": "crane", 60 | "flake-utils": "flake-utils", 61 | "nixpkgs": "nixpkgs", 62 | "rust-overlay": "rust-overlay" 63 | } 64 | }, 65 | "rust-overlay": { 66 | "inputs": { 67 | "flake-utils": [ 68 | "flake-utils" 69 | ], 70 | "nixpkgs": [ 71 | "nixpkgs" 72 | ] 73 | }, 74 | "locked": { 75 | "lastModified": 1711592024, 76 | "narHash": "sha256-oD4OJ3TRmVrbAuKZWxElRCyCagNCDuhfw2exBmNOy48=", 77 | "owner": "oxalica", 78 | "repo": "rust-overlay", 79 | "rev": "aa858717377db2ed8ffd2d44147d907baee656e5", 80 | "type": "github" 81 | }, 82 | "original": { 83 | "owner": "oxalica", 84 | "repo": "rust-overlay", 85 | "type": "github" 86 | } 87 | }, 88 | "systems": { 89 | "locked": { 90 | "lastModified": 1681028828, 91 | "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", 92 | "owner": "nix-systems", 93 | "repo": "default", 94 | "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", 95 | "type": "github" 96 | }, 97 | "original": { 98 | "owner": "nix-systems", 99 | "repo": "default", 100 | "type": "github" 101 | } 102 | } 103 | }, 104 | "root": "root", 105 | "version": 7 106 | } 107 | -------------------------------------------------------------------------------- /flake.nix: -------------------------------------------------------------------------------- 1 | { 2 | inputs = { 3 | nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable"; 4 | flake-utils.url = "github:numtide/flake-utils"; 5 | rust-overlay = { 6 | url = "github:oxalica/rust-overlay"; 7 | inputs = { 8 | nixpkgs.follows = "nixpkgs"; 9 | flake-utils.follows = "flake-utils"; 10 | }; 11 | }; 12 | crane = { 13 | url = "github:ipetkov/crane"; 14 | inputs = { 15 | nixpkgs.follows = "nixpkgs"; 16 | }; 17 | }; 18 | }; 19 | outputs = { self, nixpkgs, flake-utils, rust-overlay, crane }: 20 | 
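# evaluate the outputs once per default system; nixpkgs gets the rust-overlay plus the local rocksdb overlay, and crane builds the crate with the toolchain pinned in rust-toolchain.toml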
flake-utils.lib.eachDefaultSystem 21 | (system: 22 | let 23 | overlays = [ 24 | (import rust-overlay) 25 | (import ./rocksdb-overlay.nix) 26 | ]; 27 | pkgs = import nixpkgs { 28 | inherit system overlays; 29 | }; 30 | rustToolchain = pkgs.pkgsBuildHost.rust-bin.fromRustupToolchainFile ./rust-toolchain.toml; 31 | 32 | craneLib = (crane.mkLib pkgs).overrideToolchain rustToolchain; 33 | 34 | src = craneLib.cleanCargoSource ./.; 35 | 36 | nativeBuildInputs = with pkgs; [ rustToolchain clang ]; # required only at build time 37 | buildInputs = with pkgs; [ ]; # also required at runtime 38 | 39 | envVars = 40 | { 41 | LIBCLANG_PATH = "${pkgs.libclang.lib}/lib"; 42 | ELEMENTSD_SKIP_DOWNLOAD = true; 43 | BITCOIND_SKIP_DOWNLOAD = true; 44 | ELECTRUMD_SKIP_DOWNLOAD = true; 45 | 46 | # link rocksdb dynamically 47 | ROCKSDB_INCLUDE_DIR = "${pkgs.rocksdb}/include"; 48 | ROCKSDB_LIB_DIR = "${pkgs.rocksdb}/lib"; 49 | 50 | # for integration testing 51 | BITCOIND_EXE = "${pkgs.bitcoind}/bin/bitcoind"; 52 | ELEMENTSD_EXE = "${pkgs.elementsd}/bin/elementsd"; 53 | ELECTRUMD_EXE = "${pkgs.electrum}/bin/electrum"; 54 | }; 55 | 56 | commonArgs = { 57 | inherit src buildInputs nativeBuildInputs; 58 | } // envVars; 59 | 60 | cargoArtifacts = craneLib.buildDepsOnly commonArgs; 61 | bin = craneLib.buildPackage (commonArgs // { 62 | inherit cargoArtifacts; 63 | }); 64 | binLiquid = craneLib.buildPackage (commonArgs // { 65 | inherit cargoArtifacts; 66 | cargoExtraArgs = "--features liquid"; 67 | }); 68 | 69 | in 70 | with pkgs; 71 | { 72 | packages = 73 | { 74 | # that way we can build `bin` specifically, 75 | # but it's also the default. 76 | inherit bin binLiquid; 77 | default = bin; 78 | }; 79 | 80 | apps."blockstream-electrs-liquid" = { 81 | type = "app"; 82 | program = "${binLiquid}/bin/electrs"; 83 | }; 84 | apps."blockstream-electrs" = { 85 | type = "app"; 86 | program = "${bin}/bin/electrs"; 87 | }; 88 | 89 | devShells.default = mkShell (envVars // { 90 | inputsFrom = [ bin ]; 91 | }); 92 | } 93 | ); 94 | } 95 | -------------------------------------------------------------------------------- /rocksdb-overlay.nix: -------------------------------------------------------------------------------- 1 | final: prev: { 2 | 3 | rocksdb = prev.rocksdb.overrideAttrs (oldAttrs: rec { 4 | version = "8.1.1"; 5 | 6 | src = final.fetchFromGitHub { 7 | owner = "facebook"; 8 | repo = oldAttrs.pname; 9 | rev = "v${version}"; 10 | hash = "sha256-79hRtc5QSWLLyjRGCmuYZSoIc9IcIsnl8UCinz2sVw4="; 11 | }; 12 | }); 13 | } 14 | -------------------------------------------------------------------------------- /rust-toolchain.toml: -------------------------------------------------------------------------------- 1 | [toolchain] 2 | channel = "1.75.0" 3 | components = [ 4 | "cargo", 5 | "clippy", 6 | "rust-src", 7 | "rust-std", 8 | "rustc", 9 | "rustfmt" 10 | ] 11 | -------------------------------------------------------------------------------- /scripts/local-electrum.bash: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -eux 3 | 4 | ADDR=127.0.0.1 # localhost 5 | PORT=50001 # default mainnet Electrum RPC port 6 | PROTOCOL=t # TCP (no SSL) 7 | 8 | # Use only local Electrum server: 9 | electrum --oneserver --server="$ADDR:$PORT:$PROTOCOL" $* 10 | -------------------------------------------------------------------------------- /scripts/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -eu 3 | trap 'kill $(jobs -p)' EXIT 4 | 5 
| DELAY=5 6 | LOG=/tmp/electrs.log 7 | CARGO="cargo +stable" 8 | 9 | tail -v -n0 -F "$LOG" & 10 | 11 | export RUST_BACKTRACE=1 12 | while : 13 | do 14 | $CARGO fmt 15 | $CARGO check --release 16 | $CARGO run --release -- $* 2>> "$LOG" 17 | echo "Restarting in $DELAY seconds..." 18 | sleep $DELAY 19 | done 20 | -------------------------------------------------------------------------------- /src/app.rs: -------------------------------------------------------------------------------- 1 | use bitcoin::hashes::sha256d::Hash as Sha256dHash; 2 | use std::sync::{Arc, Mutex}; 3 | 4 | use crate::{daemon, index, signal::Waiter, store}; 5 | 6 | use crate::errors::*; 7 | 8 | pub struct App { 9 | store: store::DBStore, 10 | index: index::Index, 11 | daemon: daemon::Daemon, 12 | tip: Mutex<Sha256dHash>, 13 | } 14 | 15 | impl App { 16 | pub fn new( 17 | store: store::DBStore, 18 | index: index::Index, 19 | daemon: daemon::Daemon, 20 | ) -> Result<Arc<App>> { 21 | Ok(Arc::new(App { 22 | store, 23 | index, 24 | daemon: daemon.reconnect()?, 25 | tip: Mutex::new(Sha256dHash::default()), 26 | })) 27 | } 28 | 29 | fn write_store(&self) -> &store::WriteStore { 30 | &self.store 31 | } 32 | // TODO: use index for queries. 33 | pub fn read_store(&self) -> &store::ReadStore { 34 | &self.store 35 | } 36 | pub fn index(&self) -> &index::Index { 37 | &self.index 38 | } 39 | pub fn daemon(&self) -> &daemon::Daemon { 40 | &self.daemon 41 | } 42 | 43 | pub fn update(&self, signal: &Waiter) -> Result<bool> { 44 | let mut tip = self.tip.lock().expect("failed to lock tip"); 45 | let new_block = *tip != self.daemon().getbestblockhash()?; 46 | if new_block { 47 | *tip = self.index().update(self.write_store(), &signal)?; 48 | } 49 | Ok(new_block) 50 | } 51 | } 52 | -------------------------------------------------------------------------------- /src/bin/electrs.rs: -------------------------------------------------------------------------------- 1 | extern crate error_chain; 2 | #[macro_use] 3 | extern crate log; 4 | 5 | extern crate electrs; 6 | 7 | use crossbeam_channel::{self as channel}; 8 | use error_chain::ChainedError; 9 | use std::process; 10 | use std::sync::{Arc, RwLock}; 11 | use std::time::Duration; 12 | 13 | use electrs::{ 14 | config::Config, 15 | daemon::Daemon, 16 | electrum::RPC as ElectrumRPC, 17 | errors::*, 18 | metrics::Metrics, 19 | new_index::{precache, zmq, ChainQuery, FetchFrom, Indexer, Mempool, Query, Store}, 20 | rest, 21 | signal::Waiter, 22 | }; 23 | 24 | #[cfg(feature = "otlp-tracing")] 25 | use electrs::otlp_trace; 26 | 27 | #[cfg(feature = "liquid")] 28 | use electrs::elements::AssetRegistry; 29 | use electrs::metrics::MetricOpts; 30 | 31 | fn fetch_from(config: &Config, store: &Store) -> FetchFrom { 32 | let mut jsonrpc_import = config.jsonrpc_import; 33 | if !jsonrpc_import { 34 | // switch over to jsonrpc after the initial sync is done 35 | jsonrpc_import = store.done_initial_sync(); 36 | } 37 | 38 | if jsonrpc_import { 39 | // slower, uses JSONRPC (good for incremental updates) 40 | FetchFrom::Bitcoind 41 | } else { 42 | // faster, uses blk*.dat files (good for initial indexing) 43 | FetchFrom::BlkFiles 44 | } 45 | } 46 | 47 | fn run_server(config: Arc<Config>) -> Result<()> { 48 | let (block_hash_notify, block_hash_receive) = channel::bounded(1); 49 | let signal = Waiter::start(block_hash_receive); 50 | let metrics = Metrics::new(config.monitoring_addr); 51 | metrics.start(); 52 | 53 | if let Some(zmq_addr) = config.zmq_addr.as_ref() { 54 | zmq::start(&format!("tcp://{zmq_addr}"), block_hash_notify); 55 | } 56 | 57 | let
daemon = Arc::new(Daemon::new( 58 | &config.daemon_dir, 59 | &config.blocks_dir, 60 | config.daemon_rpc_addr, 61 | config.daemon_parallelism, 62 | config.cookie_getter(), 63 | config.network_type, 64 | signal.clone(), 65 | &metrics, 66 | )?); 67 | let store = Arc::new(Store::open(&config.db_path.join("newindex"), &config)); 68 | let mut indexer = Indexer::open( 69 | Arc::clone(&store), 70 | fetch_from(&config, &store), 71 | &config, 72 | &metrics, 73 | ); 74 | let mut tip = indexer.update(&daemon)?; 75 | 76 | let chain = Arc::new(ChainQuery::new( 77 | Arc::clone(&store), 78 | Arc::clone(&daemon), 79 | &config, 80 | &metrics, 81 | )); 82 | 83 | if let Some(ref precache_file) = config.precache_scripts { 84 | let precache_scripthashes = precache::scripthashes_from_file(precache_file.to_string()) 85 | .expect("cannot load scripts to precache"); 86 | precache::precache(&chain, precache_scripthashes); 87 | } 88 | 89 | let mempool = Arc::new(RwLock::new(Mempool::new( 90 | Arc::clone(&chain), 91 | &metrics, 92 | Arc::clone(&config), 93 | ))); 94 | 95 | while !Mempool::update(&mempool, &daemon, &tip)? { 96 | // Mempool syncing was aborted because the chain tip moved; 97 | // Index the new block(s) and try again. 98 | tip = indexer.update(&daemon)?; 99 | } 100 | 101 | #[cfg(feature = "liquid")] 102 | let asset_db = config.asset_db_path.as_ref().map(|db_dir| { 103 | let asset_db = Arc::new(RwLock::new(AssetRegistry::new(db_dir.clone()))); 104 | AssetRegistry::spawn_sync(asset_db.clone()); 105 | asset_db 106 | }); 107 | 108 | let query = Arc::new(Query::new( 109 | Arc::clone(&chain), 110 | Arc::clone(&mempool), 111 | Arc::clone(&daemon), 112 | Arc::clone(&config), 113 | #[cfg(feature = "liquid")] 114 | asset_db, 115 | )); 116 | 117 | // TODO: configuration for which servers to start 118 | let rest_server = rest::start(Arc::clone(&config), Arc::clone(&query)); 119 | let electrum_server = ElectrumRPC::start(Arc::clone(&config), Arc::clone(&query), &metrics); 120 | 121 | let main_loop_count = metrics.gauge(MetricOpts::new( 122 | "electrs_main_loop_count", 123 | "count of iterations of electrs main loop each 5 seconds or after interrupts", 124 | )); 125 | 126 | loop { 127 | main_loop_count.inc(); 128 | 129 | if let Err(err) = signal.wait(Duration::from_secs(5), true) { 130 | info!("stopping server: {}", err); 131 | rest_server.stop(); 132 | // the electrum server is stopped when dropped 133 | break; 134 | } 135 | 136 | // Index new blocks 137 | let current_tip = daemon.getbestblockhash()?; 138 | if current_tip != tip { 139 | tip = indexer.update(&daemon)?; 140 | }; 141 | 142 | // Update mempool 143 | if !Mempool::update(&mempool, &daemon, &tip)? 
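// Ok(false) means the mempool update raced a new chain tip; log it and retry on the next 5-second iteration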
{ 144 | warn!("skipped failed mempool update, trying again in 5 seconds"); 145 | } 146 | 147 | // Update subscribed clients 148 | electrum_server.notify(); 149 | } 150 | info!("server stopped"); 151 | Ok(()) 152 | } 153 | 154 | fn main_() { 155 | let config = Arc::new(Config::from_args()); 156 | if let Err(e) = run_server(config) { 157 | error!("server failed: {}", e.display_chain()); 158 | process::exit(1); 159 | } 160 | } 161 | 162 | #[cfg(not(feature = "otlp-tracing"))] 163 | fn main() { 164 | main_(); 165 | } 166 | 167 | #[cfg(feature = "otlp-tracing")] 168 | #[tokio::main] 169 | async fn main() { 170 | let _tracing_guard = otlp_trace::init_tracing("electrs"); 171 | main_() 172 | } 173 | -------------------------------------------------------------------------------- /src/bin/popular-scripts.rs: -------------------------------------------------------------------------------- 1 | extern crate electrs; 2 | 3 | use bitcoin::hex::DisplayHex; 4 | use electrs::{ 5 | config::Config, 6 | new_index::{Store, TxHistoryKey}, 7 | util::bincode, 8 | }; 9 | 10 | fn main() { 11 | let config = Config::from_args(); 12 | let store = Store::open(&config.db_path.join("newindex"), &config); 13 | 14 | let mut iter = store.history_db().raw_iterator(); 15 | iter.seek(b"H"); 16 | 17 | let mut curr_scripthash = [0u8; 32]; 18 | let mut total_entries = 0; 19 | 20 | while iter.valid() { 21 | let key = iter.key().unwrap(); 22 | 23 | if !key.starts_with(b"H") { 24 | break; 25 | } 26 | 27 | let entry: TxHistoryKey = 28 | bincode::deserialize_big(&key).expect("failed to deserialize TxHistoryKey"); 29 | 30 | if curr_scripthash != entry.hash { 31 | if total_entries > 100 { 32 | println!( 33 | "{} {}", 34 | curr_scripthash.to_lower_hex_string(), 35 | total_entries 36 | ); 37 | } 38 | 39 | curr_scripthash = entry.hash; 40 | total_entries = 0; 41 | } 42 | 43 | total_entries += 1; 44 | 45 | iter.next(); 46 | } 47 | 48 | if total_entries >= 4000 { 49 | println!( 50 | "scripthash,{},{}", 51 | curr_scripthash.to_lower_hex_string(), 52 | total_entries 53 | ); 54 | } 55 | } 56 | -------------------------------------------------------------------------------- /src/bin/tx-fingerprint-stats.rs: -------------------------------------------------------------------------------- 1 | extern crate electrs; 2 | 3 | #[cfg(not(feature = "liquid"))] 4 | #[macro_use] 5 | extern crate log; 6 | 7 | #[cfg(not(feature = "liquid"))] 8 | fn main() { 9 | use std::collections::HashSet; 10 | use std::sync::Arc; 11 | 12 | use bitcoin::blockdata::script::ScriptBuf; 13 | use bitcoin::consensus::encode::deserialize; 14 | use electrs::{ 15 | chain::Transaction, 16 | config::Config, 17 | daemon::Daemon, 18 | metrics::Metrics, 19 | new_index::{ChainQuery, FetchFrom, Indexer, Store}, 20 | signal::Waiter, 21 | util::has_prevout, 22 | }; 23 | 24 | let signal = Waiter::start(crossbeam_channel::never()); 25 | let config = Config::from_args(); 26 | let store = Arc::new(Store::open(&config.db_path.join("newindex"), &config)); 27 | 28 | let metrics = Metrics::new(config.monitoring_addr); 29 | metrics.start(); 30 | 31 | let daemon = Arc::new( 32 | Daemon::new( 33 | &config.daemon_dir, 34 | &config.blocks_dir, 35 | config.daemon_rpc_addr, 36 | config.daemon_parallelism, 37 | config.cookie_getter(), 38 | config.network_type, 39 | signal, 40 | &metrics, 41 | ) 42 | .unwrap(), 43 | ); 44 | 45 | let chain = ChainQuery::new(Arc::clone(&store), Arc::clone(&daemon), &config, &metrics); 46 | 47 | let mut indexer = Indexer::open(Arc::clone(&store), FetchFrom::Bitcoind, &config, 
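// fetch over bitcoind's JSONRPC (FetchFrom::Bitcoind): slower than blk*.dat ingestion, but fine for the incremental catch-up this tool needs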
&metrics); 48 | indexer.update(&daemon).unwrap(); 49 | 50 | let mut iter = store.txstore_db().raw_iterator(); 51 | iter.seek(b"T"); 52 | 53 | let mut total = 0; 54 | let mut uih_totals = vec![0, 0, 0]; 55 | 56 | while iter.valid() { 57 | let key = iter.key().unwrap(); 58 | let value = iter.value().unwrap(); 59 | 60 | if !key.starts_with(b"T") { 61 | break; 62 | } 63 | 64 | let tx: Transaction = deserialize(&value).expect("failed to parse Transaction"); 65 | let txid = tx.compute_txid(); 66 | 67 | iter.next(); 68 | 69 | // only consider transactions of exactly two outputs 70 | if tx.output.len() != 2 { 71 | continue; 72 | } 73 | // skip coinbase txs 74 | if tx.is_coinbase() { 75 | continue; 76 | } 77 | 78 | // skip orphaned transactions 79 | let blockid = match chain.tx_confirming_block(&txid) { 80 | Some(blockid) => blockid, 81 | None => continue, 82 | }; 83 | 84 | //info!("{:?},{:?}", txid, blockid); 85 | 86 | let prevouts = chain 87 | .lookup_txos( 88 | tx.input 89 | .iter() 90 | .filter(|txin| has_prevout(txin)) 91 | .map(|txin| txin.previous_output) 92 | .collect(), 93 | ) 94 | .unwrap(); 95 | 96 | let total_out: u64 = tx.output.iter().map(|out| out.value.to_sat()).sum(); 97 | let small_out = tx 98 | .output 99 | .iter() 100 | .map(|out| out.value.to_sat()) 101 | .min() 102 | .unwrap(); 103 | let large_out = tx 104 | .output 105 | .iter() 106 | .map(|out| out.value.to_sat()) 107 | .max() 108 | .unwrap(); 109 | 110 | let total_in: u64 = prevouts.values().map(|out| out.value.to_sat()).sum(); 111 | let smallest_in = prevouts 112 | .values() 113 | .map(|out| out.value.to_sat()) 114 | .min() 115 | .unwrap(); 116 | 117 | let fee = total_in - total_out; 118 | 119 | // test for UIH 120 | let uih = if total_in - smallest_in > large_out + fee { 121 | 2 122 | } else if total_in - smallest_in > small_out + fee { 123 | 1 124 | } else { 125 | 0 126 | }; 127 | 128 | // test for spending multiple coins owned by the same spk 129 | let is_multi_spend = { 130 | let mut seen_spks = HashSet::new(); 131 | prevouts 132 | .values() 133 | .any(|out| !seen_spks.insert(&out.script_pubkey)) 134 | }; 135 | 136 | // test for sending back to one of the spent spks 137 | let has_reuse = { 138 | let prev_spks: HashSet<ScriptBuf> = prevouts 139 | .values() 140 | .map(|out| out.script_pubkey.clone()) 141 | .collect(); 142 | tx.output 143 | .iter() 144 | .any(|out| prev_spks.contains(&out.script_pubkey)) 145 | }; 146 | 147 | println!( 148 | "{},{},{},{},{},{}", 149 | txid, blockid.height, tx.lock_time, uih, is_multi_spend as u8, has_reuse as u8 150 | ); 151 | 152 | total += 1; 153 | uih_totals[uih] += 1; 154 | } 155 | info!( 156 | "processed {} total txs, UIH counts: {:?}", 157 | total, uih_totals 158 | ); 159 | } 160 | 161 | #[cfg(feature = "liquid")] 162 | fn main() {} 163 | -------------------------------------------------------------------------------- /src/chain.rs: -------------------------------------------------------------------------------- 1 | #[cfg(not(feature = "liquid"))] // use regular Bitcoin data structures 2 | pub use bitcoin::{ 3 | address, blockdata::block::Header as BlockHeader, blockdata::script, consensus::deserialize, 4 | hash_types::TxMerkleNode, Address, Block, BlockHash, OutPoint, ScriptBuf as Script, Sequence, 5 | Transaction, TxIn, TxOut, Txid, 6 | }; 7 | 8 | #[cfg(feature = "liquid")] 9 | pub use { 10 | crate::elements::asset, 11 | elements::{ 12 | address, confidential, encode::deserialize, script, Address, AssetId, Block, BlockHash, 13 | BlockHeader, OutPoint, Script, Sequence, Transaction, TxIn,
TxMerkleNode, TxOut, Txid, 14 | }, 15 | }; 16 | 17 | use bitcoin::blockdata::constants::genesis_block; 18 | pub use bitcoin::network::Network as BNetwork; 19 | 20 | #[cfg(not(feature = "liquid"))] 21 | pub type Value = u64; 22 | #[cfg(feature = "liquid")] 23 | pub use confidential::Value; 24 | 25 | #[derive(Debug, Copy, Clone, PartialEq, Hash, Serialize, Ord, PartialOrd, Eq)] 26 | pub enum Network { 27 | #[cfg(not(feature = "liquid"))] 28 | Bitcoin, 29 | #[cfg(not(feature = "liquid"))] 30 | Testnet, 31 | #[cfg(not(feature = "liquid"))] 32 | Regtest, 33 | #[cfg(not(feature = "liquid"))] 34 | Signet, 35 | 36 | #[cfg(feature = "liquid")] 37 | Liquid, 38 | #[cfg(feature = "liquid")] 39 | LiquidTestnet, 40 | #[cfg(feature = "liquid")] 41 | LiquidRegtest, 42 | } 43 | 44 | impl Network { 45 | #[cfg(not(feature = "liquid"))] 46 | pub fn magic(self) -> u32 { 47 | u32::from_le_bytes(BNetwork::from(self).magic().to_bytes()) 48 | } 49 | 50 | #[cfg(feature = "liquid")] 51 | pub fn magic(self) -> u32 { 52 | match self { 53 | Network::Liquid | Network::LiquidRegtest => 0xDAB5_BFFA, 54 | Network::LiquidTestnet => 0x62DD_0E41, 55 | } 56 | } 57 | 58 | pub fn is_regtest(self) -> bool { 59 | match self { 60 | #[cfg(not(feature = "liquid"))] 61 | Network::Regtest => true, 62 | #[cfg(feature = "liquid")] 63 | Network::LiquidRegtest => true, 64 | _ => false, 65 | } 66 | } 67 | 68 | #[cfg(feature = "liquid")] 69 | pub fn address_params(self) -> &'static address::AddressParams { 70 | // Liquid regtest uses elements's address params 71 | match self { 72 | Network::Liquid => &address::AddressParams::LIQUID, 73 | Network::LiquidRegtest => &address::AddressParams::ELEMENTS, 74 | Network::LiquidTestnet => &address::AddressParams::LIQUID_TESTNET, 75 | } 76 | } 77 | 78 | #[cfg(feature = "liquid")] 79 | pub fn native_asset(self) -> &'static AssetId { 80 | match self { 81 | Network::Liquid => &*asset::NATIVE_ASSET_ID, 82 | Network::LiquidTestnet => &*asset::NATIVE_ASSET_ID_TESTNET, 83 | Network::LiquidRegtest => &*asset::NATIVE_ASSET_ID_REGTEST, 84 | } 85 | } 86 | 87 | #[cfg(feature = "liquid")] 88 | pub fn pegged_asset(self) -> Option<&'static AssetId> { 89 | match self { 90 | Network::Liquid => Some(&*asset::NATIVE_ASSET_ID), 91 | Network::LiquidTestnet | Network::LiquidRegtest => None, 92 | } 93 | } 94 | 95 | pub fn names() -> Vec<String> { 96 | #[cfg(not(feature = "liquid"))] 97 | return vec![ 98 | "mainnet".to_string(), 99 | "testnet".to_string(), 100 | "regtest".to_string(), 101 | "signet".to_string(), 102 | ]; 103 | 104 | #[cfg(feature = "liquid")] 105 | return vec![ 106 | "liquid".to_string(), 107 | "liquidtestnet".to_string(), 108 | "liquidregtest".to_string(), 109 | ]; 110 | } 111 | } 112 | 113 | pub fn genesis_hash(network: Network) -> BlockHash { 114 | #[cfg(not(feature = "liquid"))] 115 | return bitcoin_genesis_hash(network.into()); 116 | #[cfg(feature = "liquid")] 117 | return liquid_genesis_hash(network); 118 | } 119 | 120 | pub fn bitcoin_genesis_hash(network: BNetwork) -> bitcoin::BlockHash { 121 | lazy_static!
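// memoize the genesis hashes: genesis_block() reconstructs the entire genesis block on each call, so compute each network's hash only once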
{ 122 | static ref BITCOIN_GENESIS: bitcoin::BlockHash = 123 | genesis_block(BNetwork::Bitcoin).block_hash(); 124 | static ref TESTNET_GENESIS: bitcoin::BlockHash = 125 | genesis_block(BNetwork::Testnet).block_hash(); 126 | static ref REGTEST_GENESIS: bitcoin::BlockHash = 127 | genesis_block(BNetwork::Regtest).block_hash(); 128 | static ref SIGNET_GENESIS: bitcoin::BlockHash = 129 | genesis_block(BNetwork::Signet).block_hash(); 130 | } 131 | match network { 132 | BNetwork::Bitcoin => *BITCOIN_GENESIS, 133 | BNetwork::Testnet => *TESTNET_GENESIS, 134 | BNetwork::Regtest => *REGTEST_GENESIS, 135 | BNetwork::Signet => *SIGNET_GENESIS, 136 | _ => panic!("unknown network {:?}", network), 137 | } 138 | } 139 | 140 | #[cfg(feature = "liquid")] 141 | pub fn liquid_genesis_hash(network: Network) -> elements::BlockHash { 142 | use crate::util::DEFAULT_BLOCKHASH; 143 | 144 | lazy_static! { 145 | static ref LIQUID_GENESIS: BlockHash = 146 | "1466275836220db2944ca059a3a10ef6fd2ea684b0688d2c379296888a206003" 147 | .parse() 148 | .unwrap(); 149 | } 150 | 151 | match network { 152 | Network::Liquid => *LIQUID_GENESIS, 153 | // The genesis block for liquid regtest chains varies based on the chain configuration. 154 | // This instead uses an all zeroed-out hash, which doesn't matter in practice because it's 155 | // only used for Electrum server discovery, which isn't active on regtest. 156 | _ => *DEFAULT_BLOCKHASH, 157 | } 158 | } 159 | 160 | impl From<&str> for Network { 161 | fn from(network_name: &str) -> Self { 162 | match network_name { 163 | #[cfg(not(feature = "liquid"))] 164 | "mainnet" => Network::Bitcoin, 165 | #[cfg(not(feature = "liquid"))] 166 | "testnet" => Network::Testnet, 167 | #[cfg(not(feature = "liquid"))] 168 | "regtest" => Network::Regtest, 169 | #[cfg(not(feature = "liquid"))] 170 | "signet" => Network::Signet, 171 | 172 | #[cfg(feature = "liquid")] 173 | "liquid" => Network::Liquid, 174 | #[cfg(feature = "liquid")] 175 | "liquidtestnet" => Network::LiquidTestnet, 176 | #[cfg(feature = "liquid")] 177 | "liquidregtest" => Network::LiquidRegtest, 178 | 179 | _ => panic!("unsupported network: {:?}", network_name), 180 | } 181 | } 182 | } 183 | 184 | #[cfg(not(feature = "liquid"))] 185 | impl From<Network> for BNetwork { 186 | fn from(network: Network) -> Self { 187 | match network { 188 | Network::Bitcoin => BNetwork::Bitcoin, 189 | Network::Testnet => BNetwork::Testnet, 190 | Network::Regtest => BNetwork::Regtest, 191 | Network::Signet => BNetwork::Signet, 192 | } 193 | } 194 | } 195 | 196 | #[cfg(not(feature = "liquid"))] 197 | impl From<BNetwork> for Network { 198 | fn from(network: BNetwork) -> Self { 199 | match network { 200 | BNetwork::Bitcoin => Network::Bitcoin, 201 | BNetwork::Testnet => Network::Testnet, 202 | BNetwork::Regtest => Network::Regtest, 203 | BNetwork::Signet => Network::Signet, 204 | _ => panic!("unknown network {:?}", network), 205 | } 206 | } 207 | } 208 | -------------------------------------------------------------------------------- /src/electrum/client.rs: -------------------------------------------------------------------------------- 1 | use std::collections::HashMap; 2 | use std::convert::TryFrom; 3 | 4 | use bitcoin::hashes::{sha256d, Hash}; 5 | pub use electrum_client::client::Client; 6 | pub use electrum_client::Error as ElectrumError; 7 | pub use electrum_client::ServerFeaturesRes; 8 | 9 | use crate::chain::BlockHash; 10 | use crate::electrum::ServerFeatures; 11 | use crate::errors::{Error, ResultExt}; 12 | 13 | // Convert from electrum-client's server features
struct to ours. We're using a different struct because 14 | // the electrum-client's one doesn't support the "hosts" key. 15 | impl TryFrom for ServerFeatures { 16 | type Error = Error; 17 | fn try_from(features: ServerFeaturesRes) -> Result { 18 | let genesis_hash = { 19 | let mut genesis_hash = features.genesis_hash; 20 | genesis_hash.reverse(); 21 | BlockHash::from_raw_hash(sha256d::Hash::from_byte_array(genesis_hash)) 22 | }; 23 | 24 | Ok(ServerFeatures { 25 | // electrum-client doesn't retain the hosts map data, but we already have it from the add_peer request 26 | hosts: HashMap::new(), 27 | genesis_hash, 28 | server_version: features.server_version, 29 | protocol_min: features 30 | .protocol_min 31 | .parse() 32 | .chain_err(|| "invalid protocol_min")?, 33 | protocol_max: features 34 | .protocol_max 35 | .parse() 36 | .chain_err(|| "invalid protocol_max")?, 37 | pruning: features.pruning.map(|pruning| pruning as usize), 38 | hash_function: features 39 | .hash_function 40 | .chain_err(|| "missing hash_function")?, 41 | }) 42 | } 43 | } 44 | -------------------------------------------------------------------------------- /src/electrum/discovery/default_servers.rs: -------------------------------------------------------------------------------- 1 | use crate::chain::Network; 2 | use crate::electrum::discovery::{DiscoveryManager, Service}; 3 | 4 | pub fn add_default_servers(discovery: &DiscoveryManager, network: Network) { 5 | match network { 6 | #[cfg(not(feature = "liquid"))] 7 | Network::Bitcoin => { 8 | discovery 9 | .add_default_server( 10 | "3smoooajg7qqac2y.onion".into(), 11 | vec![Service::Tcp(50001), Service::Ssl(50002)], 12 | ) 13 | .ok(); 14 | discovery 15 | .add_default_server( 16 | "81-7-10-251.blue.kundencontroller.de".into(), 17 | vec![Service::Ssl(50002)], 18 | ) 19 | .ok(); 20 | discovery 21 | .add_default_server( 22 | "E-X.not.fyi".into(), 23 | vec![Service::Tcp(50001), Service::Ssl(50002)], 24 | ) 25 | .ok(); 26 | discovery 27 | .add_default_server( 28 | "VPS.hsmiths.com".into(), 29 | vec![Service::Tcp(50001), Service::Ssl(50002)], 30 | ) 31 | .ok(); 32 | discovery 33 | .add_default_server( 34 | "b.ooze.cc".into(), 35 | vec![Service::Tcp(50001), Service::Ssl(50002)], 36 | ) 37 | .ok(); 38 | discovery 39 | .add_default_server( 40 | "bauerjda5hnedjam.onion".into(), 41 | vec![Service::Tcp(50001), Service::Ssl(50002)], 42 | ) 43 | .ok(); 44 | discovery 45 | .add_default_server( 46 | "bauerjhejlv6di7s.onion".into(), 47 | vec![Service::Tcp(50001), Service::Ssl(50002)], 48 | ) 49 | .ok(); 50 | discovery 51 | .add_default_server( 52 | "bitcoin.corgi.party".into(), 53 | vec![Service::Tcp(50001), Service::Ssl(50002)], 54 | ) 55 | .ok(); 56 | discovery 57 | .add_default_server( 58 | "bitcoin3nqy3db7c.onion".into(), 59 | vec![Service::Tcp(50001), Service::Ssl(50002)], 60 | ) 61 | .ok(); 62 | discovery 63 | .add_default_server( 64 | "bitcoins.sk".into(), 65 | vec![Service::Tcp(50001), Service::Ssl(50002)], 66 | ) 67 | .ok(); 68 | discovery 69 | .add_default_server( 70 | "btc.cihar.com".into(), 71 | vec![Service::Tcp(50001), Service::Ssl(50002)], 72 | ) 73 | .ok(); 74 | discovery 75 | .add_default_server( 76 | "btc.xskyx.net".into(), 77 | vec![Service::Tcp(50001), Service::Ssl(50002)], 78 | ) 79 | .ok(); 80 | discovery 81 | .add_default_server( 82 | "currentlane.lovebitco.in".into(), 83 | vec![Service::Tcp(50001), Service::Ssl(50002)], 84 | ) 85 | .ok(); 86 | discovery 87 | .add_default_server( 88 | "daedalus.bauerj.eu".into(), 89 | vec![Service::Tcp(50001), Service::Ssl(50002)], 
90 | ) 91 | .ok(); 92 | discovery 93 | .add_default_server( 94 | "electrum.jochen-hoenicke.de".into(), 95 | vec![Service::Tcp(50003), Service::Ssl(50005)], 96 | ) 97 | .ok(); 98 | discovery 99 | .add_default_server( 100 | "dragon085.startdedicated.de".into(), 101 | vec![Service::Ssl(50002)], 102 | ) 103 | .ok(); 104 | discovery 105 | .add_default_server( 106 | "e-1.claudioboxx.com".into(), 107 | vec![Service::Tcp(50001), Service::Ssl(50002)], 108 | ) 109 | .ok(); 110 | discovery 111 | .add_default_server( 112 | "e.keff.org".into(), 113 | vec![Service::Tcp(50001), Service::Ssl(50002)], 114 | ) 115 | .ok(); 116 | discovery 117 | .add_default_server( 118 | "electrum-server.ninja".into(), 119 | vec![Service::Tcp(50001), Service::Ssl(50002)], 120 | ) 121 | .ok(); 122 | discovery 123 | .add_default_server( 124 | "electrum-unlimited.criptolayer.net".into(), 125 | vec![Service::Ssl(50002)], 126 | ) 127 | .ok(); 128 | discovery 129 | .add_default_server( 130 | "electrum.eff.ro".into(), 131 | vec![Service::Tcp(50001), Service::Ssl(50002)], 132 | ) 133 | .ok(); 134 | discovery 135 | .add_default_server( 136 | "electrum.festivaldelhumor.org".into(), 137 | vec![Service::Tcp(50001), Service::Ssl(50002)], 138 | ) 139 | .ok(); 140 | discovery 141 | .add_default_server( 142 | "electrum.hsmiths.com".into(), 143 | vec![Service::Tcp(50001), Service::Ssl(50002)], 144 | ) 145 | .ok(); 146 | discovery 147 | .add_default_server( 148 | "electrum.leblancnet.us".into(), 149 | vec![Service::Tcp(50001), Service::Ssl(50002)], 150 | ) 151 | .ok(); 152 | discovery 153 | .add_default_server("electrum.mindspot.org".into(), vec![Service::Ssl(50002)]) 154 | .ok(); 155 | discovery 156 | .add_default_server( 157 | "electrum.qtornado.com".into(), 158 | vec![Service::Tcp(50001), Service::Ssl(50002)], 159 | ) 160 | .ok(); 161 | discovery 162 | .add_default_server("electrum.taborsky.cz".into(), vec![Service::Ssl(50002)]) 163 | .ok(); 164 | discovery 165 | .add_default_server( 166 | "electrum.villocq.com".into(), 167 | vec![Service::Tcp(50001), Service::Ssl(50002)], 168 | ) 169 | .ok(); 170 | discovery 171 | .add_default_server( 172 | "electrum2.eff.ro".into(), 173 | vec![Service::Tcp(50001), Service::Ssl(50002)], 174 | ) 175 | .ok(); 176 | discovery 177 | .add_default_server( 178 | "electrum2.villocq.com".into(), 179 | vec![Service::Tcp(50001), Service::Ssl(50002)], 180 | ) 181 | .ok(); 182 | discovery 183 | .add_default_server( 184 | "electrumx.bot.nu".into(), 185 | vec![Service::Tcp(50001), Service::Ssl(50002)], 186 | ) 187 | .ok(); 188 | discovery 189 | .add_default_server( 190 | "electrumx.ddns.net".into(), 191 | vec![Service::Tcp(50001), Service::Ssl(50002)], 192 | ) 193 | .ok(); 194 | discovery 195 | .add_default_server("electrumx.ftp.sh".into(), vec![Service::Ssl(50002)]) 196 | .ok(); 197 | discovery 198 | .add_default_server( 199 | "electrumx.ml".into(), 200 | vec![Service::Tcp(50001), Service::Ssl(50002)], 201 | ) 202 | .ok(); 203 | discovery 204 | .add_default_server( 205 | "electrumx.soon.it".into(), 206 | vec![Service::Tcp(50001), Service::Ssl(50002)], 207 | ) 208 | .ok(); 209 | discovery 210 | .add_default_server("electrumxhqdsmlu.onion".into(), vec![Service::Tcp(50001)]) 211 | .ok(); 212 | discovery 213 | .add_default_server( 214 | "elx01.knas.systems".into(), 215 | vec![Service::Tcp(50001), Service::Ssl(50002)], 216 | ) 217 | .ok(); 218 | discovery 219 | .add_default_server( 220 | "enode.duckdns.org".into(), 221 | vec![Service::Tcp(50001), Service::Ssl(50002)], 222 | ) 223 | .ok(); 224 | discovery 225 | 
.add_default_server( 226 | "fedaykin.goip.de".into(), 227 | vec![Service::Tcp(50001), Service::Ssl(50002)], 228 | ) 229 | .ok(); 230 | discovery 231 | .add_default_server( 232 | "fn.48.org".into(), 233 | vec![Service::Tcp(50003), Service::Ssl(50002)], 234 | ) 235 | .ok(); 236 | discovery 237 | .add_default_server( 238 | "helicarrier.bauerj.eu".into(), 239 | vec![Service::Tcp(50001), Service::Ssl(50002)], 240 | ) 241 | .ok(); 242 | discovery 243 | .add_default_server( 244 | "hsmiths4fyqlw5xw.onion".into(), 245 | vec![Service::Tcp(50001), Service::Ssl(50002)], 246 | ) 247 | .ok(); 248 | discovery 249 | .add_default_server( 250 | "hsmiths5mjk6uijs.onion".into(), 251 | vec![Service::Tcp(50001), Service::Ssl(50002)], 252 | ) 253 | .ok(); 254 | discovery 255 | .add_default_server( 256 | "icarus.tetradrachm.net".into(), 257 | vec![Service::Tcp(50001), Service::Ssl(50002)], 258 | ) 259 | .ok(); 260 | discovery 261 | .add_default_server( 262 | "electrum.emzy.de".into(), 263 | vec![Service::Tcp(50001), Service::Ssl(50002)], 264 | ) 265 | .ok(); 266 | discovery 267 | .add_default_server( 268 | "ndnd.selfhost.eu".into(), 269 | vec![Service::Tcp(50001), Service::Ssl(50002)], 270 | ) 271 | .ok(); 272 | discovery 273 | .add_default_server("ndndword5lpb7eex.onion".into(), vec![Service::Tcp(50001)]) 274 | .ok(); 275 | discovery 276 | .add_default_server( 277 | "orannis.com".into(), 278 | vec![Service::Tcp(50001), Service::Ssl(50002)], 279 | ) 280 | .ok(); 281 | discovery 282 | .add_default_server( 283 | "ozahtqwp25chjdjd.onion".into(), 284 | vec![Service::Tcp(50001), Service::Ssl(50002)], 285 | ) 286 | .ok(); 287 | discovery 288 | .add_default_server( 289 | "qtornadoklbgdyww.onion".into(), 290 | vec![Service::Tcp(50001), Service::Ssl(50002)], 291 | ) 292 | .ok(); 293 | discovery 294 | .add_default_server("rbx.curalle.ovh".into(), vec![Service::Ssl(50002)]) 295 | .ok(); 296 | discovery 297 | .add_default_server("s7clinmo4cazmhul.onion".into(), vec![Service::Tcp(50001)]) 298 | .ok(); 299 | discovery 300 | .add_default_server( 301 | "tardis.bauerj.eu".into(), 302 | vec![Service::Tcp(50001), Service::Ssl(50002)], 303 | ) 304 | .ok(); 305 | discovery 306 | .add_default_server("technetium.network".into(), vec![Service::Ssl(50002)]) 307 | .ok(); 308 | discovery 309 | .add_default_server( 310 | "tomscryptos.com".into(), 311 | vec![Service::Tcp(50001), Service::Ssl(50002)], 312 | ) 313 | .ok(); 314 | discovery 315 | .add_default_server( 316 | "ulrichard.ch".into(), 317 | vec![Service::Tcp(50001), Service::Ssl(50002)], 318 | ) 319 | .ok(); 320 | discovery 321 | .add_default_server( 322 | "vmd27610.contaboserver.net".into(), 323 | vec![Service::Tcp(50001), Service::Ssl(50002)], 324 | ) 325 | .ok(); 326 | discovery 327 | .add_default_server( 328 | "vmd30612.contaboserver.net".into(), 329 | vec![Service::Tcp(50001), Service::Ssl(50002)], 330 | ) 331 | .ok(); 332 | discovery 333 | .add_default_server( 334 | "wsw6tua3xl24gsmi264zaep6seppjyrkyucpsmuxnjzyt3f3j6swshad.onion".into(), 335 | vec![Service::Tcp(50001), Service::Ssl(50002)], 336 | ) 337 | .ok(); 338 | discovery 339 | .add_default_server( 340 | "xray587.startdedicated.de".into(), 341 | vec![Service::Ssl(50002)], 342 | ) 343 | .ok(); 344 | discovery 345 | .add_default_server( 346 | "yuio.top".into(), 347 | vec![Service::Tcp(50001), Service::Ssl(50002)], 348 | ) 349 | .ok(); 350 | discovery 351 | .add_default_server( 352 | "bitcoin.dragon.zone".into(), 353 | vec![Service::Tcp(50003), Service::Ssl(50004)], 354 | ) 355 | .ok(); 356 | discovery 357 | 
.add_default_server( 358 | "ecdsa.net".into(), 359 | vec![Service::Tcp(50001), Service::Ssl(110)], 360 | ) 361 | .ok(); 362 | discovery 363 | .add_default_server("btc.usebsv.com".into(), vec![Service::Ssl(50006)]) 364 | .ok(); 365 | discovery 366 | .add_default_server( 367 | "e2.keff.org".into(), 368 | vec![Service::Tcp(50001), Service::Ssl(50002)], 369 | ) 370 | .ok(); 371 | discovery 372 | .add_default_server("electrum.hodlister.co".into(), vec![Service::Ssl(50002)]) 373 | .ok(); 374 | discovery 375 | .add_default_server("electrum3.hodlister.co".into(), vec![Service::Ssl(50002)]) 376 | .ok(); 377 | discovery 378 | .add_default_server("electrum5.hodlister.co".into(), vec![Service::Ssl(50002)]) 379 | .ok(); 380 | discovery 381 | .add_default_server( 382 | "electrumx.electricnewyear.net".into(), 383 | vec![Service::Ssl(50002)], 384 | ) 385 | .ok(); 386 | discovery 387 | .add_default_server( 388 | "fortress.qtornado.com".into(), 389 | vec![Service::Tcp(50001), Service::Ssl(443)], 390 | ) 391 | .ok(); 392 | discovery 393 | .add_default_server( 394 | "green-gold.westeurope.cloudapp.azure.com".into(), 395 | vec![Service::Tcp(56001), Service::Ssl(56002)], 396 | ) 397 | .ok(); 398 | discovery 399 | .add_default_server( 400 | "electrumx.erbium.eu".into(), 401 | vec![Service::Tcp(50001), Service::Ssl(50002)], 402 | ) 403 | .ok(); 404 | } 405 | #[cfg(not(feature = "liquid"))] 406 | Network::Testnet => { 407 | discovery 408 | .add_default_server( 409 | "hsmithsxurybd7uh.onion".into(), 410 | vec![Service::Tcp(53011), Service::Ssl(53012)], 411 | ) 412 | .ok(); 413 | discovery 414 | .add_default_server( 415 | "testnet.hsmiths.com".into(), 416 | vec![Service::Tcp(53011), Service::Ssl(53012)], 417 | ) 418 | .ok(); 419 | discovery 420 | .add_default_server( 421 | "testnet.qtornado.com".into(), 422 | vec![Service::Tcp(51001), Service::Ssl(51002)], 423 | ) 424 | .ok(); 425 | discovery 426 | .add_default_server( 427 | "testnet1.bauerj.eu".into(), 428 | vec![Service::Tcp(50001), Service::Ssl(50002)], 429 | ) 430 | .ok(); 431 | discovery 432 | .add_default_server( 433 | "tn.not.fyi".into(), 434 | vec![Service::Tcp(55001), Service::Ssl(55002)], 435 | ) 436 | .ok(); 437 | discovery 438 | .add_default_server( 439 | "bitcoin.cluelessperson.com".into(), 440 | vec![Service::Tcp(51001), Service::Ssl(51002)], 441 | ) 442 | .ok(); 443 | } 444 | 445 | _ => (), 446 | } 447 | } 448 | -------------------------------------------------------------------------------- /src/electrum/mod.rs: -------------------------------------------------------------------------------- 1 | mod server; 2 | pub use server::RPC; 3 | 4 | #[cfg(feature = "electrum-discovery")] 5 | mod client; 6 | #[cfg(feature = "electrum-discovery")] 7 | mod discovery; 8 | #[cfg(feature = "electrum-discovery")] 9 | pub use {client::Client, discovery::DiscoveryManager}; 10 | 11 | use std::cmp::Ordering; 12 | use std::collections::HashMap; 13 | use std::str::FromStr; 14 | 15 | use serde::{de, Deserialize, Deserializer, Serialize}; 16 | 17 | use crate::chain::BlockHash; 18 | use crate::errors::ResultExt; 19 | use crate::util::BlockId; 20 | 21 | pub fn get_electrum_height(blockid: Option, has_unconfirmed_parents: bool) -> isize { 22 | match (blockid, has_unconfirmed_parents) { 23 | (Some(blockid), _) => blockid.height as isize, 24 | (None, false) => 0, 25 | (None, true) => -1, 26 | } 27 | } 28 | 29 | pub type Port = u16; 30 | pub type Hostname = String; 31 | 32 | pub type ServerHosts = HashMap; 33 | 34 | #[derive(Serialize, Deserialize, Clone, Debug)] 35 | pub struct 
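// The height encoding expected by Electrum clients, as produced by
// get_electrum_height() above: confirmed transactions report their block
// height, mempool transactions report 0, and mempool transactions spending
// other unconfirmed outputs report -1. A sketch of the two mempool cases:
//
//     assert_eq!(get_electrum_height(None, false), 0);
//     assert_eq!(get_electrum_height(None, true), -1);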
ServerFeatures { 36 | pub hosts: ServerHosts, 37 | pub genesis_hash: BlockHash, 38 | pub server_version: String, 39 | pub protocol_min: ProtocolVersion, 40 | pub protocol_max: ProtocolVersion, 41 | pub pruning: Option<usize>, 42 | pub hash_function: String, 43 | } 44 | 45 | #[derive(Serialize, Deserialize, Clone, Debug)] 46 | pub struct ServerPorts { 47 | tcp_port: Option<Port>, 48 | ssl_port: Option<Port>, 49 | } 50 | 51 | #[derive(Eq, PartialEq, Debug, Clone, Default)] 52 | pub struct ProtocolVersion { 53 | major: usize, 54 | minor: usize, 55 | } 56 | 57 | impl ProtocolVersion { 58 | pub const fn new(major: usize, minor: usize) -> Self { 59 | Self { major, minor } 60 | } 61 | } 62 | 63 | impl Ord for ProtocolVersion { 64 | fn cmp(&self, other: &Self) -> Ordering { 65 | self.major 66 | .cmp(&other.major) 67 | .then_with(|| self.minor.cmp(&other.minor)) 68 | } 69 | } 70 | 71 | impl PartialOrd for ProtocolVersion { 72 | fn partial_cmp(&self, other: &Self) -> Option<Ordering> { 73 | Some(self.cmp(other)) 74 | } 75 | } 76 | 77 | impl FromStr for ProtocolVersion { 78 | type Err = crate::errors::Error; 79 | fn from_str(s: &str) -> Result<Self, Self::Err> { 80 | let mut iter = s.split('.'); 81 | Ok(Self { 82 | major: iter 83 | .next() 84 | .chain_err(|| "missing major")? 85 | .parse() 86 | .chain_err(|| "invalid major")?, 87 | minor: iter 88 | .next() 89 | .chain_err(|| "missing minor")? 90 | .parse() 91 | .chain_err(|| "invalid minor")?, 92 | }) 93 | } 94 | } 95 | 96 | impl std::fmt::Display for ProtocolVersion { 97 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 98 | write!(f, "{}.{}", self.major, self.minor) 99 | } 100 | } 101 | 102 | impl Serialize for ProtocolVersion { 103 | fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> 104 | where 105 | S: serde::Serializer, 106 | { 107 | serializer.collect_str(&self) 108 | } 109 | } 110 | 111 | impl<'de> Deserialize<'de> for ProtocolVersion { 112 | fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> 113 | where 114 | D: Deserializer<'de>, 115 | { 116 | let s = String::deserialize(deserializer)?; 117 | FromStr::from_str(&s).map_err(de::Error::custom) 118 | } 119 | } 120 | -------------------------------------------------------------------------------- /src/elements/mod.rs: -------------------------------------------------------------------------------- 1 | use bitcoin::hashes::{sha256, Hash}; 2 | use elements::secp256k1_zkp::{PedersenCommitment, Tweak, ZERO_TWEAK}; 3 | use elements::{issuance::ContractHash, AssetId, TxIn}; 4 | 5 | pub mod asset; 6 | pub mod peg; 7 | mod registry; 8 | 9 | use asset::get_issuance_entropy; 10 | pub use asset::{lookup_asset, LiquidAsset}; 11 | pub use registry::{AssetRegistry, AssetSorting}; 12 | 13 | #[derive(Serialize, Deserialize, Clone)] 14 | pub struct IssuanceValue { 15 | pub asset_id: AssetId, 16 | pub is_reissuance: bool, 17 | #[serde(skip_serializing_if = "Option::is_none")] 18 | pub asset_blinding_nonce: Option<Tweak>, 19 | #[serde(skip_serializing_if = "Option::is_none")] 20 | pub contract_hash: Option<ContractHash>, 21 | pub asset_entropy: sha256::Midstate, 22 | #[serde(skip_serializing_if = "Option::is_none")] 23 | pub assetamount: Option<u64>, 24 | #[serde(skip_serializing_if = "Option::is_none")] 25 | pub assetamountcommitment: Option<PedersenCommitment>, 26 | #[serde(skip_serializing_if = "Option::is_none")] 27 | pub tokenamount: Option<u64>, 28 | #[serde(skip_serializing_if = "Option::is_none")] 29 | pub tokenamountcommitment: Option<PedersenCommitment>, 30 | } 31 | 32 | impl From<&TxIn> for IssuanceValue { 33 | fn from(txin: &TxIn) -> Self { 34 | let issuance = &txin.asset_issuance; 35 | let is_reissuance =
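// A small sketch of the ProtocolVersion type defined above: versions parse
// from dotted strings and compare numerically (major, then minor), so "1.10"
// ranks above "1.4", which a plain lexicographic string comparison would get
// wrong:
//
//     let min: ProtocolVersion = "1.4".parse().unwrap();
//     assert!(ProtocolVersion::new(1, 10) > min);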
issuance.asset_blinding_nonce != ZERO_TWEAK; 36 | 37 | let asset_entropy = get_issuance_entropy(txin).expect("invalid issuance"); 38 | let asset_id = AssetId::from_entropy(asset_entropy); 39 | 40 | let contract_hash = if !is_reissuance { 41 | Some(ContractHash::from_slice(&issuance.asset_entropy).expect("invalid asset entropy")) 42 | } else { 43 | None 44 | }; 45 | 46 | IssuanceValue { 47 | asset_id, 48 | asset_entropy, 49 | contract_hash, 50 | is_reissuance, 51 | asset_blinding_nonce: if is_reissuance { 52 | Some(issuance.asset_blinding_nonce) 53 | } else { 54 | None 55 | }, 56 | assetamount: issuance.amount.explicit(), 57 | assetamountcommitment: issuance.amount.commitment(), 58 | tokenamount: issuance.inflation_keys.explicit(), 59 | tokenamountcommitment: issuance.inflation_keys.commitment(), 60 | } 61 | } 62 | } 63 | 64 | // Traits to make rust-elements' types compatible with the changes made in rust-bitcoin v0.31 65 | // Should hopefully eventually make its way into rust-elements itself. 66 | pub mod ebcompact { 67 | pub trait SizeMethod { 68 | fn total_size(&self) -> usize; 69 | } 70 | impl SizeMethod for elements::Block { 71 | fn total_size(&self) -> usize { 72 | self.size() 73 | } 74 | } 75 | impl SizeMethod for elements::Transaction { 76 | fn total_size(&self) -> usize { 77 | self.size() 78 | } 79 | } 80 | 81 | pub trait ScriptMethods { 82 | fn is_p2wpkh(&self) -> bool; 83 | fn is_p2wsh(&self) -> bool; 84 | fn is_p2tr(&self) -> bool; 85 | } 86 | impl ScriptMethods for elements::Script { 87 | fn is_p2wpkh(&self) -> bool { 88 | self.is_v0_p2wpkh() 89 | } 90 | fn is_p2wsh(&self) -> bool { 91 | self.is_v0_p2wsh() 92 | } 93 | fn is_p2tr(&self) -> bool { 94 | self.is_v1_p2tr() 95 | } 96 | } 97 | 98 | pub trait TxidCompat { 99 | fn compute_txid(&self) -> elements::Txid; 100 | } 101 | impl TxidCompat for elements::Transaction { 102 | fn compute_txid(&self) -> elements::Txid { 103 | self.txid() 104 | } 105 | } 106 | } 107 | -------------------------------------------------------------------------------- /src/elements/peg.rs: -------------------------------------------------------------------------------- 1 | use elements::{confidential::Asset, PeginData, PegoutData, TxIn, TxOut}; 2 | 3 | use crate::chain::{bitcoin_genesis_hash, BNetwork, Network}; 4 | use crate::util::{FullHash, ScriptToAsm}; 5 | 6 | pub fn get_pegin_data(txout: &TxIn, network: Network) -> Option { 7 | let pegged_asset_id = network.pegged_asset()?; 8 | txout 9 | .pegin_data() 10 | .filter(|pegin| pegin.asset == *pegged_asset_id) 11 | } 12 | 13 | pub fn get_pegout_data( 14 | txout: &TxOut, 15 | network: Network, 16 | parent_network: BNetwork, 17 | ) -> Option { 18 | let pegged_asset_id = network.pegged_asset()?; 19 | txout.pegout_data().filter(|pegout| { 20 | pegout.asset == Asset::Explicit(*pegged_asset_id) 21 | && pegout.genesis_hash == bitcoin_genesis_hash(parent_network) 22 | }) 23 | } 24 | 25 | // API representation of pegout data associated with an output 26 | #[derive(Serialize, Clone)] 27 | pub struct PegoutValue { 28 | pub genesis_hash: bitcoin::BlockHash, 29 | pub scriptpubkey: bitcoin::ScriptBuf, 30 | pub scriptpubkey_asm: String, 31 | #[serde(skip_serializing_if = "Option::is_none")] 32 | pub scriptpubkey_address: Option, 33 | } 34 | 35 | impl PegoutValue { 36 | pub fn from_txout(txout: &TxOut, network: Network, parent_network: BNetwork) -> Option { 37 | let pegoutdata = get_pegout_data(txout, network, parent_network)?; 38 | 39 | let scriptpubkey = pegoutdata.script_pubkey; 40 | let address = 
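// The ebcompact shims above let feature-gated shared code call rust-bitcoin
// v0.31-style names on rust-elements types. A minimal sketch, assuming the
// traits are brought into scope (log_tx is hypothetical):
//
//     use crate::elements::ebcompact::{SizeMethod, TxidCompat};
//
//     fn log_tx(tx: &elements::Transaction) {
//         println!("{} ({} bytes)", tx.compute_txid(), tx.total_size());
//     }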
bitcoin::Address::from_script(&scriptpubkey, parent_network).ok(); 41 | 42 | Some(PegoutValue { 43 | genesis_hash: pegoutdata.genesis_hash, 44 | scriptpubkey_asm: scriptpubkey.to_asm(), 45 | scriptpubkey_address: address, 46 | scriptpubkey, 47 | }) 48 | } 49 | } 50 | 51 | // Inner type for the indexer TxHistoryInfo::Pegin variant 52 | #[derive(Serialize, Deserialize, Debug)] 53 | pub struct PeginInfo { 54 | pub txid: FullHash, 55 | pub vin: u16, 56 | pub value: u64, 57 | } 58 | 59 | // Inner type for the indexer TxHistoryInfo::Pegout variant 60 | #[derive(Serialize, Deserialize, Debug)] 61 | pub struct PegoutInfo { 62 | pub txid: FullHash, 63 | pub vout: u16, 64 | pub value: u64, 65 | } 66 | -------------------------------------------------------------------------------- /src/elements/registry.rs: -------------------------------------------------------------------------------- 1 | use std::collections::HashMap; 2 | use std::str::FromStr; 3 | use std::sync::{Arc, RwLock}; 4 | use std::time::{Duration, SystemTime}; 5 | use std::{cmp, fs, path, thread}; 6 | 7 | use serde_json::Value as JsonValue; 8 | 9 | use elements::AssetId; 10 | 11 | use crate::errors::*; 12 | 13 | // length of asset id prefix to use for sub-directory partitioning 14 | // (in number of hex characters, not bytes) 15 | 16 | const DIR_PARTITION_LEN: usize = 2; 17 | pub struct AssetRegistry { 18 | directory: path::PathBuf, 19 | assets_cache: HashMap, 20 | } 21 | 22 | pub type AssetEntry<'a> = (&'a AssetId, &'a AssetMeta); 23 | 24 | impl AssetRegistry { 25 | pub fn new(directory: path::PathBuf) -> Self { 26 | Self { 27 | directory, 28 | assets_cache: Default::default(), 29 | } 30 | } 31 | 32 | pub fn get(&self, asset_id: &AssetId) -> Option<&AssetMeta> { 33 | self.assets_cache 34 | .get(asset_id) 35 | .map(|(_, metadata)| metadata) 36 | } 37 | 38 | pub fn list( 39 | &self, 40 | start_index: usize, 41 | limit: usize, 42 | sorting: AssetSorting, 43 | ) -> (usize, Vec) { 44 | let mut assets: Vec = self 45 | .assets_cache 46 | .iter() 47 | .map(|(asset_id, (_, metadata))| (asset_id, metadata)) 48 | .collect(); 49 | assets.sort_by(sorting.as_comparator()); 50 | ( 51 | assets.len(), 52 | assets.into_iter().skip(start_index).take(limit).collect(), 53 | ) 54 | } 55 | 56 | pub fn fs_sync(&mut self) -> Result<()> { 57 | for entry in fs::read_dir(&self.directory).chain_err(|| "failed reading asset dir")? { 58 | let entry = entry.chain_err(|| "invalid fh")?; 59 | let filetype = entry.file_type().chain_err(|| "failed getting file type")?; 60 | if !filetype.is_dir() || entry.file_name().len() != DIR_PARTITION_LEN { 61 | continue; 62 | } 63 | 64 | for file_entry in 65 | fs::read_dir(entry.path()).chain_err(|| "failed reading asset subdir")? 66 | { 67 | let file_entry = file_entry.chain_err(|| "invalid fh")?; 68 | let path = file_entry.path(); 69 | if path.extension().and_then(|e| e.to_str()) != Some("json") { 70 | continue; 71 | } 72 | 73 | let asset_id = AssetId::from_str( 74 | path.file_stem() 75 | .unwrap() // cannot fail if extension() succeeded 76 | .to_str() 77 | .chain_err(|| "invalid filename")?, 78 | ) 79 | .chain_err(|| "invalid filename")?; 80 | 81 | let modified = file_entry 82 | .metadata() 83 | .chain_err(|| "failed reading metadata")? 
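// Directory layout consumed by fs_sync() here, partitioned by the first
// DIR_PARTITION_LEN (2) hex characters of the asset id (hypothetical id shown):
//
//     <registry-dir>/a9/a91b0e...3f.json
//
// Only *.json files inside two-character subdirectories are considered; the
// file stem must parse as an AssetId, and the file's mtime is compared against
// the cache so unchanged entries are skipped on each sync pass.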
84 | .modified() 85 | .chain_err(|| "metadata modified failed")?; 86 | 87 | if let Some((last_update, _)) = self.assets_cache.get(&asset_id) { 88 | if *last_update == modified { 89 | continue; 90 | } 91 | } 92 | 93 | let metadata: AssetMeta = serde_json::from_str( 94 | &fs::read_to_string(path).chain_err(|| "failed reading file")?, 95 | ) 96 | .chain_err(|| "failed parsing file")?; 97 | 98 | self.assets_cache.insert(asset_id, (modified, metadata)); 99 | } 100 | } 101 | Ok(()) 102 | } 103 | 104 | pub fn spawn_sync(asset_db: Arc>) -> thread::JoinHandle<()> { 105 | thread::spawn(move || loop { 106 | if let Err(e) = asset_db.write().unwrap().fs_sync() { 107 | error!("registry fs_sync failed: {:?}", e); 108 | } 109 | 110 | thread::sleep(Duration::from_secs(15)); 111 | // TODO handle shutdowm 112 | }) 113 | } 114 | } 115 | 116 | #[derive(Serialize, Deserialize, Clone, Debug)] 117 | pub struct AssetMeta { 118 | #[serde(skip_serializing_if = "JsonValue::is_null")] 119 | pub contract: JsonValue, 120 | #[serde(skip_serializing_if = "JsonValue::is_null")] 121 | pub entity: JsonValue, 122 | pub precision: u8, 123 | pub name: String, 124 | #[serde(skip_serializing_if = "Option::is_none")] 125 | pub ticker: Option, 126 | } 127 | 128 | impl AssetMeta { 129 | fn domain(&self) -> Option<&str> { 130 | self.entity["domain"].as_str() 131 | } 132 | } 133 | 134 | pub struct AssetSorting(AssetSortField, AssetSortDir); 135 | 136 | pub enum AssetSortField { 137 | Name, 138 | Domain, 139 | Ticker, 140 | } 141 | pub enum AssetSortDir { 142 | Descending, 143 | Ascending, 144 | } 145 | 146 | impl AssetSorting { 147 | fn as_comparator(self) -> Box cmp::Ordering> { 148 | let sort_fn: Box cmp::Ordering> = match self.0 { 149 | AssetSortField::Name => { 150 | // Order by name first, use asset id as a tie breaker. the other sorting fields 151 | // don't require this because they're guaranteed to be unique. 152 | Box::new(|a, b| lc_cmp(&a.1.name, &b.1.name).then_with(|| a.0.cmp(b.0))) 153 | } 154 | AssetSortField::Domain => Box::new(|a, b| a.1.domain().cmp(&b.1.domain())), 155 | AssetSortField::Ticker => Box::new(|a, b| lc_cmp_opt(&a.1.ticker, &b.1.ticker)), 156 | }; 157 | 158 | match self.1 { 159 | AssetSortDir::Ascending => sort_fn, 160 | AssetSortDir::Descending => Box::new(move |a, b| sort_fn(a, b).reverse()), 161 | } 162 | } 163 | 164 | pub fn from_query_params(query: &HashMap) -> Result { 165 | let field = match query.get("sort_field").map(String::as_str) { 166 | None => AssetSortField::Ticker, 167 | Some("name") => AssetSortField::Name, 168 | Some("domain") => AssetSortField::Domain, 169 | Some("ticker") => AssetSortField::Ticker, 170 | _ => bail!("invalid sort field"), 171 | }; 172 | 173 | let dir = match query.get("sort_dir").map(String::as_str) { 174 | None => AssetSortDir::Ascending, 175 | Some("asc") => AssetSortDir::Ascending, 176 | Some("desc") => AssetSortDir::Descending, 177 | _ => bail!("invalid sort direction"), 178 | }; 179 | 180 | Ok(Self(field, dir)) 181 | } 182 | } 183 | 184 | fn lc_cmp(a: &str, b: &str) -> cmp::Ordering { 185 | a.to_lowercase().cmp(&b.to_lowercase()) 186 | } 187 | fn lc_cmp_opt(a: &Option, b: &Option) -> cmp::Ordering { 188 | a.as_ref() 189 | .map(|a| a.to_lowercase()) 190 | .cmp(&b.as_ref().map(|b| b.to_lowercase())) 191 | } 192 | -------------------------------------------------------------------------------- /src/errors.rs: -------------------------------------------------------------------------------- 1 | error_chain! 
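// Query-string mapping implemented by AssetSorting::from_query_params() above
// (hypothetical request): ?sort_field=name&sort_dir=desc sorts by name,
// descending, with the asset id as tie breaker; omitting both keys defaults to
// ascending ticker order, and lc_cmp()/lc_cmp_opt() make all comparisons
// case-insensitive.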
{ 2 | types { 3 | Error, ErrorKind, ResultExt, Result; 4 | } 5 | 6 | errors { 7 | Connection(msg: String) { 8 | description("Connection error") 9 | display("Connection error: {}", msg) 10 | } 11 | 12 | RpcError(code: i64, error: String, method: String) { 13 | description("RPC error") 14 | display("{} RPC error {}: {}", method, code, error) 15 | } 16 | 17 | Interrupt(sig: i32) { 18 | description("Interruption by external signal") 19 | display("Interrupted by signal {}", sig) 20 | } 21 | 22 | TooPopular { 23 | description("Too many history entries") 24 | display("Too many history entries") 25 | } 26 | 27 | #[cfg(feature = "electrum-discovery")] 28 | ElectrumClient(e: electrum_client::Error) { 29 | description("Electrum client error") 30 | display("Electrum client error: {:?}", e) 31 | } 32 | 33 | } 34 | } 35 | 36 | #[cfg(feature = "electrum-discovery")] 37 | impl From<electrum_client::Error> for Error { 38 | fn from(e: electrum_client::Error) -> Self { 39 | Error::from(ErrorKind::ElectrumClient(e)) 40 | } 41 | } 42 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | #![recursion_limit = "1024"] 2 | 3 | #[macro_use] 4 | extern crate clap; 5 | #[macro_use] 6 | extern crate arrayref; 7 | #[macro_use] 8 | extern crate error_chain; 9 | #[macro_use] 10 | extern crate log; 11 | #[macro_use] 12 | extern crate serde_derive; 13 | #[macro_use] 14 | extern crate serde_json; 15 | #[macro_use] 16 | extern crate lazy_static; 17 | 18 | pub mod chain; 19 | pub mod config; 20 | pub mod daemon; 21 | pub mod electrum; 22 | pub mod errors; 23 | pub mod metrics; 24 | pub mod new_index; 25 | pub mod rest; 26 | pub mod signal; 27 | pub mod util; 28 | 29 | #[cfg(feature = "liquid")] 30 | pub mod elements; 31 | 32 | #[cfg(feature = "otlp-tracing")] 33 | pub mod otlp_trace; 34 | -------------------------------------------------------------------------------- /src/metrics.rs: -------------------------------------------------------------------------------- 1 | use page_size; 2 | use prometheus::{self, Encoder}; 3 | use std::fs; 4 | use std::io; 5 | use std::net::SocketAddr; 6 | use std::thread; 7 | use std::time::Duration; 8 | use sysconf; 9 | use tiny_http; 10 | 11 | pub use prometheus::{ 12 | GaugeVec, Histogram, HistogramOpts, HistogramTimer, HistogramVec, IntCounter as Counter, 13 | IntCounterVec as CounterVec, IntGauge as Gauge, Opts as MetricOpts, 14 | }; 15 | 16 | use crate::util::spawn_thread; 17 | 18 | use crate::errors::*; 19 | 20 | pub struct Metrics { 21 | reg: prometheus::Registry, 22 | addr: SocketAddr, 23 | } 24 | 25 | impl Metrics { 26 | pub fn new(addr: SocketAddr) -> Metrics { 27 | Metrics { 28 | reg: prometheus::Registry::new(), 29 | addr, 30 | } 31 | } 32 | 33 | pub fn counter(&self, opts: prometheus::Opts) -> Counter { 34 | let c = Counter::with_opts(opts).unwrap(); 35 | self.reg.register(Box::new(c.clone())).unwrap(); 36 | c 37 | } 38 | 39 | pub fn counter_vec(&self, opts: prometheus::Opts, labels: &[&str]) -> CounterVec { 40 | let c = CounterVec::new(opts, labels).unwrap(); 41 | self.reg.register(Box::new(c.clone())).unwrap(); 42 | c 43 | } 44 | 45 | pub fn gauge(&self, opts: prometheus::Opts) -> Gauge { 46 | let g = Gauge::with_opts(opts).unwrap(); 47 | self.reg.register(Box::new(g.clone())).unwrap(); 48 | g 49 | } 50 | 51 | pub fn gauge_vec(&self, opts: prometheus::Opts, labels: &[&str]) -> GaugeVec { 52 | let g = GaugeVec::new(opts, labels).unwrap(); 53 |
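// A minimal sketch (not in the tree) of how the error_chain! types defined in
// src/errors.rs above are used throughout the crate, assuming a caller that
// wants to attach context to a std error:
//
//     use crate::errors::{Result, ResultExt};
//
//     fn read_cookie(path: &str) -> Result<String> {
//         std::fs::read_to_string(path).chain_err(|| "failed reading cookie file")
//     }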
self.reg.register(Box::new(g.clone())).unwrap(); 54 | g 55 | } 56 | 57 | pub fn histogram(&self, opts: prometheus::HistogramOpts) -> Histogram { 58 | let h = Histogram::with_opts(opts).unwrap(); 59 | self.reg.register(Box::new(h.clone())).unwrap(); 60 | h 61 | } 62 | 63 | pub fn histogram_vec(&self, opts: prometheus::HistogramOpts, labels: &[&str]) -> HistogramVec { 64 | let h = HistogramVec::new(opts, labels).unwrap(); 65 | self.reg.register(Box::new(h.clone())).unwrap(); 66 | h 67 | } 68 | 69 | pub fn start(&self) { 70 | let server = tiny_http::Server::http(self.addr) 71 | .unwrap_or_else(|_| panic!("failed to start monitoring HTTP server at {}", self.addr)); 72 | start_process_exporter(&self); 73 | let reg = self.reg.clone(); 74 | spawn_thread("metrics", move || loop { 75 | if let Err(e) = handle_request(®, server.recv()) { 76 | error!("http error: {}", e); 77 | } 78 | }); 79 | } 80 | } 81 | 82 | fn handle_request( 83 | reg: &prometheus::Registry, 84 | request: io::Result, 85 | ) -> io::Result<()> { 86 | let request = request?; 87 | let mut buffer = vec![]; 88 | prometheus::TextEncoder::new() 89 | .encode(®.gather(), &mut buffer) 90 | .unwrap(); 91 | let response = tiny_http::Response::from_data(buffer); 92 | request.respond(response) 93 | } 94 | 95 | struct Stats { 96 | utime: f64, 97 | rss: u64, 98 | fds: usize, 99 | } 100 | 101 | fn parse_stats() -> Result { 102 | if cfg!(target_os = "macos") { 103 | return Ok(Stats { 104 | utime: 0f64, 105 | rss: 0u64, 106 | fds: 0usize, 107 | }); 108 | } 109 | let value = fs::read_to_string("/proc/self/stat").chain_err(|| "failed to read stats")?; 110 | let parts: Vec<&str> = value.split_whitespace().collect(); 111 | let page_size = page_size::get() as u64; 112 | let ticks_per_second = sysconf::raw::sysconf(sysconf::raw::SysconfVariable::ScClkTck) 113 | .expect("failed to get _SC_CLK_TCK") as f64; 114 | 115 | let parse_part = |index: usize, name: &str| -> Result { 116 | Ok(parts 117 | .get(index) 118 | .chain_err(|| format!("missing {}: {:?}", name, parts))? 119 | .parse::() 120 | .chain_err(|| format!("invalid {}: {:?}", name, parts))?) 121 | }; 122 | 123 | // For details, see '/proc/[pid]/stat' section at `man 5 proc`: 124 | let utime = parse_part(13, "utime")? as f64 / ticks_per_second; 125 | let rss = parse_part(23, "rss")? * page_size; 126 | let fds = fs::read_dir("/proc/self/fd") 127 | .chain_err(|| "failed to read fd directory")? 
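// Index sketch for the /proc/self/stat parsing above: `man 5 proc` numbers the
// fields from 1 while `parts` is zero-indexed, so parse_part(13, "utime") reads
// field 14 (utime, in clock ticks, hence the division by _SC_CLK_TCK) and
// parse_part(23, "rss") reads field 24 (rss, in pages, hence the multiplication
// by the page size).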
128 | .count(); 129 | Ok(Stats { utime, rss, fds }) 130 | } 131 | 132 | fn start_process_exporter(metrics: &Metrics) { 133 | let rss = metrics.gauge(MetricOpts::new( 134 | "process_memory_rss", 135 | "Resident memory size [bytes]", 136 | )); 137 | let cpu = metrics.gauge_vec( 138 | MetricOpts::new("process_cpu_usage", "CPU usage by this process [seconds]"), 139 | &["type"], 140 | ); 141 | let fds = metrics.gauge(MetricOpts::new("process_fs_fds", "# of file descriptors")); 142 | spawn_thread("exporter", move || loop { 143 | match parse_stats() { 144 | Ok(stats) => { 145 | cpu.with_label_values(&["utime"]).set(stats.utime as f64); 146 | rss.set(stats.rss as i64); 147 | fds.set(stats.fds as i64); 148 | } 149 | Err(e) => warn!("failed to export stats: {}", e), 150 | } 151 | thread::sleep(Duration::from_secs(5)); 152 | }); 153 | } 154 | -------------------------------------------------------------------------------- /src/new_index/db.rs: -------------------------------------------------------------------------------- 1 | use rocksdb; 2 | 3 | use std::path::Path; 4 | 5 | use crate::config::Config; 6 | use crate::util::{bincode, Bytes}; 7 | 8 | static DB_VERSION: u32 = 1; 9 | 10 | #[derive(Debug, Eq, PartialEq)] 11 | pub struct DBRow { 12 | pub key: Vec, 13 | pub value: Vec, 14 | } 15 | 16 | pub struct ScanIterator<'a> { 17 | prefix: Vec, 18 | iter: rocksdb::DBIterator<'a>, 19 | done: bool, 20 | } 21 | 22 | impl<'a> Iterator for ScanIterator<'a> { 23 | type Item = DBRow; 24 | 25 | fn next(&mut self) -> Option { 26 | if self.done { 27 | return None; 28 | } 29 | let (key, value) = self.iter.next()?.expect("valid iterator"); 30 | if !key.starts_with(&self.prefix) { 31 | self.done = true; 32 | return None; 33 | } 34 | Some(DBRow { 35 | key: key.to_vec(), 36 | value: value.to_vec(), 37 | }) 38 | } 39 | } 40 | 41 | pub struct ReverseScanIterator<'a> { 42 | prefix: Vec, 43 | iter: rocksdb::DBRawIterator<'a>, 44 | done: bool, 45 | } 46 | 47 | impl<'a> Iterator for ReverseScanIterator<'a> { 48 | type Item = DBRow; 49 | 50 | fn next(&mut self) -> Option { 51 | if self.done || !self.iter.valid() { 52 | return None; 53 | } 54 | 55 | let key = self.iter.key().unwrap(); 56 | if !key.starts_with(&self.prefix) { 57 | self.done = true; 58 | return None; 59 | } 60 | 61 | let row = DBRow { 62 | key: key.into(), 63 | value: self.iter.value().unwrap().into(), 64 | }; 65 | 66 | self.iter.prev(); 67 | 68 | Some(row) 69 | } 70 | } 71 | 72 | #[derive(Debug)] 73 | pub struct DB { 74 | db: rocksdb::DB, 75 | } 76 | 77 | #[derive(Copy, Clone, Debug)] 78 | pub enum DBFlush { 79 | Disable, 80 | Enable, 81 | } 82 | 83 | impl DB { 84 | pub fn open(path: &Path, config: &Config) -> DB { 85 | debug!("opening DB at {:?}", path); 86 | let mut db_opts = rocksdb::Options::default(); 87 | db_opts.create_if_missing(true); 88 | db_opts.set_max_open_files(100_000); // TODO: make sure to `ulimit -n` this process correctly 89 | db_opts.set_compaction_style(rocksdb::DBCompactionStyle::Level); 90 | db_opts.set_compression_type(rocksdb::DBCompressionType::Snappy); 91 | db_opts.set_target_file_size_base(1_073_741_824); 92 | db_opts.set_write_buffer_size(256 << 20); 93 | db_opts.set_disable_auto_compactions(!config.initial_sync_compaction); // for initial bulk load 94 | 95 | // db_opts.set_advise_random_on_open(???); 96 | db_opts.set_compaction_readahead_size(1 << 20); 97 | db_opts.increase_parallelism(2); 98 | 99 | // let mut block_opts = rocksdb::BlockBasedOptions::default(); 100 | // block_opts.set_block_size(???); 101 | 102 | let db = DB { 
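// A usage sketch for the prefix iterators above, as wired up by iter_scan()
// below, assuming a DB whose rows are keyed under a one-byte prefix (as the
// b"T" txstore rows are):
//
//     for row in db.iter_scan(b"T") {
//         // row.key / row.value are owned Vec<u8> copies; iteration ends at
//         // the first key that no longer starts with the prefix
//     }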
103 | db: rocksdb::DB::open(&db_opts, path).expect("failed to open RocksDB"), 104 | }; 105 | db.verify_compatibility(config); 106 | db 107 | } 108 | 109 | pub fn full_compaction(&self) { 110 | // TODO: make sure this doesn't fail silently 111 | debug!("starting full compaction on {:?}", self.db); 112 | self.db.compact_range(None::<&[u8]>, None::<&[u8]>); 113 | debug!("finished full compaction on {:?}", self.db); 114 | } 115 | 116 | pub fn enable_auto_compaction(&self) { 117 | let opts = [("disable_auto_compactions", "false")]; 118 | self.db.set_options(&opts).unwrap(); 119 | } 120 | 121 | pub fn raw_iterator(&self) -> rocksdb::DBRawIterator { 122 | self.db.raw_iterator() 123 | } 124 | 125 | pub fn iter_scan(&self, prefix: &[u8]) -> ScanIterator { 126 | ScanIterator { 127 | prefix: prefix.to_vec(), 128 | iter: self.db.prefix_iterator(prefix), 129 | done: false, 130 | } 131 | } 132 | 133 | pub fn iter_scan_from(&self, prefix: &[u8], start_at: &[u8]) -> ScanIterator { 134 | let iter = self.db.iterator(rocksdb::IteratorMode::From( 135 | start_at, 136 | rocksdb::Direction::Forward, 137 | )); 138 | ScanIterator { 139 | prefix: prefix.to_vec(), 140 | iter, 141 | done: false, 142 | } 143 | } 144 | 145 | pub fn iter_scan_reverse(&self, prefix: &[u8], prefix_max: &[u8]) -> ReverseScanIterator { 146 | let mut iter = self.db.raw_iterator(); 147 | iter.seek_for_prev(prefix_max); 148 | 149 | ReverseScanIterator { 150 | prefix: prefix.to_vec(), 151 | iter, 152 | done: false, 153 | } 154 | } 155 | 156 | pub fn write(&self, mut rows: Vec, flush: DBFlush) { 157 | log::trace!( 158 | "writing {} rows to {:?}, flush={:?}", 159 | rows.len(), 160 | self.db, 161 | flush 162 | ); 163 | rows.sort_unstable_by(|a, b| a.key.cmp(&b.key)); 164 | let mut batch = rocksdb::WriteBatch::default(); 165 | for row in rows { 166 | batch.put(&row.key, &row.value); 167 | } 168 | let do_flush = match flush { 169 | DBFlush::Enable => true, 170 | DBFlush::Disable => false, 171 | }; 172 | let mut opts = rocksdb::WriteOptions::new(); 173 | opts.set_sync(do_flush); 174 | opts.disable_wal(!do_flush); 175 | self.db.write_opt(batch, &opts).unwrap(); 176 | } 177 | 178 | pub fn flush(&self) { 179 | self.db.flush().unwrap(); 180 | } 181 | 182 | pub fn put(&self, key: &[u8], value: &[u8]) { 183 | self.db.put(key, value).unwrap(); 184 | } 185 | 186 | pub fn put_sync(&self, key: &[u8], value: &[u8]) { 187 | let mut opts = rocksdb::WriteOptions::new(); 188 | opts.set_sync(true); 189 | self.db.put_opt(key, value, &opts).unwrap(); 190 | } 191 | 192 | pub fn get(&self, key: &[u8]) -> Option { 193 | self.db.get(key).unwrap().map(|v| v.to_vec()) 194 | } 195 | 196 | pub fn multi_get(&self, keys: I) -> Vec>, rocksdb::Error>> 197 | where 198 | K: AsRef<[u8]>, 199 | I: IntoIterator, 200 | { 201 | self.db.multi_get(keys) 202 | } 203 | 204 | fn verify_compatibility(&self, config: &Config) { 205 | let mut compatibility_bytes = bincode::serialize_little(&DB_VERSION).unwrap(); 206 | 207 | if config.light_mode { 208 | // append a byte to indicate light_mode is enabled. 209 | // we're not letting bincode serialize this so that the compatiblity bytes won't change 210 | // (and require a reindex) when light_mode is disabled. this should be chagned the next 211 | // time we bump DB_VERSION and require a re-index anyway. 212 | compatibility_bytes.push(1); 213 | } 214 | 215 | match self.get(b"V") { 216 | None => self.put(b"V", &compatibility_bytes), 217 | Some(ref x) if x != &compatibility_bytes => { 218 | panic!("Incompatible database found. 
Please reindex.") 219 | } 220 | Some(_) => (), 221 | } 222 | } 223 | } 224 | -------------------------------------------------------------------------------- /src/new_index/fetch.rs: -------------------------------------------------------------------------------- 1 | use rayon::prelude::*; 2 | 3 | #[cfg(feature = "liquid")] 4 | use crate::elements::ebcompact::*; 5 | #[cfg(not(feature = "liquid"))] 6 | use bitcoin::consensus::encode::{deserialize, Decodable}; 7 | #[cfg(feature = "liquid")] 8 | use elements::encode::{deserialize, Decodable}; 9 | 10 | use std::collections::HashMap; 11 | use std::fs; 12 | use std::io::Cursor; 13 | use std::path::PathBuf; 14 | use std::sync::mpsc::Receiver; 15 | use std::thread; 16 | 17 | use electrs_macros::trace; 18 | 19 | use crate::chain::{Block, BlockHash}; 20 | use crate::daemon::Daemon; 21 | use crate::errors::*; 22 | use crate::util::{spawn_thread, HeaderEntry, SyncChannel}; 23 | 24 | #[derive(Clone, Copy, Debug)] 25 | pub enum FetchFrom { 26 | Bitcoind, 27 | BlkFiles, 28 | } 29 | 30 | #[trace] 31 | pub fn start_fetcher( 32 | from: FetchFrom, 33 | daemon: &Daemon, 34 | new_headers: Vec, 35 | ) -> Result>> { 36 | let fetcher = match from { 37 | FetchFrom::Bitcoind => bitcoind_fetcher, 38 | FetchFrom::BlkFiles => blkfiles_fetcher, 39 | }; 40 | fetcher(daemon, new_headers) 41 | } 42 | 43 | #[derive(Clone)] 44 | pub struct BlockEntry { 45 | pub block: Block, 46 | pub entry: HeaderEntry, 47 | pub size: u32, 48 | } 49 | 50 | type SizedBlock = (Block, u32); 51 | 52 | pub struct Fetcher { 53 | receiver: Receiver, 54 | thread: thread::JoinHandle<()>, 55 | } 56 | 57 | impl Fetcher { 58 | fn from(receiver: Receiver, thread: thread::JoinHandle<()>) -> Self { 59 | Fetcher { receiver, thread } 60 | } 61 | 62 | pub fn map(self, mut func: F) 63 | where 64 | F: FnMut(T) -> (), 65 | { 66 | for item in self.receiver { 67 | func(item); 68 | } 69 | self.thread.join().expect("fetcher thread panicked") 70 | } 71 | } 72 | 73 | #[trace] 74 | fn bitcoind_fetcher( 75 | daemon: &Daemon, 76 | new_headers: Vec, 77 | ) -> Result>> { 78 | if let Some(tip) = new_headers.last() { 79 | debug!("{:?} ({} left to index)", tip, new_headers.len()); 80 | }; 81 | let daemon = daemon.reconnect()?; 82 | let chan = SyncChannel::new(1); 83 | let sender = chan.sender(); 84 | Ok(Fetcher::from( 85 | chan.into_receiver(), 86 | spawn_thread("bitcoind_fetcher", move || { 87 | for entries in new_headers.chunks(100) { 88 | let blockhashes: Vec = entries.iter().map(|e| *e.hash()).collect(); 89 | let blocks = daemon 90 | .getblocks(&blockhashes) 91 | .expect("failed to get blocks from bitcoind"); 92 | assert_eq!(blocks.len(), entries.len()); 93 | let block_entries: Vec = blocks 94 | .into_iter() 95 | .zip(entries) 96 | .map(|(block, entry)| BlockEntry { 97 | entry: entry.clone(), // TODO: remove this clone() 98 | size: block.total_size() as u32, 99 | block, 100 | }) 101 | .collect(); 102 | assert_eq!(block_entries.len(), entries.len()); 103 | sender 104 | .send(block_entries) 105 | .expect("failed to send fetched blocks"); 106 | log::debug!("last fetch {:?}", entries.last()); 107 | } 108 | }), 109 | )) 110 | } 111 | 112 | #[trace] 113 | fn blkfiles_fetcher( 114 | daemon: &Daemon, 115 | new_headers: Vec, 116 | ) -> Result>> { 117 | let magic = daemon.magic(); 118 | let blk_files = daemon.list_blk_files()?; 119 | let xor_key = daemon.read_blk_file_xor_key()?; 120 | 121 | let chan = SyncChannel::new(1); 122 | let sender = chan.sender(); 123 | 124 | let mut entry_map: HashMap = 125 | 
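// A sketch of driving the fetch pipeline above (index_blocks is hypothetical):
//
//     let fetcher = start_fetcher(FetchFrom::Bitcoind, &daemon, new_headers)?;
//     fetcher.map(|blocks: Vec<BlockEntry>| index_blocks(blocks));
//
// map() drains the channel on the calling thread and then joins the fetcher
// thread, so a panic inside the fetcher surfaces here rather than being lost.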
new_headers.into_iter().map(|h| (*h.hash(), h)).collect(); 126 | 127 | let parser = blkfiles_parser(blkfiles_reader(blk_files, xor_key), magic); 128 | Ok(Fetcher::from( 129 | chan.into_receiver(), 130 | spawn_thread("blkfiles_fetcher", move || { 131 | parser.map(|sizedblocks| { 132 | let block_entries: Vec = sizedblocks 133 | .into_iter() 134 | .filter_map(|(block, size)| { 135 | let blockhash = block.block_hash(); 136 | entry_map 137 | .remove(&blockhash) 138 | .map(|entry| BlockEntry { block, entry, size }) 139 | .or_else(|| { 140 | trace!("skipping block {}", blockhash); 141 | None 142 | }) 143 | }) 144 | .collect(); 145 | trace!("fetched {} blocks", block_entries.len()); 146 | sender 147 | .send(block_entries) 148 | .expect("failed to send blocks entries from blk*.dat files"); 149 | }); 150 | if !entry_map.is_empty() { 151 | panic!( 152 | "failed to index {} blocks from blk*.dat files", 153 | entry_map.len() 154 | ) 155 | } 156 | }), 157 | )) 158 | } 159 | 160 | #[trace] 161 | fn blkfiles_reader(blk_files: Vec, xor_key: Option<[u8; 8]>) -> Fetcher> { 162 | let chan = SyncChannel::new(1); 163 | let sender = chan.sender(); 164 | 165 | Fetcher::from( 166 | chan.into_receiver(), 167 | spawn_thread("blkfiles_reader", move || { 168 | for path in blk_files { 169 | trace!("reading {:?}", path); 170 | let mut blob = fs::read(&path) 171 | .unwrap_or_else(|e| panic!("failed to read {:?}: {:?}", path, e)); 172 | if let Some(xor_key) = xor_key { 173 | blkfile_apply_xor_key(xor_key, &mut blob); 174 | } 175 | sender 176 | .send(blob) 177 | .unwrap_or_else(|_| panic!("failed to send {:?} contents", path)); 178 | } 179 | }), 180 | ) 181 | } 182 | 183 | /// By default, bitcoind v28.0+ applies an 8-byte "xor key" over each "blk*.dat" 184 | /// file. We have xor again to undo this transformation. 185 | fn blkfile_apply_xor_key(xor_key: [u8; 8], blob: &mut [u8]) { 186 | for (i, blob_i) in blob.iter_mut().enumerate() { 187 | *blob_i ^= xor_key[i & 0x7]; 188 | } 189 | } 190 | 191 | #[trace] 192 | fn blkfiles_parser(blobs: Fetcher>, magic: u32) -> Fetcher> { 193 | let chan = SyncChannel::new(1); 194 | let sender = chan.sender(); 195 | 196 | Fetcher::from( 197 | chan.into_receiver(), 198 | spawn_thread("blkfiles_parser", move || { 199 | blobs.map(|blob| { 200 | trace!("parsing {} bytes", blob.len()); 201 | let blocks = parse_blocks(blob, magic).expect("failed to parse blk*.dat file"); 202 | sender 203 | .send(blocks) 204 | .expect("failed to send blocks from blk*.dat file"); 205 | }); 206 | }), 207 | ) 208 | } 209 | 210 | #[trace] 211 | fn parse_blocks(blob: Vec, magic: u32) -> Result> { 212 | let mut cursor = Cursor::new(&blob); 213 | let mut slices = vec![]; 214 | let max_pos = blob.len() as u64; 215 | 216 | while cursor.position() < max_pos { 217 | let offset = cursor.position(); 218 | match u32::consensus_decode(&mut cursor) { 219 | Ok(value) => { 220 | if magic != value { 221 | cursor.set_position(offset + 1); 222 | continue; 223 | } 224 | } 225 | Err(_) => break, // EOF 226 | }; 227 | let block_size = u32::consensus_decode(&mut cursor).chain_err(|| "no block size")?; 228 | let start = cursor.position(); 229 | let end = start + block_size as u64; 230 | 231 | // If Core's WriteBlockToDisk ftell fails, only the magic bytes and size will be written 232 | // and the block body won't be written to the blk*.dat file. 233 | // Since the first 4 bytes should contain the block's version, we can skip such blocks 234 | // by peeking the cursor (and skipping previous `magic` and `block_size`). 
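// Note that blkfile_apply_xor_key() above is an involution (x ^ k ^ k == x),
// so the same routine both applies and undoes bitcoind's blk*.dat obfuscation,
// and `i & 0x7` cycles through the 8 key bytes. A hypothetical check:
//
//     let key = [0x5au8; 8];
//     let mut blob = vec![0xab; 16];
//     blkfile_apply_xor_key(key, &mut blob);
//     blkfile_apply_xor_key(key, &mut blob);
//     assert_eq!(blob, vec![0xab; 16]);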
235 | match u32::consensus_decode(&mut cursor) { 236 | Ok(value) => { 237 | if magic == value { 238 | cursor.set_position(start); 239 | continue; 240 | } 241 | } 242 | Err(_) => break, // EOF 243 | } 244 | slices.push((&blob[start as usize..end as usize], block_size)); 245 | cursor.set_position(end as u64); 246 | } 247 | 248 | let pool = rayon::ThreadPoolBuilder::new() 249 | .num_threads(0) // CPU-bound 250 | .thread_name(|i| format!("parse-blocks-{}", i)) 251 | .build() 252 | .unwrap(); 253 | Ok(pool.install(|| { 254 | slices 255 | .into_par_iter() 256 | .map(|(slice, size)| (deserialize(slice).expect("failed to parse Block"), size)) 257 | .collect() 258 | })) 259 | } 260 | -------------------------------------------------------------------------------- /src/new_index/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod db; 2 | mod fetch; 3 | mod mempool; 4 | pub mod precache; 5 | mod query; 6 | pub mod schema; 7 | pub mod zmq; 8 | 9 | pub use self::db::{DBRow, DB}; 10 | pub use self::fetch::{BlockEntry, FetchFrom}; 11 | pub use self::mempool::Mempool; 12 | pub use self::query::Query; 13 | pub use self::schema::{ 14 | compute_script_hash, parse_hash, ChainQuery, FundingInfo, GetAmountVal, Indexer, ScriptStats, 15 | SpendingInfo, SpendingInput, Store, TxHistoryInfo, TxHistoryKey, TxHistoryRow, Utxo, 16 | }; 17 | -------------------------------------------------------------------------------- /src/new_index/precache.rs: -------------------------------------------------------------------------------- 1 | use crate::chain::address::Address; 2 | use crate::errors::*; 3 | use crate::new_index::ChainQuery; 4 | use crate::util::FullHash; 5 | 6 | use crypto::digest::Digest; 7 | use crypto::sha2::Sha256; 8 | use rayon::prelude::*; 9 | 10 | use bitcoin::hex::FromHex; 11 | use std::fs::File; 12 | use std::io; 13 | use std::io::prelude::*; 14 | use std::str::FromStr; 15 | 16 | use electrs_macros::trace; 17 | 18 | #[trace] 19 | pub fn precache(chain: &ChainQuery, scripthashes: Vec) { 20 | let total = scripthashes.len(); 21 | info!("Pre-caching stats and utxo set for {} scripthashes", total); 22 | 23 | let pool = rayon::ThreadPoolBuilder::new() 24 | .num_threads(16) 25 | .thread_name(|i| format!("precache-{}", i)) 26 | .build() 27 | .unwrap(); 28 | pool.install(|| { 29 | scripthashes 30 | .par_iter() 31 | .enumerate() 32 | .for_each(|(i, scripthash)| { 33 | if i % 5 == 0 { 34 | info!("running pre-cache for scripthash {}/{}", i + 1, total); 35 | } 36 | chain.stats(&scripthash[..]); 37 | //chain.utxo(&scripthash[..]); 38 | }) 39 | }); 40 | } 41 | 42 | #[trace] 43 | pub fn scripthashes_from_file(path: String) -> Result> { 44 | let reader = 45 | io::BufReader::new(File::open(path).chain_err(|| "cannot open precache scripthash file")?); 46 | reader 47 | .lines() 48 | .map(|line| { 49 | let line = line.chain_err(|| "cannot read scripthash line")?; 50 | let cols: Vec<&str> = line.split(',').collect(); 51 | to_scripthash(cols[0], cols[1]) 52 | }) 53 | .collect() 54 | } 55 | 56 | fn to_scripthash(script_type: &str, script_str: &str) -> Result { 57 | match script_type { 58 | "address" => address_to_scripthash(script_str), 59 | "scripthash" => Ok(FullHash::from_hex(script_str).chain_err(|| "invalid hex")?), 60 | "scriptpubkey" => Ok(compute_script_hash( 61 | &Vec::from_hex(script_str).chain_err(|| "invalid hex")?, 62 | )), 63 | _ => bail!("Invalid script type".to_string()), 64 | } 65 | } 66 | 67 | fn address_to_scripthash(addr: &str) -> Result { 68 | let addr = 
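// The precache input parsed by scripthashes_from_file() above is a two-column
// CSV, `<type>,<value>` per line, e.g. (hypothetical values):
//
//     address,1BitcoinEaterAddressDontSendf59kuE
//     scriptpubkey,76a914000000000000000000000000000000000000000088ac
//
// Every entry is reduced to the same FullHash form: the SHA-256 of the raw
// scriptPubKey bytes, as computed by compute_script_hash() below.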
Address::from_str(addr).chain_err(|| "invalid address")?; 69 | 70 | #[cfg(not(feature = "liquid"))] 71 | let addr = addr.assume_checked(); 72 | 73 | Ok(compute_script_hash(&addr.script_pubkey().as_bytes())) 74 | } 75 | 76 | pub fn compute_script_hash(data: &[u8]) -> FullHash { 77 | let mut hash = FullHash::default(); 78 | let mut sha2 = Sha256::new(); 79 | sha2.input(data); 80 | sha2.result(&mut hash); 81 | hash 82 | } 83 | -------------------------------------------------------------------------------- /src/new_index/query.rs: -------------------------------------------------------------------------------- 1 | use rayon::prelude::*; 2 | 3 | use std::collections::{BTreeSet, HashMap}; 4 | use std::sync::{Arc, RwLock, RwLockReadGuard}; 5 | use std::time::{Duration, Instant}; 6 | 7 | use crate::chain::{Network, OutPoint, Transaction, TxOut, Txid}; 8 | use crate::config::Config; 9 | use crate::daemon::Daemon; 10 | use crate::errors::*; 11 | use crate::new_index::{ChainQuery, Mempool, ScriptStats, SpendingInput, Utxo}; 12 | use crate::util::{is_spendable, BlockId, Bytes, TransactionStatus}; 13 | 14 | use electrs_macros::trace; 15 | 16 | #[cfg(feature = "liquid")] 17 | use crate::{ 18 | chain::AssetId, 19 | elements::{ebcompact::TxidCompat, lookup_asset, AssetRegistry, AssetSorting, LiquidAsset}, 20 | }; 21 | 22 | const FEE_ESTIMATES_TTL: u64 = 60; // seconds 23 | 24 | const CONF_TARGETS: [u16; 28] = [ 25 | 1u16, 2u16, 3u16, 4u16, 5u16, 6u16, 7u16, 8u16, 9u16, 10u16, 11u16, 12u16, 13u16, 14u16, 15u16, 26 | 16u16, 17u16, 18u16, 19u16, 20u16, 21u16, 22u16, 23u16, 24u16, 25u16, 144u16, 504u16, 1008u16, 27 | ]; 28 | 29 | pub struct Query { 30 | chain: Arc<ChainQuery>, // TODO: should be used as read-only 31 | mempool: Arc<RwLock<Mempool>>, 32 | daemon: Arc<Daemon>, 33 | config: Arc<Config>, 34 | cached_estimates: RwLock<(HashMap<u16, f64>, Option<Instant>)>, 35 | cached_relayfee: RwLock<Option<f64>>, 36 | #[cfg(feature = "liquid")] 37 | asset_db: Option<Arc<RwLock<AssetRegistry>>>, 38 | } 39 | 40 | impl Query { 41 | #[cfg(not(feature = "liquid"))] 42 | pub fn new( 43 | chain: Arc<ChainQuery>, 44 | mempool: Arc<RwLock<Mempool>>, 45 | daemon: Arc<Daemon>, 46 | config: Arc<Config>, 47 | ) -> Self { 48 | Query { 49 | chain, 50 | mempool, 51 | daemon, 52 | config, 53 | cached_estimates: RwLock::new((HashMap::new(), None)), 54 | cached_relayfee: RwLock::new(None), 55 | } 56 | } 57 | 58 | pub fn chain(&self) -> &ChainQuery { 59 | &self.chain 60 | } 61 | 62 | pub fn config(&self) -> &Config { 63 | &self.config 64 | } 65 | 66 | pub fn network(&self) -> Network { 67 | self.config.network_type 68 | } 69 | 70 | pub fn mempool(&self) -> RwLockReadGuard<Mempool> { 71 | self.mempool.read().unwrap() 72 | } 73 | 74 | #[trace] 75 | pub fn broadcast_raw(&self, txhex: &str) -> Result<Txid> { 76 | let txid = self.daemon.broadcast_raw(txhex)?; 77 | let _ = self 78 | .mempool 79 | .write() 80 | .unwrap() 81 | .add_by_txid(&self.daemon, txid); 82 | Ok(txid) 83 | } 84 | 85 | #[trace] 86 | pub fn utxo(&self, scripthash: &[u8]) -> Result<Vec<Utxo>> { 87 | let mut utxos = self.chain.utxo(scripthash, self.config.utxos_limit)?; 88 | let mempool = self.mempool(); 89 | utxos.retain(|utxo| !mempool.has_spend(&OutPoint::from(utxo))); 90 | utxos.extend(mempool.utxo(scripthash)); 91 | Ok(utxos) 92 | } 93 | 94 | #[trace] 95 | pub fn history_txids(&self, scripthash: &[u8], limit: usize) -> Vec<(Txid, Option<BlockId>)> { 96 | let confirmed_txids = self.chain.history_txids(scripthash, limit); 97 | let confirmed_len = confirmed_txids.len(); 98 | let confirmed_txids = confirmed_txids.into_iter().map(|(tx, b)| (tx, Some(b))); 99 | 100 | let mempool_txids = self 101 | .mempool() 102 | .history_txids(scripthash, limit
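// Fee-estimation caching sketch: estimate_fee() and estimate_fee_map() below
// serve answers from cached_estimates, a (HashMap<u16, f64>, Option<Instant>)
// pair, and refresh it in one estimatesmartfee_batch() call covering all of
// CONF_TARGETS once the cache is older than FEE_ESTIMATES_TTL (60 seconds);
// on regtest, estimate_fee() falls back to the node's relay fee instead.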
--------------------------------------------------------------------------------
/src/new_index/query.rs:
--------------------------------------------------------------------------------
1 | use rayon::prelude::*;
2 | 
3 | use std::collections::{BTreeSet, HashMap};
4 | use std::sync::{Arc, RwLock, RwLockReadGuard};
5 | use std::time::{Duration, Instant};
6 | 
7 | use crate::chain::{Network, OutPoint, Transaction, TxOut, Txid};
8 | use crate::config::Config;
9 | use crate::daemon::Daemon;
10 | use crate::errors::*;
11 | use crate::new_index::{ChainQuery, Mempool, ScriptStats, SpendingInput, Utxo};
12 | use crate::util::{is_spendable, BlockId, Bytes, TransactionStatus};
13 | 
14 | use electrs_macros::trace;
15 | 
16 | #[cfg(feature = "liquid")]
17 | use crate::{
18 |     chain::AssetId,
19 |     elements::{ebcompact::TxidCompat, lookup_asset, AssetRegistry, AssetSorting, LiquidAsset},
20 | };
21 | 
22 | const FEE_ESTIMATES_TTL: u64 = 60; // seconds
23 | 
24 | const CONF_TARGETS: [u16; 28] = [
25 |     1u16, 2u16, 3u16, 4u16, 5u16, 6u16, 7u16, 8u16, 9u16, 10u16, 11u16, 12u16, 13u16, 14u16, 15u16,
26 |     16u16, 17u16, 18u16, 19u16, 20u16, 21u16, 22u16, 23u16, 24u16, 25u16, 144u16, 504u16, 1008u16,
27 | ];
28 | 
29 | pub struct Query {
30 |     chain: Arc<ChainQuery>, // TODO: should be used as read-only
31 |     mempool: Arc<RwLock<Mempool>>,
32 |     daemon: Arc<Daemon>,
33 |     config: Arc<Config>,
34 |     cached_estimates: RwLock<(HashMap<u16, f64>, Option<Instant>)>,
35 |     cached_relayfee: RwLock<Option<f64>>,
36 |     #[cfg(feature = "liquid")]
37 |     asset_db: Option<Arc<RwLock<AssetRegistry>>>,
38 | }
39 | 
40 | impl Query {
41 |     #[cfg(not(feature = "liquid"))]
42 |     pub fn new(
43 |         chain: Arc<ChainQuery>,
44 |         mempool: Arc<RwLock<Mempool>>,
45 |         daemon: Arc<Daemon>,
46 |         config: Arc<Config>,
47 |     ) -> Self {
48 |         Query {
49 |             chain,
50 |             mempool,
51 |             daemon,
52 |             config,
53 |             cached_estimates: RwLock::new((HashMap::new(), None)),
54 |             cached_relayfee: RwLock::new(None),
55 |         }
56 |     }
57 | 
58 |     pub fn chain(&self) -> &ChainQuery {
59 |         &self.chain
60 |     }
61 | 
62 |     pub fn config(&self) -> &Config {
63 |         &self.config
64 |     }
65 | 
66 |     pub fn network(&self) -> Network {
67 |         self.config.network_type
68 |     }
69 | 
70 |     pub fn mempool(&self) -> RwLockReadGuard<Mempool> {
71 |         self.mempool.read().unwrap()
72 |     }
73 | 
74 |     #[trace]
75 |     pub fn broadcast_raw(&self, txhex: &str) -> Result<Txid> {
76 |         let txid = self.daemon.broadcast_raw(txhex)?;
77 |         let _ = self
78 |             .mempool
79 |             .write()
80 |             .unwrap()
81 |             .add_by_txid(&self.daemon, txid);
82 |         Ok(txid)
83 |     }
84 | 
85 |     #[trace]
86 |     pub fn utxo(&self, scripthash: &[u8]) -> Result<Vec<Utxo>> {
87 |         let mut utxos = self.chain.utxo(scripthash, self.config.utxos_limit)?;
88 |         let mempool = self.mempool();
89 |         utxos.retain(|utxo| !mempool.has_spend(&OutPoint::from(utxo)));
90 |         utxos.extend(mempool.utxo(scripthash));
91 |         Ok(utxos)
92 |     }
93 | 
94 |     #[trace]
95 |     pub fn history_txids(&self, scripthash: &[u8], limit: usize) -> Vec<(Txid, Option<BlockId>)> {
96 |         let confirmed_txids = self.chain.history_txids(scripthash, limit);
97 |         let confirmed_len = confirmed_txids.len();
98 |         let confirmed_txids = confirmed_txids.into_iter().map(|(tx, b)| (tx, Some(b)));
99 | 
100 |         let mempool_txids = self
101 |             .mempool()
102 |             .history_txids(scripthash, limit - confirmed_len)
103 |             .into_iter()
104 |             .map(|tx| (tx, None));
105 | 
106 |         confirmed_txids.chain(mempool_txids).collect()
107 |     }
108 | 
109 |     pub fn stats(&self, scripthash: &[u8]) -> (ScriptStats, ScriptStats) {
110 |         (
111 |             self.chain.stats(scripthash),
112 |             self.mempool().stats(scripthash),
113 |         )
114 |     }
115 | 
116 |     #[trace]
117 |     pub fn lookup_txn(&self, txid: &Txid) -> Option<Transaction> {
118 |         self.chain
119 |             .lookup_txn(txid, None)
120 |             .or_else(|| self.mempool().lookup_txn(txid))
121 |     }
122 | 
123 |     #[trace]
124 |     pub fn lookup_raw_txn(&self, txid: &Txid) -> Option<Bytes> {
125 |         self.chain
126 |             .lookup_raw_txn(txid, None)
127 |             .or_else(|| self.mempool().lookup_raw_txn(txid))
128 |     }
129 | 
130 |     #[trace]
131 |     pub fn lookup_txos(&self, outpoints: BTreeSet<OutPoint>) -> HashMap<OutPoint, TxOut> {
132 |         // the mempool lookup_txos() internally looks up confirmed txos as well
133 |         self.mempool()
134 |             .lookup_txos(outpoints)
135 |             .expect("failed loading txos")
136 |     }
137 | 
138 |     #[trace]
139 |     pub fn lookup_spend(&self, outpoint: &OutPoint) -> Option<SpendingInput> {
140 |         self.chain
141 |             .lookup_spend(outpoint)
142 |             .or_else(|| self.mempool().lookup_spend(outpoint))
143 |     }
144 | 
145 |     #[trace]
146 |     pub fn lookup_tx_spends(&self, tx: Transaction) -> Vec<Option<SpendingInput>> {
147 |         let txid = tx.compute_txid();
148 | 
149 |         tx.output
150 |             .par_iter()
151 |             .enumerate()
152 |             .map(|(vout, txout)| {
153 |                 if is_spendable(txout) {
154 |                     self.lookup_spend(&OutPoint {
155 |                         txid,
156 |                         vout: vout as u32,
157 |                     })
158 |                 } else {
159 |                     None
160 |                 }
161 |             })
162 |             .collect()
163 |     }
164 | 
165 |     #[trace]
166 |     pub fn get_tx_status(&self, txid: &Txid) -> TransactionStatus {
167 |         TransactionStatus::from(self.chain.tx_confirming_block(txid))
168 |     }
169 | 
170 |     #[trace]
171 |     pub fn get_mempool_tx_fee(&self, txid: &Txid) -> Option<u64> {
172 |         self.mempool().get_tx_fee(txid)
173 |     }
174 | 
175 |     #[trace]
176 |     pub fn has_unconfirmed_parents(&self, txid: &Txid) -> bool {
177 |         self.mempool().has_unconfirmed_parents(txid)
178 |     }
179 | 
180 |     #[trace]
181 |     pub fn estimate_fee(&self, conf_target: u16) -> Option<f64> {
182 |         if self.config.network_type.is_regtest() {
183 |             return self.get_relayfee().ok();
184 |         }
185 |         if let (ref cache, Some(cache_time)) = *self.cached_estimates.read().unwrap() {
186 |             if cache_time.elapsed() < Duration::from_secs(FEE_ESTIMATES_TTL) {
187 |                 return cache.get(&conf_target).copied();
188 |             }
189 |         }
190 | 
191 |         self.update_fee_estimates();
192 |         self.cached_estimates
193 |             .read()
194 |             .unwrap()
195 |             .0
196 |             .get(&conf_target)
197 |             .copied()
198 |     }
199 | 
200 |     #[trace]
201 |     pub fn estimate_fee_map(&self) -> HashMap<u16, f64> {
202 |         if let (ref cache, Some(cache_time)) = *self.cached_estimates.read().unwrap() {
203 |             if cache_time.elapsed() < Duration::from_secs(FEE_ESTIMATES_TTL) {
204 |                 return cache.clone();
205 |             }
206 |         }
207 | 
208 |         self.update_fee_estimates();
209 |         self.cached_estimates.read().unwrap().0.clone()
210 |     }
211 | 
212 |     #[trace]
213 |     fn update_fee_estimates(&self) {
214 |         match self.daemon.estimatesmartfee_batch(&CONF_TARGETS) {
215 |             Ok(estimates) => {
216 |                 *self.cached_estimates.write().unwrap() = (estimates, Some(Instant::now()));
217 |             }
218 |             Err(err) => {
219 |                 warn!("failed estimating feerates: {:?}", err);
220 |             }
221 |         }
222 |     }
223 | 
224 |     #[trace]
225 |     pub fn get_relayfee(&self) -> Result<f64> {
226 |         if let Some(cached) = *self.cached_relayfee.read().unwrap() {
227 |             return Ok(cached);
228 |         }
229 | 
230 |         let relayfee = self.daemon.get_relayfee()?;
231 |         self.cached_relayfee.write().unwrap().replace(relayfee);
232 |         Ok(relayfee)
233 |     }
234 | 
235 |     #[cfg(feature = "liquid")]
236 |     pub fn new(
237 |         chain: Arc<ChainQuery>,
238 |         mempool: Arc<RwLock<Mempool>>,
239 |         daemon: Arc<Daemon>,
240 |         config: Arc<Config>,
241 |         asset_db: Option<Arc<RwLock<AssetRegistry>>>,
242 |     ) -> Self {
243 |         Query {
244 |             chain,
245 |             mempool,
246 |             daemon,
247 |             config,
248 |             asset_db,
249 |             cached_estimates: RwLock::new((HashMap::new(), None)),
250 |             cached_relayfee: RwLock::new(None),
251 |         }
252 |     }
253 | 
254 |     #[cfg(feature = "liquid")]
255 |     #[trace]
256 |     pub fn lookup_asset(&self, asset_id: &AssetId) -> Result<Option<LiquidAsset>> {
257 |         lookup_asset(&self, self.asset_db.as_ref(), asset_id, None)
258 |     }
259 | 
260 |     #[cfg(feature = "liquid")]
261 |     #[trace]
262 |     pub fn list_registry_assets(
263 |         &self,
264 |         start_index: usize,
265 |         limit: usize,
266 |         sorting: AssetSorting,
267 |     ) -> Result<(usize, Vec<LiquidAsset>)> {
268 |         let asset_db = match &self.asset_db {
269 |             None => return Ok((0, vec![])),
270 |             Some(db) => db.read().unwrap(),
271 |         };
272 |         let (total_num, results) = asset_db.list(start_index, limit, sorting);
273 |         // Attach on-chain information alongside the registry metadata
274 |         let results = results
275 |             .into_iter()
276 |             .map(|(asset_id, metadata)| {
277 |                 Ok(lookup_asset(&self, None, asset_id, Some(metadata))?
278 |                     .chain_err(|| "missing registered asset")?)
279 |             })
280 |             .collect::<Result<Vec<LiquidAsset>>>()?;
281 |         Ok((total_num, results))
282 |     }
283 | }
284 | 
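// Usage sketch for the confirmed/mempool merge in `history_txids` (assumes a
// constructed `query: Query` and a 32-byte `scripthash`): confirmed entries
// come first, each tagged with `Some(BlockId)`, and mempool entries fill the
// remaining `limit` slots tagged with `None`.
//
//     let history = query.history_txids(&scripthash[..], 25);
//     for (txid, blockid) in history {
//         match blockid {
//             Some(b) => println!("{} confirmed at height {}", txid, b.height),
//             None => println!("{} unconfirmed", txid),
//         }
//     }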
--------------------------------------------------------------------------------
/src/new_index/zmq.rs:
--------------------------------------------------------------------------------
1 | use bitcoin::{hashes::Hash, BlockHash};
2 | use crossbeam_channel::Sender;
3 | 
4 | use crate::util::spawn_thread;
5 | 
6 | pub fn start(url: &str, block_hash_notify: Sender<BlockHash>) {
7 |     log::debug!("Starting ZMQ thread");
8 |     let ctx = zmq::Context::new();
9 |     let subscriber: zmq::Socket = ctx.socket(zmq::SUB).expect("failed creating subscriber");
10 |     subscriber
11 |         .connect(url)
12 |         .expect("failed connecting subscriber");
13 | 
14 |     // subscriber.set_subscribe(b"rawtx").unwrap();
15 |     subscriber
16 |         .set_subscribe(b"hashblock")
17 |         .expect("failed subscribing to hashblock");
18 | 
19 |     spawn_thread("zmq", move || loop {
20 |         match subscriber.recv_multipart(0) {
21 |             Ok(data) => match (data.get(0), data.get(1)) {
22 |                 (Some(topic), Some(data)) => {
23 |                     if &topic[..] == b"rawtx" {
24 |                         // rawtx
25 |                     } else if &topic[..] == b"hashblock" {
26 |                         // hashblock
27 |                         let mut reversed = data.to_vec();
28 |                         reversed.reverse();
29 |                         if let Ok(block_hash) = BlockHash::from_slice(&reversed[..]) {
30 |                             log::debug!("New block from ZMQ: {block_hash}");
31 |                             let _ = block_hash_notify.send(block_hash);
32 |                         }
33 |                     }
34 |                 }
35 |                 _ => (),
36 |             },
37 |             Err(e) => log::warn!("recv_multipart error: {e:?}"),
38 |         }
39 |     });
40 | }
41 | 
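// The publisher side is bitcoind: this subscriber expects the node to be
// started with ZMQ block-hash notifications enabled. A typical pairing (the
// exact endpoint and port are deployment-specific assumptions):
//
//     bitcoind -zmqpubhashblock=tcp://127.0.0.1:28332
//
// with the matching subscriber thread started as:
//
//     start("tcp://127.0.0.1:28332", block_hash_sender);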
--------------------------------------------------------------------------------
/src/otlp_trace.rs:
--------------------------------------------------------------------------------
1 | use opentelemetry::{
2 |     runtime,
3 |     sdk::{
4 |         trace::{BatchConfig, RandomIdGenerator, Sampler, Tracer},
5 |         Resource,
6 |     },
7 |     KeyValue,
8 | };
9 | use opentelemetry_otlp::{ExportConfig, Protocol, WithExportConfig};
10 | use opentelemetry_semantic_conventions::{
11 |     resource::{SERVICE_NAME, SERVICE_VERSION},
12 |     SCHEMA_URL,
13 | };
14 | use std::env::var;
15 | use std::time::Duration;
16 | use tracing_opentelemetry::OpenTelemetryLayer;
17 | use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt, EnvFilter};
18 | 
19 | fn init_tracer(resource: Resource, endpoint: &str) -> Tracer {
20 |     let export_config = ExportConfig {
21 |         endpoint: endpoint.to_string(),
22 |         timeout: Duration::from_secs(3),
23 |         protocol: Protocol::Grpc,
24 |     };
25 | 
26 |     opentelemetry_otlp::new_pipeline()
27 |         .tracing()
28 |         .with_trace_config(
29 |             opentelemetry::sdk::trace::Config::default()
30 |                 .with_sampler(Sampler::ParentBased(Box::new(Sampler::TraceIdRatioBased(
31 |                     1.0,
32 |                 ))))
33 |                 .with_id_generator(RandomIdGenerator::default())
34 |                 .with_resource(resource),
35 |         )
36 |         .with_batch_config(BatchConfig::default())
37 |         .with_exporter(
38 |             opentelemetry_otlp::new_exporter()
39 |                 .tonic()
40 |                 .with_endpoint(endpoint)
41 |                 .with_export_config(export_config),
42 |         )
43 |         .install_batch(runtime::Tokio)
44 |         .unwrap()
45 | }
46 | 
47 | fn init_tracing_subscriber(service_name: &str) -> OtelGuard {
48 |     let resource = Resource::from_schema_url(
49 |         [
50 |             KeyValue::new(SERVICE_NAME, service_name.to_owned()),
51 |             KeyValue::new(SERVICE_VERSION, "0.4.1"),
52 |         ],
53 |         SCHEMA_URL,
54 |     );
55 | 
56 |     let env_filter = EnvFilter::from_default_env();
57 | 
58 |     let reg = tracing_subscriber::registry().with(env_filter).with(
59 |         tracing_subscriber::fmt::layer()
60 |             .with_thread_ids(true)
61 |             .with_ansi(false)
62 |             .compact(),
63 |     );
64 |     let _ = if let Ok(endpoint) = var("OTLP_ENDPOINT") {
65 |         reg.with(OpenTelemetryLayer::new(init_tracer(resource, &endpoint)))
66 |             .try_init()
67 |     } else {
68 |         reg.try_init()
69 |     };
70 | 
71 |     log::debug!("Initialized tracing");
72 | 
73 |     OtelGuard {}
74 | }
75 | 
76 | pub fn init_tracing(service_name: &str) -> OtelGuard {
77 |     init_tracing_subscriber(service_name)
78 | }
79 | 
80 | pub struct OtelGuard {}
81 | impl Drop for OtelGuard {
82 |     fn drop(&mut self) {
83 |         opentelemetry::global::shutdown_tracer_provider();
84 |     }
85 | }
86 | 
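// Usage sketch: OTLP export is only wired in when the OTLP_ENDPOINT
// environment variable is set (see `init_tracing_subscriber` above); plain
// log-formatted tracing is installed either way. 4317 is the conventional
// OTLP/gRPC port, used here only as an illustration:
//
//     OTLP_ENDPOINT=http://localhost:4317 RUST_LOG=debug ./electrs ...
//
// The returned guard should be held for the life of the process so the
// batch exporter is flushed on shutdown:
//
//     let _guard = init_tracing("electrs");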
--------------------------------------------------------------------------------
/src/signal.rs:
--------------------------------------------------------------------------------
1 | use bitcoin::BlockHash;
2 | use crossbeam_channel::{self as channel, after, select};
3 | use std::thread;
4 | use std::time::{Duration, Instant};
5 | 
6 | use signal_hook::consts::{SIGINT, SIGTERM, SIGUSR1};
7 | 
8 | use crate::errors::*;
9 | 
10 | #[derive(Clone)] // so multiple threads could wait on signals
11 | pub struct Waiter {
12 |     receiver: channel::Receiver<i32>,
13 |     zmq_receiver: channel::Receiver<BlockHash>,
14 | }
15 | 
16 | fn notify(signals: &[i32]) -> channel::Receiver<i32> {
17 |     let (s, r) = channel::bounded(1);
18 |     let mut signals =
19 |         signal_hook::iterator::Signals::new(signals).expect("failed to register signal hook");
20 |     thread::spawn(move || {
21 |         for signal in signals.forever() {
22 |             s.send(signal)
23 |                 .unwrap_or_else(|_| panic!("failed to send signal {}", signal));
24 |         }
25 |     });
26 |     r
27 | }
28 | 
29 | impl Waiter {
30 |     pub fn start(block_hash_receive: channel::Receiver<BlockHash>) -> Waiter {
31 |         Waiter {
32 |             receiver: notify(&[
33 |                 SIGINT, SIGTERM,
34 |                 SIGUSR1, // allow external triggering (e.g. via bitcoind `blocknotify`)
35 |             ]),
36 |             zmq_receiver: block_hash_receive,
37 |         }
38 |     }
39 | 
40 |     pub fn wait(&self, duration: Duration, accept_block_notification: bool) -> Result<()> {
41 |         let start = Instant::now();
42 |         select! {
43 |             recv(self.receiver) -> msg => {
44 |                 match msg {
45 |                     Ok(sig) if sig == SIGUSR1 => {
46 |                         trace!("notified via SIGUSR1");
47 |                         if accept_block_notification {
48 |                             Ok(())
49 |                         } else {
50 |                             let wait_more = duration.saturating_sub(start.elapsed());
51 |                             self.wait(wait_more, accept_block_notification)
52 |                         }
53 |                     }
54 |                     Ok(sig) => bail!(ErrorKind::Interrupt(sig)),
55 |                     Err(_) => bail!("signal hook channel disconnected"),
56 |                 }
57 |             },
58 |             recv(self.zmq_receiver) -> msg => {
59 |                 match msg {
60 |                     Ok(_) => {
61 |                         if accept_block_notification {
62 |                             Ok(())
63 |                         } else {
64 |                             let wait_more = duration.saturating_sub(start.elapsed());
65 |                             self.wait(wait_more, accept_block_notification)
66 |                         }
67 |                     }
68 |                     Err(_) => bail!("block notification channel disconnected"),
69 |                 }
70 |             },
71 |             recv(after(duration)) -> _ => Ok(()),
72 | 
73 |         }
74 |     }
75 | }
76 | 
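// Usage sketch for the indexer's main loop (assumes a `waiter: Waiter` built
// via `Waiter::start`): `wait` returns Ok(()) on timeout or on an accepted
// SIGUSR1/ZMQ block notification, and Err(Interrupt) on SIGINT/SIGTERM, so
// the loop syncs once per wakeup and exits cleanly on interrupt.
//
//     loop {
//         waiter.wait(Duration::from_secs(5), true)?;
//         // ... sync the index, then wait again ...
//     }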
--------------------------------------------------------------------------------
/src/util/bincode.rs:
--------------------------------------------------------------------------------
1 | //! This module creates two sets of serialize and deserialize functions for bincode.
2 | //! They explicitly spell out the bincode settings so that switching to
3 | //! new versions in the future is less error prone.
4 | //!
5 | //! This is a list of all the row types and their settings for bincode.
6 | //! +--------------+--------+------------+----------------+------------+
7 | //! |              | Endian | Int Length | Allow Trailing | Byte Limit |
8 | //! +--------------+--------+------------+----------------+------------+
9 | //! | TxHistoryRow | big    | fixed      | allow          | unlimited  |
10 | //! | All others   | little | fixed      | allow          | unlimited  |
11 | //! +--------------+--------+------------+----------------+------------+
12 | //!
13 | //! Based on @junderw's https://github.com/mempool/electrs/pull/34. Thanks!
14 | 
15 | use bincode::Options;
16 | 
17 | pub fn serialize_big<T>(value: &T) -> Result<Vec<u8>, bincode::Error>
18 | where
19 |     T: ?Sized + serde::Serialize,
20 | {
21 |     big_endian().serialize(value)
22 | }
23 | 
24 | pub fn deserialize_big<'a, T>(bytes: &'a [u8]) -> Result<T, bincode::Error>
25 | where
26 |     T: serde::Deserialize<'a>,
27 | {
28 |     big_endian().deserialize(bytes)
29 | }
30 | 
31 | pub fn serialize_little<T>(value: &T) -> Result<Vec<u8>, bincode::Error>
32 | where
33 |     T: ?Sized + serde::Serialize,
34 | {
35 |     little_endian().serialize(value)
36 | }
37 | 
38 | pub fn deserialize_little<'a, T>(bytes: &'a [u8]) -> Result<T, bincode::Error>
39 | where
40 |     T: serde::Deserialize<'a>,
41 | {
42 |     little_endian().deserialize(bytes)
43 | }
44 | 
45 | /// These are the default settings for Options,
46 | /// but all explicitly spelled out, except for endianness.
47 | /// The following functions will add endianness.
48 | #[inline]
49 | fn options() -> impl Options {
50 |     bincode::options()
51 |         .with_fixint_encoding()
52 |         .with_no_limit()
53 |         .allow_trailing_bytes()
54 | }
55 | 
56 | /// Adding the endian flag for big endian
57 | #[inline]
58 | fn big_endian() -> impl Options {
59 |     options().with_big_endian()
60 | }
61 | 
62 | /// Adding the endian flag for little endian
63 | #[inline]
64 | fn little_endian() -> impl Options {
65 |     options().with_little_endian()
66 | }
67 | 
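// Round-trip sketch (a hypothetical tuple standing in for a row type): the
// big-endian fixed-int variant is what lets keys like TxHistoryRow sort
// correctly in RocksDB, since lexicographic byte order then matches numeric
// order of the encoded integers.
//
//     let key: (u8, u32) = (b'H', 42);
//     let bytes = serialize_big(&key).unwrap();
//     let back: (u8, u32) = deserialize_big(&bytes).unwrap();
//     assert_eq!(key, back);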
--------------------------------------------------------------------------------
/src/util/block.rs:
--------------------------------------------------------------------------------
1 | use crate::chain::{BlockHash, BlockHeader};
2 | use crate::errors::*;
3 | use crate::new_index::BlockEntry;
4 | 
5 | use std::collections::HashMap;
6 | use std::fmt;
7 | use std::iter::FromIterator;
8 | use std::slice;
9 | use time::format_description::well_known::Rfc3339;
10 | use time::OffsetDateTime as DateTime;
11 | 
12 | use electrs_macros::trace;
13 | 
14 | const MTP_SPAN: usize = 11;
15 | 
16 | lazy_static! {
17 |     pub static ref DEFAULT_BLOCKHASH: BlockHash =
18 |         "0000000000000000000000000000000000000000000000000000000000000000"
19 |             .parse()
20 |             .unwrap();
21 | }
22 | 
23 | #[derive(Debug, Serialize, Deserialize, Clone, Copy)]
24 | pub struct BlockId {
25 |     pub height: usize,
26 |     pub hash: BlockHash,
27 |     pub time: u32,
28 | }
29 | 
30 | impl From<&HeaderEntry> for BlockId {
31 |     fn from(header: &HeaderEntry) -> Self {
32 |         BlockId {
33 |             height: header.height(),
34 |             hash: *header.hash(),
35 |             time: header.header().time,
36 |         }
37 |     }
38 | }
39 | 
40 | #[derive(Eq, PartialEq, Clone)]
41 | pub struct HeaderEntry {
42 |     height: usize,
43 |     hash: BlockHash,
44 |     header: BlockHeader,
45 | }
46 | 
47 | impl HeaderEntry {
48 |     #[cfg(feature = "bench")]
49 |     pub fn new(height: usize, hash: BlockHash, header: BlockHeader) -> Self {
50 |         Self {
51 |             height,
52 |             hash,
53 |             header,
54 |         }
55 |     }
56 |     pub fn hash(&self) -> &BlockHash {
57 |         &self.hash
58 |     }
59 | 
60 |     pub fn header(&self) -> &BlockHeader {
61 |         &self.header
62 |     }
63 | 
64 |     pub fn height(&self) -> usize {
65 |         self.height
66 |     }
67 | }
68 | 
69 | impl fmt::Debug for HeaderEntry {
70 |     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
71 |         let last_block_time = DateTime::from_unix_timestamp(self.header().time as i64).unwrap();
72 |         write!(
73 |             f,
74 |             "hash={} height={} @ {}",
75 |             self.hash(),
76 |             self.height(),
77 |             last_block_time.format(&Rfc3339).unwrap(),
78 |         )
79 |     }
80 | }
81 | 
82 | pub struct HeaderList {
83 |     headers: Vec<HeaderEntry>,
84 |     heights: HashMap<BlockHash, usize>,
85 |     tip: BlockHash,
86 | }
87 | 
88 | impl HeaderList {
89 |     pub fn empty() -> HeaderList {
90 |         HeaderList {
91 |             headers: vec![],
92 |             heights: HashMap::new(),
93 |             tip: *DEFAULT_BLOCKHASH,
94 |         }
95 |     }
96 | 
97 |     #[trace]
98 |     pub fn new(
99 |         mut headers_map: HashMap<BlockHash, BlockHeader>,
100 |         tip_hash: BlockHash,
101 |     ) -> HeaderList {
102 |         trace!(
103 |             "processing {} headers, tip at {:?}",
104 |             headers_map.len(),
105 |             tip_hash
106 |         );
107 | 
108 |         let mut blockhash = tip_hash;
109 |         let mut headers_chain: Vec<BlockHeader> = vec![];
110 | 
111 |         while blockhash != *DEFAULT_BLOCKHASH {
112 |             let header = headers_map.remove(&blockhash).unwrap_or_else(|| {
113 |                 panic!(
114 |                     "missing expected blockhash in headers map: {:?}, pointed from: {:?}",
115 |                     blockhash,
116 |                     headers_chain.last().map(|h| h.block_hash())
117 |                 )
118 |             });
119 |             blockhash = header.prev_blockhash;
120 |             headers_chain.push(header);
121 |         }
122 |         headers_chain.reverse();
123 | 
124 |         trace!(
125 |             "{} chained headers ({} orphan blocks left)",
126 |             headers_chain.len(),
127 |             headers_map.len()
128 |         );
129 | 
130 |         let mut headers = HeaderList::empty();
131 |         headers.apply(headers.order(headers_chain));
132 |         headers
133 |     }
134 | 
135 |     #[trace]
136 |     pub fn order(&self, new_headers: Vec<BlockHeader>) -> Vec<HeaderEntry> {
137 |         // header[i] -> header[i-1] (i.e. header.last() is the tip)
138 |         struct HashedHeader {
139 |             blockhash: BlockHash,
140 |             header: BlockHeader,
141 |         }
142 |         let hashed_headers =
143 |             Vec::<HashedHeader>::from_iter(new_headers.into_iter().map(|header| HashedHeader {
144 |                 blockhash: header.block_hash(),
145 |                 header,
146 |             }));
147 |         for i in 1..hashed_headers.len() {
148 |             assert_eq!(
149 |                 hashed_headers[i].header.prev_blockhash,
150 |                 hashed_headers[i - 1].blockhash
151 |             );
152 |         }
153 |         let prev_blockhash = match hashed_headers.first() {
154 |             Some(h) => h.header.prev_blockhash,
155 |             None => return vec![], // hashed_headers is empty
156 |         };
157 |         let new_height: usize = if prev_blockhash == *DEFAULT_BLOCKHASH {
158 |             0
159 |         } else {
160 |             self.header_by_blockhash(&prev_blockhash)
161 |                 .unwrap_or_else(|| panic!("{} is not part of the blockchain", prev_blockhash))
162 |                 .height()
163 |                 + 1
164 |         };
165 |         (new_height..)
166 |             .zip(hashed_headers.into_iter())
167 |             .map(|(height, hashed_header)| HeaderEntry {
168 |                 height,
169 |                 hash: hashed_header.blockhash,
170 |                 header: hashed_header.header,
171 |             })
172 |             .collect()
173 |     }
174 | 
175 |     #[trace]
176 |     pub fn apply(&mut self, new_headers: Vec<HeaderEntry>) {
177 |         // new_headers[i] -> new_headers[i - 1] (i.e. new_headers.last() is the tip)
178 |         for i in 1..new_headers.len() {
179 |             assert_eq!(new_headers[i - 1].height() + 1, new_headers[i].height());
180 |             assert_eq!(
181 |                 *new_headers[i - 1].hash(),
182 |                 new_headers[i].header().prev_blockhash
183 |             );
184 |         }
185 |         let new_height = match new_headers.first() {
186 |             Some(entry) => {
187 |                 let height = entry.height();
188 |                 let expected_prev_blockhash = if height > 0 {
189 |                     *self.headers[height - 1].hash()
190 |                 } else {
191 |                     *DEFAULT_BLOCKHASH
192 |                 };
193 |                 assert_eq!(entry.header().prev_blockhash, expected_prev_blockhash);
194 |                 height
195 |             }
196 |             None => return,
197 |         };
198 |         debug!(
199 |             "applying {} new headers from height {}",
200 |             new_headers.len(),
201 |             new_height
202 |         );
203 |         let _removed = self.headers.split_off(new_height); // keep [0..new_height) entries
204 |         for new_header in new_headers {
205 |             let height = new_header.height();
206 |             assert_eq!(height, self.headers.len());
207 |             self.tip = *new_header.hash();
208 |             self.headers.push(new_header);
209 |             self.heights.insert(self.tip, height);
210 |         }
211 |     }
212 | 
213 |     #[trace]
214 |     pub fn header_by_blockhash(&self, blockhash: &BlockHash) -> Option<&HeaderEntry> {
215 |         let height = self.heights.get(blockhash)?;
216 |         let header = self.headers.get(*height)?;
217 |         if *blockhash == *header.hash() {
218 |             Some(header)
219 |         } else {
220 |             None
221 |         }
222 |     }
223 | 
224 |     #[trace]
225 |     pub fn header_by_height(&self, height: usize) -> Option<&HeaderEntry> {
226 |         self.headers.get(height).map(|entry| {
227 |             assert_eq!(entry.height(), height);
228 |             entry
229 |         })
230 |     }
231 | 
232 |     pub fn equals(&self, other: &HeaderList) -> bool {
233 |         self.headers.last() == other.headers.last()
234 |     }
235 | 
236 |     pub fn tip(&self) -> &BlockHash {
237 |         assert_eq!(
238 |             self.tip,
239 |             self.headers
240 |                 .last()
241 |                 .map(|h| *h.hash())
242 |                 .unwrap_or(*DEFAULT_BLOCKHASH)
243 |         );
244 |         &self.tip
245 |     }
246 | 
247 |     pub fn len(&self) -> usize {
248 |         self.headers.len()
249 |     }
250 | 
251 |     pub fn is_empty(&self) -> bool {
252 |         self.headers.is_empty()
253 |     }
254 | 
255 |     pub fn iter(&self) -> slice::Iter<HeaderEntry> {
256 |         self.headers.iter()
257 |     }
258 | 
259 |     /// Get the Median Time Past
260 |     pub fn get_mtp(&self, height: usize) -> u32 {
261 |         // Use the timestamp as the mtp of the genesis block.
262 |         // Matches bitcoind's behaviour: bitcoin-cli getblock `bitcoin-cli getblockhash 0` | jq '.time == .mediantime'
263 |         if height == 0 {
264 |             self.headers.get(0).unwrap().header.time
265 |         } else if height > self.len() - 1 {
266 |             0
267 |         } else {
268 |             let mut timestamps = (height.saturating_sub(MTP_SPAN - 1)..=height)
269 |                 .map(|p_height| self.headers.get(p_height).unwrap().header.time)
270 |                 .collect::<Vec<_>>();
271 |             timestamps.sort_unstable();
272 |             timestamps[timestamps.len() / 2]
273 |         }
274 |     }
275 | }
276 | 
277 | #[derive(Serialize, Deserialize)]
278 | pub struct BlockStatus {
279 |     pub in_best_chain: bool,
280 |     pub height: Option<usize>,
281 |     pub next_best: Option<BlockHash>,
282 | }
283 | 
284 | impl BlockStatus {
285 |     pub fn confirmed(height: usize, next_best: Option<BlockHash>) -> BlockStatus {
286 |         BlockStatus {
287 |             in_best_chain: true,
288 |             height: Some(height),
289 |             next_best,
290 |         }
291 |     }
292 | 
293 |     pub fn orphaned() -> BlockStatus {
294 |         BlockStatus {
295 |             in_best_chain: false,
296 |             height: None,
297 |             next_best: None,
298 |         }
299 |     }
300 | }
301 | 
302 | #[derive(Serialize, Deserialize, Debug)]
303 | pub struct BlockMeta {
304 |     #[serde(alias = "nTx")]
305 |     pub tx_count: u32,
306 |     pub size: u32,
307 |     pub weight: u32,
308 | }
309 | 
310 | pub struct BlockHeaderMeta {
311 |     pub header_entry: HeaderEntry,
312 |     pub meta: BlockMeta,
313 |     pub mtp: u32,
314 | }
315 | 
316 | impl From<&BlockEntry> for BlockMeta {
317 |     fn from(b: &BlockEntry) -> BlockMeta {
318 |         let weight = b.block.weight();
319 |         #[cfg(not(feature = "liquid"))] // rust-bitcoin has a wrapper Weight type
320 |         let weight = weight.to_wu();
321 | 
322 |         BlockMeta {
323 |             tx_count: b.block.txdata.len() as u32,
324 |             // To retain DB compatibility, block weights are converted from the u64
325 |             // representation used as of rust-bitcoin v0.30 back to a u32. This is OK
326 |             // because u32::MAX is far above MAX_BLOCK_WEIGHT.
327 |             weight: weight as u32,
328 |             size: b.size,
329 |         }
330 |     }
331 | }
332 | 
333 | impl BlockMeta {
334 |     pub fn parse_getblock(val: ::serde_json::Value) -> Result<BlockMeta> {
335 |         Ok(BlockMeta {
336 |             tx_count: val
337 |                 .get("nTx")
338 |                 .chain_err(|| "missing nTx")?
339 |                 .as_f64()
340 |                 .chain_err(|| "nTx not a number")? as u32,
341 |             size: val
342 |                 .get("size")
343 |                 .chain_err(|| "missing size")?
344 |                 .as_f64()
345 |                 .chain_err(|| "size not a number")? as u32,
346 |             weight: val
347 |                 .get("weight")
348 |                 .chain_err(|| "missing weight")?
349 |                 .as_f64()
350 |                 .chain_err(|| "weight not a number")? as u32,
351 |         })
352 |     }
353 | }
354 | 
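// Worked example for `HeaderList::get_mtp` above: for a chain at height >= 10,
// the MTP of height h is the median of the MTP_SPAN = 11 timestamps at heights
// h-10..=h. With the 11 timestamps sorted ascending, the median is the 6th
// entry, i.e. timestamps[11 / 2] = timestamps[5]. Below height 10,
// saturating_sub simply shrinks the window to the heights that exist.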
--------------------------------------------------------------------------------
/src/util/electrum_merkle.rs:
--------------------------------------------------------------------------------
1 | use crate::chain::{BlockHash, Txid};
2 | use crate::errors::*;
3 | use crate::new_index::ChainQuery;
4 | use bitcoin::hashes::{sha256d::Hash as Sha256dHash, Hash};
5 | 
6 | use electrs_macros::trace;
7 | 
8 | #[trace]
9 | pub fn get_tx_merkle_proof(
10 |     chain: &ChainQuery,
11 |     tx_hash: &Txid,
12 |     block_hash: &BlockHash,
13 | ) -> Result<(Vec<Sha256dHash>, usize)> {
14 |     let txids = chain
15 |         .get_block_txids(&block_hash)
16 |         .chain_err(|| format!("missing block txids for #{}", block_hash))?;
17 |     let pos = txids
18 |         .iter()
19 |         .position(|txid| txid == tx_hash)
20 |         .chain_err(|| format!("missing txid {}", tx_hash))?;
21 |     let txids = txids.into_iter().map(Sha256dHash::from).collect();
22 | 
23 |     let (branch, _root) = create_merkle_branch_and_root(txids, pos);
24 |     Ok((branch, pos))
25 | }
26 | 
27 | #[trace]
28 | pub fn get_header_merkle_proof(
29 |     chain: &ChainQuery,
30 |     height: usize,
31 |     cp_height: usize,
32 | ) -> Result<(Vec<Sha256dHash>, Sha256dHash)> {
33 |     if cp_height < height {
34 |         bail!("cp_height #{} < height #{}", cp_height, height);
35 |     }
36 | 
37 |     let best_height = chain.best_height();
38 |     if best_height < cp_height {
39 |         bail!(
40 |             "cp_height #{} above best block height #{}",
41 |             cp_height,
42 |             best_height
43 |         );
44 |     }
45 | 
46 |     let heights: Vec<usize> = (0..=cp_height).collect();
47 |     let header_hashes: Vec<BlockHash> = heights
48 |         .into_iter()
49 |         .map(|height| chain.hash_by_height(height))
50 |         .collect::<Option<Vec<BlockHash>>>()
51 |         .chain_err(|| "missing block headers")?;
52 | 
53 |     let header_hashes = header_hashes.into_iter().map(Sha256dHash::from).collect();
54 |     Ok(create_merkle_branch_and_root(header_hashes, height))
55 | }
56 | #[trace]
57 | pub fn get_id_from_pos(
58 |     chain: &ChainQuery,
59 |     height: usize,
60 |     tx_pos: usize,
61 |     want_merkle: bool,
62 | ) -> Result<(Txid, Vec<Sha256dHash>)> {
63 |     let header_hash = chain
64 |         .hash_by_height(height)
65 |         .chain_err(|| format!("missing block #{}", height))?;
66 | 
67 |     let txids = chain
68 |         .get_block_txids(&header_hash)
69 |         .chain_err(|| format!("missing block txids #{}", height))?;
70 | 
71 |     let txid = *txids
72 |         .get(tx_pos)
73 |         .chain_err(|| format!("No tx in position #{} in block #{}", tx_pos, height))?;
74 | 
75 |     let txids = txids.into_iter().map(Sha256dHash::from).collect();
76 | 
77 |     let branch = if want_merkle {
78 |         create_merkle_branch_and_root(txids, tx_pos).0
79 |     } else {
80 |         vec![]
81 |     };
82 |     Ok((txid, branch))
83 | }
84 | 
85 | fn merklize(left: Sha256dHash, right: Sha256dHash) -> Sha256dHash {
86 |     let data = [&left[..], &right[..]].concat();
87 |     Sha256dHash::hash(&data)
88 | }
89 | 
90 | fn create_merkle_branch_and_root(
91 |     mut hashes: Vec<Sha256dHash>,
92 |     mut index: usize,
93 | ) -> (Vec<Sha256dHash>, Sha256dHash) {
94 |     let mut merkle = vec![];
95 |     while hashes.len() > 1 {
96 |         if hashes.len() % 2 != 0 {
97 |             let last = *hashes.last().unwrap();
98 |             hashes.push(last);
99 |         }
100 |         index = if index % 2 == 0 { index + 1 } else { index - 1 };
101 |         merkle.push(hashes[index]);
102 |         index /= 2;
103 |         hashes = hashes
104 |             .chunks(2)
105 |             .map(|pair| merklize(pair[0], pair[1]))
106 |             .collect()
107 |     }
108 |     (merkle, hashes[0])
109 | }
110 | 
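// Verification sketch for a branch produced above: starting from the txid at
// position `pos`, hash upward one level at a time, placing each sibling on the
// left or right according to the parity of the index at that level. A minimal
// sketch mirroring create_merkle_branch_and_root, assuming `branch` and `pos`
// as returned by get_tx_merkle_proof:
//
//     let mut node = Sha256dHash::from(txid);
//     let mut index = pos;
//     for sibling in &branch {
//         node = if index % 2 == 0 {
//             merklize(node, *sibling) // our node is the left child
//         } else {
//             merklize(*sibling, node) // our node is the right child
//         };
//         index /= 2;
//     }
//     // `node` should now equal the block's merkle root.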
--------------------------------------------------------------------------------
/src/util/fees.rs:
--------------------------------------------------------------------------------
1 | use crate::chain::{Network, Transaction, TxOut};
2 | use std::collections::HashMap;
3 | 
4 | use electrs_macros::trace;
5 | 
6 | const VSIZE_BIN_WIDTH: u64 = 50_000; // in vbytes
7 | 
8 | pub struct TxFeeInfo {
9 |     pub fee: u64,           // in satoshis
10 |     pub vsize: u64,         // in virtual bytes (= weight/4)
11 |     pub fee_per_vbyte: f64, // in sat/vb
12 | }
13 | 
14 | impl TxFeeInfo {
15 |     pub fn new(tx: &Transaction, prevouts: &HashMap<u32, &TxOut>, network: Network) -> Self {
16 |         let fee = get_tx_fee(tx, prevouts, network);
17 | 
18 |         let weight = tx.weight();
19 |         #[cfg(not(feature = "liquid"))] // rust-bitcoin has a wrapper Weight type
20 |         let weight = weight.to_wu();
21 | 
22 |         let vsize_float = weight as f64 / 4f64; // for more accurate sat/vB
23 | 
24 |         TxFeeInfo {
25 |             fee,
26 |             vsize: vsize_float.ceil() as u64,
27 |             fee_per_vbyte: fee as f64 / vsize_float,
28 |         }
29 |     }
30 | }
31 | 
32 | #[cfg(not(feature = "liquid"))]
33 | pub fn get_tx_fee(tx: &Transaction, prevouts: &HashMap<u32, &TxOut>, _network: Network) -> u64 {
34 |     if tx.is_coinbase() {
35 |         return 0;
36 |     }
37 | 
38 |     let total_in: u64 = prevouts
39 |         .values()
40 |         .map(|prevout| prevout.value.to_sat())
41 |         .sum();
42 |     let total_out: u64 = tx.output.iter().map(|vout| vout.value.to_sat()).sum();
43 |     total_in - total_out
44 | }
45 | 
46 | #[cfg(feature = "liquid")]
47 | pub fn get_tx_fee(tx: &Transaction, _prevouts: &HashMap<u32, &TxOut>, network: Network) -> u64 {
48 |     tx.fee_in(*network.native_asset())
49 | }
50 | 
51 | #[trace]
52 | pub fn make_fee_histogram(mut entries: Vec<&TxFeeInfo>) -> Vec<(f64, u64)> {
53 |     entries.sort_unstable_by(|e1, e2| e1.fee_per_vbyte.partial_cmp(&e2.fee_per_vbyte).unwrap());
54 | 
55 |     let mut histogram = vec![];
56 |     let mut bin_size = 0;
57 |     let mut last_fee_rate = 0.0;
58 |     for e in entries.iter().rev() {
59 |         if bin_size > VSIZE_BIN_WIDTH && last_fee_rate != e.fee_per_vbyte {
60 |             // vsize of transactions paying >= last_fee_rate
61 |             histogram.push((last_fee_rate, bin_size));
62 |             bin_size = 0;
63 |         }
64 |         last_fee_rate = e.fee_per_vbyte;
65 |         bin_size += e.vsize;
66 |     }
67 |     if bin_size > 0 {
68 |         histogram.push((last_fee_rate, bin_size));
69 |     }
70 |     histogram
71 | }
72 | 
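// Reading the histogram above: entries are (fee_rate, vsize) pairs in
// descending fee-rate order, where each `vsize` aggregates roughly
// VSIZE_BIN_WIDTH vbytes of transactions paying at least `fee_rate` sat/vB.
// For instance (illustrative numbers only):
//
//     [(12.0, 50_712), (5.4, 51_003), (1.1, 20_039)]
//
// means ~50k vbytes of mempool transactions pay >= 12 sat/vB, the next ~51k
// vbytes pay >= 5.4 sat/vB, and the final ~20k vbytes pay >= 1.1 sat/vB.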
--------------------------------------------------------------------------------
/src/util/mod.rs:
--------------------------------------------------------------------------------
1 | mod block;
2 | mod script;
3 | mod transaction;
4 | 
5 | pub mod bincode;
6 | pub mod electrum_merkle;
7 | pub mod fees;
8 | 
9 | pub use self::block::{
10 |     BlockHeaderMeta, BlockId, BlockMeta, BlockStatus, HeaderEntry, HeaderList, DEFAULT_BLOCKHASH,
11 | };
12 | pub use self::fees::get_tx_fee;
13 | pub use self::script::{get_innerscripts, ScriptToAddr, ScriptToAsm};
14 | pub use self::transaction::{
15 |     extract_tx_prevouts, get_prev_outpoints, has_prevout, is_coinbase, is_spendable,
16 |     serialize_outpoint, TransactionStatus, TxInput,
17 | };
18 | 
19 | #[cfg(feature = "liquid")]
20 | pub use self::transaction::optional_value_for_newer_blocks;
21 | 
22 | use std::collections::HashMap;
23 | use std::sync::mpsc::{channel, sync_channel, Receiver, Sender, SyncSender};
24 | use std::thread;
25 | 
26 | use crate::chain::BlockHeader;
27 | use bitcoin::hashes::sha256d::Hash as Sha256dHash;
28 | use socket2::{Domain, Protocol, Socket, Type};
29 | use std::net::SocketAddr;
30 | 
31 | pub type Bytes = Vec<u8>;
32 | pub type HeaderMap = HashMap<Sha256dHash, BlockHeader>;
33 | 
34 | // TODO: consolidate serialization/deserialize code for bincode/bitcoin.
35 | const HASH_LEN: usize = 32;
36 | 
37 | pub type FullHash = [u8; HASH_LEN];
38 | 
39 | pub fn full_hash(hash: &[u8]) -> FullHash {
40 |     *array_ref![hash, 0, HASH_LEN]
41 | }
42 | 
43 | pub struct SyncChannel<T> {
44 |     tx: SyncSender<T>,
45 |     rx: Receiver<T>,
46 | }
47 | 
48 | impl<T> SyncChannel<T> {
49 |     pub fn new(size: usize) -> SyncChannel<T> {
50 |         let (tx, rx) = sync_channel(size);
51 |         SyncChannel { tx, rx }
52 |     }
53 | 
54 |     pub fn sender(&self) -> SyncSender<T> {
55 |         self.tx.clone()
56 |     }
57 | 
58 |     pub fn receiver(&self) -> &Receiver<T> {
59 |         &self.rx
60 |     }
61 | 
62 |     pub fn into_receiver(self) -> Receiver<T> {
63 |         self.rx
64 |     }
65 | }
66 | 
67 | pub struct Channel<T> {
68 |     tx: Sender<T>,
69 |     rx: Receiver<T>,
70 | }
71 | 
72 | impl<T> Channel<T> {
73 |     pub fn unbounded() -> Self {
74 |         let (tx, rx) = channel();
75 |         Channel { tx, rx }
76 |     }
77 | 
78 |     pub fn sender(&self) -> Sender<T> {
79 |         self.tx.clone()
80 |     }
81 | 
82 |     pub fn receiver(&self) -> &Receiver<T> {
83 |         &self.rx
84 |     }
85 | 
86 |     pub fn into_receiver(self) -> Receiver<T> {
87 |         self.rx
88 |     }
89 | }
90 | 
91 | pub fn spawn_thread<F, T>(name: &str, f: F) -> thread::JoinHandle<T>
92 | where
93 |     F: FnOnce() -> T,
94 |     F: Send + 'static,
95 |     T: Send + 'static,
96 | {
97 |     thread::Builder::new()
98 |         .name(name.to_owned())
99 |         .spawn(f)
100 |         .unwrap()
101 | }
102 | 
103 | // Similar to https://doc.rust-lang.org/std/primitive.bool.html#method.then (nightly only),
104 | // but with a function that returns an `Option<T>` instead of `T`. Adding something like
105 | // this to std is being discussed: https://github.com/rust-lang/rust/issues/64260
106 | 
107 | pub trait BoolThen {
108 |     fn and_then<T>(self, f: impl FnOnce() -> Option<T>) -> Option<T>;
109 | }
110 | 
111 | impl BoolThen for bool {
112 |     fn and_then<T>(self, f: impl FnOnce() -> Option<T>) -> Option<T> {
113 |         if self {
114 |             f()
115 |         } else {
116 |             None
117 |         }
118 |     }
119 | }
120 | 
121 | pub fn create_socket(addr: &SocketAddr) -> Socket {
122 |     let domain = match &addr {
123 |         SocketAddr::V4(_) => Domain::IPV4,
124 |         SocketAddr::V6(_) => Domain::IPV6,
125 |     };
126 |     let socket =
127 |         Socket::new(domain, Type::STREAM, Some(Protocol::TCP)).expect("creating socket failed");
128 | 
129 |     #[cfg(unix)]
130 |     socket
131 |         .set_reuse_port(true)
132 |         .expect("cannot enable SO_REUSEPORT");
133 | 
134 |     socket.bind(&addr.clone().into()).expect("cannot bind");
135 | 
136 |     socket
137 | }
138 | 
--------------------------------------------------------------------------------
/src/util/script.rs:
--------------------------------------------------------------------------------
1 | #[cfg(feature = "liquid")]
2 | use crate::elements::ebcompact::*;
3 | #[cfg(feature = "liquid")]
4 | use elements::address as elements_address;
5 | 
6 | use crate::chain::{script, Network, Script, TxIn, TxOut};
7 | use script::Instruction::PushBytes;
8 | 
9 | pub struct InnerScripts {
10 |     pub redeem_script: Option