├── .cargo └── audit.toml ├── .dockerignore ├── .github ├── ISSUE_TEMPLATE │ ├── bug.md │ ├── build_problem.md │ ├── config_problem.md │ └── feature_request.md ├── dependabot.yml └── workflows │ └── rust.yml ├── .gitignore ├── CONTRIBUTING.md ├── Cargo.lock ├── Cargo.toml ├── Dockerfile ├── Dockerfile.ci ├── LICENSE ├── README.md ├── RELEASE-NOTES.md ├── TODO.md ├── build.rs ├── contrib ├── .gitignore ├── client.py ├── daemon.py ├── get_balance.py ├── get_balance.sh ├── get_tip.py ├── get_tx.py ├── health_check.py ├── history.py ├── history.sh ├── local-electrum.bash ├── mempool.py ├── script_hash.py ├── script_hash.sh ├── testChanges.sh ├── tx_fee.py └── venv_wrapper.sh ├── doc ├── binaries.md ├── config.md ├── config_example.toml ├── cookie_deprecation.md ├── install.md ├── monitoring.md ├── schema.md ├── upgrading.md └── usage.md ├── examples └── tx_collisions.rs ├── internal ├── README.md └── config_specification.toml ├── logo ├── icon.svg ├── logo.svg └── manual.pdf ├── server.sh ├── src ├── bin │ └── electrs.rs ├── cache.rs ├── chain.rs ├── config.rs ├── daemon.rs ├── db.rs ├── electrum.rs ├── index.rs ├── lib.rs ├── mempool.rs ├── merkle.rs ├── metrics.rs ├── p2p.rs ├── server.rs ├── signals.rs ├── status.rs ├── tests │ └── blocks │ │ ├── 000000000000000002d249a3d89f63ef3fee203adcca7c24008c13fd854513f2 │ │ └── 00000000000000001203c1ea455e38612bdf36e9967fdead11935c8e22283ecc ├── thread.rs ├── tracker.rs └── types.rs └── tests └── run.sh /.cargo/audit.toml: -------------------------------------------------------------------------------- 1 | [advisories] 2 | -------------------------------------------------------------------------------- /.dockerignore: -------------------------------------------------------------------------------- 1 | .* 2 | _* 3 | contrib 4 | db* 5 | dist 6 | doc 7 | Dockerfile 8 | examples 9 | scripts 10 | target 11 | tests 12 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug.md: 
-------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Generic bug report 4 | title: 'Bug:' 5 | labels: bug 6 | assignees: '' 7 | 8 | --- 9 | 10 | 18 | 19 | **Describe the bug** 20 | A clear and concise description of what the bug is. 21 | 22 | **Electrs version** 23 | Which version of `electrs` do you use? Please try to use newest version if possible. 24 | If it's not the newest version why you can't try the newest one? 25 | 26 | **To Reproduce** 27 | Steps to reproduce the behavior: 28 | 1. Configure and start electrs 29 | 2. Connect with electrum client XYZ 30 | 3. Wait 31 | 4. See error 32 | 33 | **Expected behavior** 34 | A clear and concise description of what you expected to happen. 35 | 36 | **Configuration** 37 | 38 | 39 |
40 | electrs.toml 41 | 42 | ``` 43 | type error message here 44 | ``` 45 | 46 |
47 | 48 | Environment variables: `ELECTRS_X=Y;...` 49 | Arguments: `--foo` 50 | 51 | **System running electrs** 52 | - Deployment method: manual (which guide did you follow?)/native OS package/Docker 53 | - OS name and version (name of distribution and version in case of Linux) 54 | 55 | **Electrum client** 56 | Client name (if not upstream desktop Electrum) and version: 57 | 58 | **Additional context** 59 | Add any other context about the problem here. 60 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/build_problem.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Build problem 3 | about: Building of electrs failed 4 | title: 'Build:' 5 | labels: bug, build 6 | assignees: Kixunil 7 | 8 | --- 9 | 10 | 18 | 19 | **Have you read the documentation?** 20 | Yes. (Please, read usage.md first if you did not.) 21 | 22 | **Did you double-check that you installed all dependencies?** 23 | Yes. (Please, double check the dependencies if you didn't.) 24 | 25 | **Which command failed?** 26 | `cargo build` 27 | 28 | **What was the error message?** 29 | 30 |
31 | Error message 32 | 33 | ``` 34 | type error message here 35 | ``` 36 | 37 |
38 | 39 | **System** 40 | OS name and version: (If Linux, the distribution name and version) 41 | rustc version: (run `rustc --version`) 42 | cargo version: (run `cargo --version`; not guaranteed to be same as rustc version!) 43 | 44 | **Compilation** 45 | Linking: static/dynamic 46 | Cross compilation: yes/no 47 | Target architecture: (uname -m on Linux if not cross-compiling) 48 | 49 | **Additional context** 50 | Any additional information that seems to be relevant. 51 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/config_problem.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Configuration problem 3 | about: The configuration behaves unexpectedly 4 | title: 'Config:' 5 | labels: bug 6 | assignees: Kixunil 7 | 8 | --- 9 | 10 | 17 | 18 | **Have you read the documentation?** 19 | Yes. (Please, read usage.md first if you did not.) 20 | 21 | **How did you configure electrs?** 22 | 23 | 24 | 25 |
26 | electrs.toml 27 | 28 | ``` 29 | type error message here 30 | ``` 31 | 32 |
33 | 34 | Environment variables: `ELECTRS_X=Y;...` 35 | Arguments: `--foo` 36 | 37 | **Debug output of configuration** 38 | ``` 39 | Enter the debug output of configuration shown at start of electrs 40 | ``` 41 | 42 | **Expected behavior** 43 | How did you expect `electrs` to be configured 44 | 45 | **Actual behavior** 46 | How does `electrs` behave? 47 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Generic feature request 4 | title: 'Feature:' 5 | labels: enhancement 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Is your feature request related to a problem? Please describe.** 11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] 12 | 13 | **Describe the solution you'd like** 14 | A clear and concise description of what you want to happen. 15 | 16 | **Describe alternatives you've considered** 17 | A clear and concise description of any alternative solutions or features you've considered. 18 | 19 | **Additional context** 20 | Add any other context or screenshots about the feature request here. 
21 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: "cargo" 4 | directory: "/" 5 | schedule: 6 | interval: "weekly" 7 | -------------------------------------------------------------------------------- /.github/workflows/rust.yml: -------------------------------------------------------------------------------- 1 | name: electrs 2 | 3 | on: 4 | push: 5 | branches: 6 | - master 7 | pull_request: 8 | schedule: 9 | - cron: "0 0 * * *" # once a day 10 | 11 | jobs: 12 | build: 13 | name: Build 14 | 15 | runs-on: ${{ matrix.os }} 16 | 17 | strategy: 18 | matrix: 19 | os: [ubuntu-latest, windows-latest] 20 | build-args: 21 | [ 22 | --locked --no-default-features, 23 | --locked 24 | ] 25 | include: 26 | - os: ubuntu-latest 27 | build-args: --locked --features metrics_process 28 | 29 | steps: 30 | - uses: actions/checkout@v4 31 | - uses: dtolnay/rust-toolchain@stable 32 | with: 33 | components: rustfmt, clippy 34 | 35 | - name: Install Rust 36 | run: rustup component add rustfmt clippy 37 | 38 | - name: Format 39 | run: cargo fmt --all -- --check 40 | 41 | - name: Build 42 | run: cargo build ${{ matrix.build-args }} --all 43 | 44 | - name: Test 45 | run: cargo test ${{ matrix.build-args }} --all 46 | 47 | - name: Clippy 48 | run: cargo clippy -- -D warnings 49 | 50 | integration: 51 | name: Integration 52 | runs-on: ubuntu-latest 53 | steps: 54 | - name: Checkout 55 | uses: actions/checkout@v4 56 | - name: Build 57 | run: docker build -f Dockerfile.ci . 
--rm -t electrs:tests 58 | - name: Test 59 | run: docker run -v $PWD/contrib/:/contrib -v $PWD/tests/:/tests --rm electrs:tests bash /tests/run.sh 60 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | target 2 | /db*/ 3 | _*/ 4 | *.log 5 | *.sublime* 6 | *~ 7 | *.pyc 8 | .env 9 | *.dat 10 | electrs.toml 11 | data/ 12 | tests/bitcoin-* 13 | tests/bin 14 | .idea/ 15 | *.txt 16 | *.json 17 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing to electrs 2 | 3 | :+1::tada: First off, thanks for taking the time to contribute! :tada::+1: 4 | 5 | The following is a set of guidelines for contributing to this project! These are 6 | mostly guidelines, not rules. Use your best judgment, and feel free to propose 7 | changes to this document in a pull request. 8 | 9 | ## General 10 | 11 | Electrs project operates an open contributor model where anyone is 12 | welcome to contribute towards development in the form of peer review, 13 | documentation, testing and patches. 14 | 15 | Anyone is invited to contribute without regard to technical experience, 16 | "expertise", OSS experience, age, or other concern. However, the development of 17 | standards & reference implementations demands a high-level of rigor, adversarial 18 | thinking, thorough testing and risk-minimization. Any bug may cost users real 19 | money. That being said, we deeply welcome people contributing for the first time 20 | to an open source project or pick up Rust while contributing. Don't be shy, 21 | you'll learn. 22 | 23 | 24 | ## Contribution workflow 25 | 26 | The codebase is maintained using the "contributor workflow" where everyone 27 | without exception contributes patch proposals using "pull requests". 
This 28 | facilitates social contribution, easy testing and peer review. 29 | 30 | To contribute a patch, the workflow is as follows: 31 | 32 | 1. Fork repository 33 | 2. Create topic branch 34 | 3. Commit patches 35 | 36 | Please keep commits atomic and diffs easy to read. For this reason 37 | do not mix any formatting fixes or code moves with actual code changes. 38 | Further, each commit, individually, should compile and pass tests, in order to 39 | ensure git bisect and other automated tools function properly. 40 | 41 | Please cover every new feature with unit tests. 42 | 43 | When refactoring, structure your PR to make it easy to review and don't hesitate 44 | to split it into multiple small, focused PRs. 45 | 46 | To facilitate communication with other contributors, the project is making use 47 | of GitHub's "assignee" field. First check that no one is assigned and then 48 | comment suggesting that you're working on it. If someone is already assigned, 49 | don't hesitate to ask if the assigned party or previous commenters are still 50 | working on it if it has been awhile. 51 | 52 | 53 | ## Preparing PRs 54 | 55 | The main library development happens in the `master` branch. This branch must 56 | always compile without errors (using GitHub CI). All external contributions are 57 | made within PRs into this branch. 58 | 59 | Prerequisites that a PR must satisfy for merging into the `master` branch: 60 | * final commit within the PR must compile and pass unit tests with no error 61 | * final commit of the PR must be properly formatted and linted 62 | * be based on the recent `master` tip from the original repository at 63 | . 64 | 65 | ## Checking if the PR will pass the GitHub CI 66 | PR authors may also find it useful to run the following script locally in order 67 | to check that their code satisfies all the requirements of the GitHub workflows and doesn't fail 68 | automated build. 
69 | 70 | You can run the following command from the root of the project: 71 | ``` 72 | ./contrib/testChanges.sh 73 | ``` 74 | 75 | 76 | ### Peer review 77 | 78 | Anyone may participate in peer review which is expressed by comments in the pull 79 | request. Typically, reviewers will review the code for obvious errors, as well as 80 | test out the patch set and opine on the technical merits of the patch. Please, 81 | first review PR on the conceptual level before focusing on code style or 82 | grammar fixes. 83 | 84 | 85 | ### Formatting 86 | 87 | The repository currently uses `rustfmt` for all the formatting needs. Running the automated 88 | script mentioned above would format all your code for you :) 89 | 90 | ## Ending Notes 91 | Get cracking, have fun, and do ask for help when in doubt! 92 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "electrs" 3 | version = "0.10.9" 4 | authors = ["Roman Zeyde "] 5 | description = "An efficient re-implementation of Electrum Server in Rust" 6 | license = "MIT" 7 | homepage = "https://github.com/romanz/electrs" 8 | repository = "https://github.com/romanz/electrs" 9 | keywords = ["bitcoin", "electrum", "server", "index", "database"] 10 | documentation = "https://docs.rs/electrs/" 11 | readme = "README.md" 12 | edition = "2021" 13 | rust-version = "1.63.0" 14 | build = "build.rs" 15 | 16 | [features] 17 | default = ["metrics"] 18 | metrics = ["prometheus", "tiny_http"] 19 | metrics_process = ["prometheus/process"] 20 | 21 | [package.metadata.configure_me] 22 | spec = "internal/config_specification.toml" 23 | 24 | [dependencies] 25 | anyhow = "1.0" 26 | bitcoin = { version = "0.32.6", features = ["serde", "rand-std"] } 27 | bitcoin_slices = { version = "0.10.0", features = ["bitcoin", "sha2"] } 28 | bitcoincore-rpc = { version = "0.19.0" } 29 | configure_me = "0.4" 30 | 
crossbeam-channel = "0.5" 31 | dirs-next = "2.0" 32 | env_logger = "0.10" 33 | log = "0.4" 34 | parking_lot = "0.12" 35 | prometheus = { version = "0.13", optional = true } 36 | rayon = "1.9" 37 | serde = "1.0" 38 | serde_derive = "1.0, <=1.0.171" # avoid precompiled binaries (https://github.com/serde-rs/serde/issues/2538) 39 | serde_json = "1.0" 40 | tiny_http = { version = "0.12", optional = true } 41 | 42 | [target.'cfg(windows)'.dependencies] 43 | ctrlc = "=3.4.2" 44 | 45 | [target.'cfg(not(windows))'.dependencies] 46 | signal-hook = "0.3" 47 | 48 | [dependencies.electrs-rocksdb] 49 | version = "0.19.0-e3" 50 | 51 | default-features = false 52 | # ZSTD is used for data compression 53 | # Snappy is only for checking old DB 54 | features = ["zstd", "snappy"] 55 | 56 | [build-dependencies] 57 | configure_me_codegen = { version = "0.4.8", default-features = false } 58 | 59 | [dev-dependencies] 60 | bitcoin-test-data = "0.2.0" 61 | hex_lit = "0.1.1" 62 | tempfile = "3.20" 63 | 64 | [profile.release] 65 | lto = true 66 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | # Important: This file is provided for demonstration purposes and may NOT be suitable for production use. 2 | # The maintainers of electrs are not deeply familiar with Docker, so you should DYOR. 3 | # If you are not familiar with Docker either it's probably be safer to NOT use it. 4 | 5 | FROM debian:bookworm-slim AS base 6 | RUN apt-get update -qqy 7 | RUN apt-get install -qqy librocksdb-dev curl 8 | 9 | ### Electrum Rust Server ### 10 | FROM base AS electrs-build 11 | RUN apt-get install -qqy cargo clang cmake 12 | 13 | # Install electrs 14 | WORKDIR /build/electrs 15 | COPY . . 16 | ENV ROCKSDB_INCLUDE_DIR=/usr/include 17 | ENV ROCKSDB_LIB_DIR=/usr/lib 18 | RUN cargo install --locked --path . 
19 | 20 | FROM base AS result 21 | # Copy the binaries 22 | COPY --from=electrs-build /root/.cargo/bin/electrs /usr/bin/electrs 23 | 24 | WORKDIR / 25 | -------------------------------------------------------------------------------- /Dockerfile.ci: -------------------------------------------------------------------------------- 1 | # Important: This file is provided for demonstration purposes and may NOT be suitable for production use. 2 | # The maintainers of electrs are not deeply familiar with Docker, so you should DYOR. 3 | # If you are not familiar with Docker either it's probably be safer to NOT use it. 4 | 5 | FROM debian:bookworm-slim as base 6 | RUN apt-get update -qqy 7 | RUN apt-get install -qqy librocksdb-dev wget 8 | 9 | ### Electrum Rust Server ### 10 | FROM base as electrs-build 11 | RUN apt-get install -qqy cargo clang cmake 12 | 13 | # Install electrs 14 | WORKDIR /build/electrs 15 | COPY . . 16 | ENV ROCKSDB_INCLUDE_DIR=/usr/include 17 | ENV ROCKSDB_LIB_DIR=/usr/lib 18 | RUN cargo install --locked --path . 19 | 20 | ### Bitcoin Core ### 21 | FROM base as bitcoin-build 22 | # Download 23 | WORKDIR /build/bitcoin 24 | ARG ARCH=x86_64 25 | ARG BITCOIND_VERSION=29.0 26 | RUN wget -q https://bitcoincore.org/bin/bitcoin-core-$BITCOIND_VERSION/bitcoin-$BITCOIND_VERSION-$ARCH-linux-gnu.tar.gz 27 | RUN tar xvf bitcoin-$BITCOIND_VERSION-$ARCH-linux-gnu.tar.gz 28 | RUN mv -v bitcoin-$BITCOIND_VERSION/bin/bitcoind . 29 | RUN mv -v bitcoin-$BITCOIND_VERSION/bin/bitcoin-cli . 
30 | 31 | FROM base as result 32 | # Copy the binaries 33 | COPY --from=electrs-build /root/.cargo/bin/electrs /usr/bin/electrs 34 | COPY --from=bitcoin-build /build/bitcoin/bitcoind /build/bitcoin/bitcoin-cli /usr/bin/ 35 | RUN bitcoind -version && bitcoin-cli -version 36 | 37 | ### Electrum ### 38 | # Clone latest Electrum wallet and a few test tools 39 | WORKDIR /build/ 40 | RUN apt-get install -qqy git libsecp256k1-1 python3-cryptography python3-setuptools python3-venv python3-pip jq curl 41 | RUN git clone --recurse-submodules https://github.com/spesmilo/electrum/ && cd electrum/ && git log -1 42 | RUN python3 -m venv --system-site-packages venv && \ 43 | ELECTRUM_ECC_DONT_COMPILE=1 venv/bin/pip install -e electrum/ && \ 44 | ln /build/venv/bin/electrum /usr/bin/electrum 45 | 46 | RUN electrum version --offline 47 | WORKDIR / 48 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (C) 2018, Roman Zeyde. 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy 4 | of this software and associated documentation files (the "Software"), to deal 5 | in the Software without restriction, including without limitation the rights 6 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | copies of the Software, and to permit persons to whom the Software is 8 | furnished to do so, subject to the following conditions: 9 | 10 | The above copyright notice and this permission notice shall be included in 11 | all copies or substantial portions of the Software. 12 | 13 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 16 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 19 | THE SOFTWARE. 20 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ![Logo](logo/logo.svg) 2 | 3 | # Electrum Server in Rust 4 | 5 | [![CI](https://github.com/romanz/electrs/actions/workflows/rust.yml/badge.svg)](https://github.com/romanz/electrs/actions) 6 | [![PRs Welcome](https://img.shields.io/badge/PRs-welcome-brightgreen.svg?style=flat-square)](https://github.com/romanz/electrs/compare) 7 | [![crates.io](https://img.shields.io/crates/v/electrs.svg)](https://crates.io/crates/electrs) 8 | [![gitter.im](https://badges.gitter.im/romanz/electrs.svg)](https://gitter.im/romanz/electrs) 9 | 10 | An efficient re-implementation of Electrum Server, inspired by [ElectrumX](https://github.com/kyuupichan/electrumx), [Electrum Personal Server](https://github.com/chris-belcher/electrum-personal-server) and [bitcoincore-indexd](https://github.com/jonasschnelli/bitcoincore-indexd). 11 | 12 | The motivation behind this project is to enable a user to self host an Electrum server, 13 | with required hardware resources not much beyond those of a [full node](https://en.bitcoin.it/wiki/Full_node#Why_should_you_use_a_full_node_wallet). 14 | The server indexes the entire Bitcoin blockchain, and the resulting index enables fast queries for any given user wallet, 15 | allowing the user to keep real-time track of balances and transaction history using the [Electrum wallet](https://electrum.org/). 16 | Since it runs on the user's own machine, there is no need for the wallet to communicate with external Electrum servers, 17 | thus preserving the privacy of the user's addresses and balances. 
18 | 19 | [BTC Prague 2024 dev/hack/day](https://btcprague.com/dev-hack-day/) slides are here: https://bit.ly/electrs 20 | 21 | 22 | ## Usage 23 | 24 | **Please prefer to use OUR usage guide!** 25 | 26 | External guides can be out-of-date and have various problems. 27 | At least double-check that the guide you're using is actively maintained. 28 | If you can't use our guide, please ask about what you don't understand or consider using automated deployments. 29 | 30 | Note that this implementation of Electrum server is optimized for **personal/small-scale (family/friends) usage**. 31 | It's a bad idea to run it publicly as it'd expose you to DoS and maybe also other attacks. 32 | If you want to run a public server you may be interested in the [Blockstream fork of electrs](https://github.com/Blockstream/electrs) 33 | which is better optimized for public usage at the cost of consuming *significantly* more resources. 34 | 35 | * [Installation from source](doc/install.md) 36 | * [Pre-built binaries](doc/binaries.md) (No official binaries available but a beta repository is available for installation) 37 | * [Configuration](doc/config.md) 38 | * [Usage](doc/usage.md) 39 | * [Monitoring](doc/monitoring.md) 40 | * [Upgrading](doc/upgrading.md) - **contains information about important changes from older versions** 41 | 42 | ## Features 43 | 44 | * Supports Electrum protocol [v1.4](https://electrumx-spesmilo.readthedocs.io/en/latest/protocol.html) 45 | * Maintains an index over transaction inputs and outputs, allowing fast balance queries 46 | * Fast synchronization of the Bitcoin blockchain (~6.5 hours for ~504GB @ August 2023) using HDD storage. 
47 | * Low index storage overhead (~10%), relying on a local full node for transaction retrieval 48 | * Efficient mempool tracker (allowing better fee [estimation](https://github.com/spesmilo/electrum/blob/59c1d03f018026ac301c4e74facfc64da8ae4708/RELEASE-NOTES#L34-L46)) 49 | * Low CPU & memory usage (after initial indexing) 50 | * [`txindex`](https://github.com/bitcoinbook/bitcoinbook/blob/develop/ch03_bitcoin-core.adoc#txindex) is not required for the Bitcoin node 51 | * Uses a single [RocksDB](https://github.com/spacejam/rust-rocksdb) database, for better consistency and crash recovery 52 | 53 | ## Altcoins 54 | 55 | Altcoins are **not supported**! 56 | Forks of Bitcoin codebase that relax the consensus rules (hard forks) are also **not supported**. 57 | 58 | You may be able to find a fork of electrs that does support them, look around or make your own, just don't file issues/PRs here. 59 | 60 | ## Index database 61 | 62 | The database schema is described [here](doc/schema.md). 63 | 64 | ## Contributing 65 | 66 | All contributions to this project are welcome. Please refer to the [Contributing Guidelines](CONTRIBUTING.md) for more details. 67 | 68 | ## Logo 69 | 70 | [Our logo](logo/) is generously provided by [Dominik Průša](https://github.com/DominoPrusa) under the MIT license. 71 | Based on the [Electrum logo](https://github.com/spesmilo/electrum/blob/master/LICENCE) 72 | and the [Rust language logo](https://www.rust-lang.org/policies/media-guide). 
73 | -------------------------------------------------------------------------------- /TODO.md: -------------------------------------------------------------------------------- 1 | # Electrum 2 | 3 | * Snapshot DB after successful indexing - and run queries on the latest snapshot 4 | * Update height to -1 for txns with any [unconfirmed input](https://electrumx.readthedocs.io/en/latest/protocol-basics.html#status) 5 | 6 | # Rust 7 | 8 | * Use [bytes](https://carllerche.github.io/bytes/bytes/index.html) instead of `Vec` when possible 9 | * Use generators instead of vectors 10 | * Use proper HTTP parser for JSONRPC replies over persistent connection 11 | 12 | # Performance 13 | 14 | * Consider https://github.com/facebook/rocksdb/wiki/RocksDB-Tuning-Guide#difference-of-spinning-disk 15 | -------------------------------------------------------------------------------- /build.rs: -------------------------------------------------------------------------------- 1 | fn main() { 2 | configure_me_codegen::build_script_auto().unwrap_or_else(|error| error.report_and_exit()) 3 | } 4 | -------------------------------------------------------------------------------- /contrib/.gitignore: -------------------------------------------------------------------------------- 1 | .venv 2 | -------------------------------------------------------------------------------- /contrib/client.py: -------------------------------------------------------------------------------- 1 | import json 2 | import socket 3 | 4 | class Client: 5 | def __init__(self, addr): 6 | self.s = socket.create_connection(addr) 7 | self.f = self.s.makefile('r') 8 | self.id = 0 9 | 10 | def call(self, requests): 11 | requests = list(requests) 12 | for request in requests: 13 | request['id'] = self.id 14 | request['jsonrpc'] = '2.0' 15 | self.id += 1 16 | 17 | msg = json.dumps(requests) + '\n' 18 | self.s.sendall(msg.encode('ascii')) 19 | response = json.loads(self.f.readline()) 20 | try: 21 | return [r['result'] for r in 
response] 22 | except KeyError: 23 | raise ValueError(response) 24 | 25 | 26 | def request(method, *args): 27 | return {'method': method, 'params': list(args)} 28 | -------------------------------------------------------------------------------- /contrib/daemon.py: -------------------------------------------------------------------------------- 1 | import binascii 2 | import json 3 | import os 4 | import socket 5 | 6 | 7 | class Daemon: 8 | def __init__(self, port, cookie_dir): 9 | self.sock = socket.create_connection(('localhost', port)) 10 | self.fd = self.sock.makefile() 11 | path = os.path.join(os.path.expanduser(cookie_dir), '.cookie') 12 | cookie = binascii.b2a_base64(open(path, 'rb').read()) 13 | self.cookie = cookie.decode('ascii').strip() 14 | self.index = 0 15 | 16 | def request(self, method, params_list): 17 | obj = [{"method": method, "params": params, "id": self.index} 18 | for params in params_list] 19 | request = json.dumps(obj) 20 | 21 | msg = ('POST / HTTP/1.1\n' 22 | 'Authorization: Basic {}\n' 23 | 'Content-Length: {}\n\n' 24 | '{}'.format(self.cookie, len(request), request)) 25 | self.sock.sendall(msg.encode('ascii')) 26 | 27 | status = self.fd.readline().strip() 28 | while True: 29 | if self.fd.readline().strip(): 30 | continue # skip headers 31 | else: 32 | break # next line will contain the response 33 | 34 | data = self.fd.readline().strip() 35 | replies = json.loads(data) 36 | for reply in replies: 37 | assert reply['error'] is None, reply 38 | assert reply['id'] == self.index 39 | 40 | self.index += 1 41 | return [d['result'] for d in replies] 42 | -------------------------------------------------------------------------------- /contrib/get_balance.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import argparse 3 | import base58 4 | import hashlib 5 | import sys 6 | 7 | from logbook import Logger, StreamHandler 8 | 9 | import client 10 | 11 | log = Logger("get_balance") 12 
| 13 | 14 | prefix_dict = { 15 | 'mainnet': { 16 | 'xpub': '0488b21e', # P2PKH or P2SH - m/44'/0' 17 | 'ypub': '049d7cb2', # P2WPKH in P2SH - m/49'/0' 18 | 'zpub': '04b24746', # P2WPKH - m/84'/0' 19 | }, 20 | 'testnet': { 21 | 'tpub': '043587cf', # P2PKH or P2SH - m/44'/1' 22 | 'upub': '044a5262', # P2WPKH in P2SH - m/49'/1' 23 | 'vpub': '045f1cf6', # P2WPKH - m/84'/1' 24 | }, 25 | 'regtest': { 26 | }, 27 | } 28 | 29 | 30 | def convert_key(key, target_prefix, network_name): 31 | decoded_key_bytes = base58.b58decode_check(key) 32 | target_key_bytes = ( 33 | bytes.fromhex(prefix_dict[network_name][target_prefix]) + 34 | decoded_key_bytes[4:]) 35 | return base58.b58encode_check(target_key_bytes).decode('ascii') 36 | 37 | 38 | def compute_xpub_balance(xpub, conn, network, details): 39 | total = 0 40 | for change in (0, 1): 41 | empty = 0 42 | for n in range(1000): 43 | address = xpub.subkey(change).subkey(n).address() 44 | script = network.parse.address(address).script() 45 | script_hash = hashlib.sha256(script).digest()[::-1].hex() 46 | # conn.call([client.request('blockchain.scripthash.subscribe', 47 | # script_hash)]) 48 | result, = conn.call( 49 | [client.request('blockchain.scripthash.get_history', 50 | script_hash)]) 51 | ntx = len(result) 52 | if len(result): 53 | log.debug(result) 54 | result, = conn.call( 55 | [client.request('blockchain.scripthash.get_balance', 56 | script_hash)]) 57 | confirmed = result['confirmed'] / 1e8 58 | total += confirmed 59 | 60 | log.debug( 61 | '{}/{}: {} -> {} BTC confirmed, {} BTC unconfirmed, ' 62 | '{} txs balance = {} BTC', change, n, address, 63 | result["confirmed"] / 1e8, result["unconfirmed"] / 1e8, ntx, 64 | total) 65 | 66 | if confirmed or ntx: 67 | empty = 0 68 | if confirmed > 0: 69 | details[address] = confirmed 70 | else: 71 | empty += 1 72 | if empty >= 10: 73 | break 74 | return total 75 | 76 | 77 | def compute_address_balance(address, conn, network): 78 | script = network.parse.address(address).script() 79 | 
script_hash = hashlib.sha256(script).digest()[::-1].hex() 80 | result, = conn.call( 81 | [client.request('blockchain.scripthash.get_balance', 82 | script_hash)]) 83 | return result['confirmed'] / 1e8 84 | 85 | 86 | def main(): 87 | parser = argparse.ArgumentParser() 88 | parser.add_argument('--host', default='localhost') 89 | parser.add_argument('--network', default='mainnet', 90 | choices=['mainnet', 'testnet', 'regtest']) 91 | parser.add_argument('address') 92 | args = parser.parse_args() 93 | 94 | if args.network == 'regtest': 95 | port = 60401 96 | from pycoin.symbols.xrt import network 97 | elif args.network == 'testnet': 98 | port = 60001 99 | from pycoin.symbols.xtn import network 100 | elif args.network == 'mainnet': 101 | port = 50001 102 | from pycoin.symbols.btc import network 103 | else: 104 | raise ValueError(f"unknown network: {args.network}") 105 | 106 | conn = client.Client((args.host, port)) 107 | total = 0 108 | xpub = (network.parse.bip32(args.address) or 109 | network.parse.bip49(args.address) or 110 | network.parse.bip84(args.address)) 111 | 112 | if xpub is None: 113 | total = compute_address_balance(args.address, conn, network) 114 | else: 115 | details = {} 116 | total = compute_xpub_balance(xpub, conn, network, details) 117 | 118 | for prefix in prefix_dict[args.network]: 119 | if args.address[:4] != prefix: 120 | key = convert_key(args.address, prefix, args.network) 121 | log.debug('Trying with {}', key) 122 | xpub = (network.parse.bip32(key) or network.parse.bip49(key) 123 | or network.parse.bip84(key)) 124 | total += compute_xpub_balance(xpub, conn, network, details) 125 | 126 | for addr in details: 127 | log.info('{} balance: {} BTC', addr, details[addr]) 128 | 129 | log.info('total balance: {} BTC', total) 130 | 131 | 132 | if __name__ == '__main__': 133 | with StreamHandler(sys.stderr, level='DEBUG').applicationbound(): 134 | main() 135 | -------------------------------------------------------------------------------- 
/contrib/get_balance.sh: -------------------------------------------------------------------------------- 1 | venv_wrapper.sh -------------------------------------------------------------------------------- /contrib/get_tip.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import argparse 3 | import client 4 | import json 5 | 6 | def main(): 7 | parser = argparse.ArgumentParser() 8 | parser.add_argument("host") 9 | parser.add_argument("port", type=int) 10 | args = parser.parse_args() 11 | 12 | conn = client.Client((args.host, args.port)) 13 | print(conn.call([client.request("blockchain.headers.subscribe")])) 14 | 15 | if __name__ == '__main__': 16 | main() 17 | -------------------------------------------------------------------------------- /contrib/get_tx.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import argparse 3 | import client 4 | import json 5 | 6 | def main(): 7 | parser = argparse.ArgumentParser() 8 | parser.add_argument('--host', default='localhost') 9 | parser.add_argument("txid") 10 | args = parser.parse_args() 11 | 12 | conn = client.Client((args.host, 50001)) 13 | tx, = conn.call([client.request("blockchain.transaction.get", args.txid, True)]) 14 | print(json.dumps(tx)) 15 | 16 | if __name__ == "__main__": 17 | main() 18 | -------------------------------------------------------------------------------- /contrib/health_check.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import argparse 3 | import client 4 | import json 5 | 6 | def main(): 7 | parser = argparse.ArgumentParser() 8 | parser.add_argument("host") 9 | parser.add_argument("port", type=int) 10 | args = parser.parse_args() 11 | 12 | conn = client.Client((args.host, args.port)) 13 | print(json.dumps(conn.call([client.request("server.version", "health_check", "1.4")]))) 14 | 15 | if __name__ 
== '__main__': 16 | main() 17 | -------------------------------------------------------------------------------- /contrib/history.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import argparse 3 | import datetime 4 | import hashlib 5 | import io 6 | import sys 7 | 8 | from logbook import Logger, StreamHandler 9 | import prettytable 10 | 11 | import client 12 | 13 | log = Logger('electrum') 14 | 15 | 16 | def _script_hash(script): 17 | return hashlib.sha256(script).digest()[::-1].hex() 18 | 19 | 20 | def show_rows(rows, field_names): 21 | t = prettytable.PrettyTable() 22 | t.field_names = field_names 23 | t.add_rows(rows) 24 | for f in t.field_names: 25 | if "mBTC" in f: 26 | t.align[f] = "r" 27 | print(t) 28 | 29 | 30 | def main(): 31 | parser = argparse.ArgumentParser() 32 | parser.add_argument('--host', default='localhost') 33 | parser.add_argument('--network', default='mainnet') 34 | parser.add_argument('address', nargs='+') 35 | parser.add_argument('--only-subscribe', action='store_true', default=False) 36 | parser.add_argument('--no-merkle-proofs', action='store_true', default=False) 37 | args = parser.parse_args() 38 | 39 | if args.network == 'regtest': 40 | port = 60401 41 | from pycoin.symbols.xrt import network 42 | elif args.network == 'testnet': 43 | port = 60001 44 | from pycoin.symbols.xtn import network 45 | elif args.network == 'mainnet': 46 | port = 50001 47 | from pycoin.symbols.btc import network 48 | else: 49 | raise ValueError(f"unknown network: {args.network}") 50 | 51 | hostport = (args.host, port) 52 | log.info('connecting to {}:{}', *hostport) 53 | conn = client.Client(hostport) 54 | 55 | tip, = conn.call([client.request('blockchain.headers.subscribe')]) 56 | 57 | script_hashes = [ 58 | _script_hash(network.parse.address(addr).script()) 59 | for addr in args.address 60 | ] 61 | 62 | conn.call( 63 | client.request('blockchain.scripthash.subscribe', script_hash) 64 | for 
script_hash in script_hashes 65 | ) 66 | log.info('subscribed to {} scripthashes', len(script_hashes)) 67 | if args.only_subscribe: 68 | return 69 | 70 | balances = conn.call( 71 | client.request('blockchain.scripthash.get_balance', script_hash) 72 | for script_hash in script_hashes 73 | ) 74 | 75 | unspents = conn.call( 76 | client.request('blockchain.scripthash.listunspent', script_hash) 77 | for script_hash in script_hashes 78 | ) 79 | for addr, balance, unspent in sorted(zip(args.address, balances, unspents), key=lambda v: v[0]): 80 | if unspent: 81 | log.debug("{}: confirmed={:,.5f} mBTC, unconfirmed={:,.5f} mBTC", 82 | addr, balance["confirmed"] / 1e5, balance["unconfirmed"] / 1e5) 83 | for u in unspent: 84 | log.debug("\t{}:{} = {:,.5f} mBTC {}", 85 | u["tx_hash"], u["tx_pos"], u["value"] / 1e5, 86 | f'@ {u["height"]}' if u["height"] else "") 87 | 88 | histories = conn.call( 89 | client.request('blockchain.scripthash.get_history', script_hash) 90 | for script_hash in script_hashes 91 | ) 92 | txids_map = dict( 93 | (tx['tx_hash'], tx['height'] if tx['height'] > 0 else None) 94 | for history in histories 95 | for tx in history 96 | ) 97 | log.info('got history of {} transactions', len(txids_map)) 98 | 99 | txs = map(network.tx.from_hex, conn.call( 100 | client.request('blockchain.transaction.get', txid) 101 | for txid in txids_map.keys() 102 | )) 103 | txs_map = dict(zip(txids_map.keys(), txs)) 104 | log.info('loaded {} transactions', len(txids_map)) 105 | 106 | confirmed_txids = {txid: height for txid, height in txids_map.items() if height is not None} 107 | 108 | heights = set(confirmed_txids.values()) 109 | def _parse_header(header): 110 | return network.block.parse_as_header(io.BytesIO(bytes.fromhex(header))) 111 | headers = map(_parse_header, conn.call( 112 | client.request('blockchain.block.header', height) 113 | for height in heights 114 | )) 115 | def _parse_timestamp(header): 116 | return 
datetime.datetime.utcfromtimestamp(header.timestamp).strftime('%Y-%m-%dT%H:%M:%SZ') 117 | timestamps = map(_parse_timestamp, headers) 118 | timestamps_map = dict(zip(heights, timestamps)) 119 | log.info('loaded {} header timestamps', len(heights)) 120 | 121 | if args.no_merkle_proofs: 122 | return 123 | 124 | proofs = conn.call( 125 | client.request('blockchain.transaction.get_merkle', txid, height) 126 | for txid, height in confirmed_txids.items() 127 | ) 128 | log.info('loaded {} merkle proofs', len(proofs)) # TODO: verify proofs 129 | 130 | sorted_txdata = sorted( 131 | (proof['block_height'], proof['pos'], txid) 132 | for proof, txid in zip(proofs, confirmed_txids) 133 | ) 134 | 135 | utxos = {} 136 | balance = 0 137 | 138 | rows = [] 139 | script_hashes = set(script_hashes) 140 | for block_height, block_pos, txid in sorted_txdata: 141 | tx_obj = txs_map[txid] 142 | for txi in tx_obj.txs_in: 143 | utxos.pop((str(txi.previous_hash), txi.previous_index), None) 144 | 145 | for index, txo in enumerate(tx_obj.txs_out): 146 | if _script_hash(txo.puzzle_script()) in script_hashes: 147 | utxos[(txid, index)] = txo 148 | 149 | diff = sum(txo.coin_value for txo in utxos.values()) - balance 150 | balance += diff 151 | confirmations = tip['height'] - block_height + 1 152 | rows.append([txid, timestamps_map[block_height], block_height, confirmations, f'{diff/1e5:,.5f}', f'{balance/1e5:,.5f}']) 153 | show_rows(rows, ["txid", "block timestamp", "height", "confirmations", "delta (mBTC)", "total (mBTC)"]) 154 | 155 | tip_header = _parse_header(tip['hex']) 156 | log.info('tip={}, height={} @ {}', tip_header.id(), tip['height'], _parse_timestamp(tip_header)) 157 | 158 | unconfirmed = {txs_map[txid] for txid, height in txids_map.items() if height is None} 159 | # TODO: show unconfirmed balance 160 | 161 | if __name__ == '__main__': 162 | StreamHandler(sys.stderr).push_application() 163 | main() 164 | -------------------------------------------------------------------------------- 
/contrib/history.sh: -------------------------------------------------------------------------------- 1 | venv_wrapper.sh -------------------------------------------------------------------------------- /contrib/local-electrum.bash: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -eux 3 | 4 | ADDR=127.0.0.1 # localhost 5 | PORT=50001 # default mainnet Electrum RPC port 6 | PROTOCOL=t # TCP (no SSL) 7 | 8 | # Use only local Electrum server: 9 | electrum --oneserver --server="$ADDR:$PORT:$PROTOCOL" $* 10 | -------------------------------------------------------------------------------- /contrib/mempool.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import argparse 4 | from daemon import Daemon 5 | 6 | import numpy as np 7 | import matplotlib.pyplot as plt 8 | 9 | 10 | def main(): 11 | parser = argparse.ArgumentParser() 12 | parser.add_argument('--testnet', action='store_true') 13 | args = parser.parse_args() 14 | 15 | if args.testnet: 16 | d = Daemon(port=18332, cookie_dir='~/.bitcoin/testnet3') 17 | else: 18 | d = Daemon(port=8332, cookie_dir='~/.bitcoin') 19 | 20 | txids, = d.request('getrawmempool', [[False]]) 21 | txids = list(map(lambda a: [a], txids)) 22 | 23 | entries = d.request('getmempoolentry', txids) 24 | entries = [{'fee': e['fees']['base']*1e8, 'vsize': e['vsize']} for e in entries] 25 | for e in entries: 26 | e['rate'] = e['fee'] / e['vsize'] # sat/vbyte 27 | entries.sort(key=lambda e: e['rate'], reverse=True) 28 | 29 | vsize = np.array([e['vsize'] for e in entries]).cumsum() 30 | rate = np.array([e['rate'] for e in entries]) 31 | 32 | plt.semilogy(vsize / 1e6, rate, '-') 33 | plt.xlabel('Mempool size (MB)') 34 | plt.ylabel('Fee rate (sat/vbyte)') 35 | plt.title('{} transactions'.format(len(entries))) 36 | plt.grid() 37 | plt.show() 38 | 39 | 40 | if __name__ == '__main__': 41 | main() 42 | 
-------------------------------------------------------------------------------- /contrib/script_hash.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import argparse 3 | import datetime 4 | import hashlib 5 | import io 6 | import sys 7 | 8 | 9 | def main(): 10 | parser = argparse.ArgumentParser() 11 | parser.add_argument('--network', default='mainnet') 12 | args = parser.parse_args() 13 | 14 | if args.network == 'regtest': 15 | from pycoin.symbols.xrt import network 16 | elif args.network == 'testnet': 17 | from pycoin.symbols.xtn import network 18 | elif args.network == 'mainnet': 19 | from pycoin.symbols.btc import network 20 | else: 21 | raise ValueError(f"unknown network: {args.network}") 22 | 23 | for line in sys.stdin: 24 | addr = line.strip() 25 | script = network.parse.address(addr).script() 26 | script_hash = hashlib.sha256(script).digest() 27 | print(script_hash[::-1].hex()) 28 | 29 | 30 | if __name__ == '__main__': 31 | main() -------------------------------------------------------------------------------- /contrib/script_hash.sh: -------------------------------------------------------------------------------- 1 | venv_wrapper.sh -------------------------------------------------------------------------------- /contrib/testChanges.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | cd `dirname $0`/.. 
3 | cargo build --locked --no-default-features --all 4 | cargo build --locked --no-default-features --all 5 | cargo build --locked --all 6 | cargo build --locked --features metrics_process --all 7 | cargo fmt 8 | cargo clippy -- -D warnings 9 | cargo test --locked --all 10 | -------------------------------------------------------------------------------- /contrib/tx_fee.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import argparse 3 | import client 4 | 5 | def main(): 6 | parser = argparse.ArgumentParser() 7 | parser.add_argument('--host', default='localhost') 8 | parser.add_argument("txid") 9 | args = parser.parse_args() 10 | 11 | conn = client.Client((args.host, 50001)) 12 | tx, = conn.call([client.request("blockchain.transaction.get", args.txid, True)]) 13 | requests = [] 14 | for vin in tx["vin"]: 15 | prev_txid = vin["txid"] 16 | requests.append(client.request("blockchain.transaction.get", prev_txid, True)) 17 | 18 | fee = 0 19 | for vin, prev_tx in zip(tx["vin"], conn.call(requests)): 20 | txo = prev_tx["vout"][vin["vout"]] 21 | fee += txo["value"] 22 | 23 | fee -= sum(vout["value"] for vout in tx["vout"]) 24 | 25 | print(f'vSize = {tx["vsize"]}, Fee = {1e3 * fee:.2f} mBTC = {1e8 * fee / tx["vsize"]:.2f} sat/vB') 26 | 27 | if __name__ == "__main__": 28 | main() 29 | -------------------------------------------------------------------------------- /contrib/venv_wrapper.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" 6 | 7 | cmd="$(basename -- "${BASH_SOURCE[0]}" .sh)".py 8 | 9 | if [ "$1" = "--venv" ]; then 10 | shift 11 | if [ ! 
-d .venv ]; then 12 | python -m venv .venv 13 | .venv/bin/pip install pycoin logbook prettytable base58 14 | fi 15 | PATH=$PWD/.venv/bin:$PATH 16 | fi 17 | 18 | exec python "$cmd" "$@" 19 | -------------------------------------------------------------------------------- /doc/binaries.md: -------------------------------------------------------------------------------- 1 | ## Native OS packages 2 | 3 | There are currently no official/stable binary packages. 4 | 5 | However, there's a [*beta* repository for Debian 10](https://deb.ln-ask.me) (should work on recent Ubuntu, but not tested well-enough) 6 | The repository provides several significant advantages: 7 | 8 | * Everything is completely automatic - after installing `electrs` via `apt`, it's running and will automatically run on reboot, restart after crash.. 9 | It also connects to bitcoind out-of-the-box, no messing with config files or anything else. 10 | It just works. 11 | * Prebuilt binaries save you a lot of time. 12 | The binary installation of all the components is under 3 minutes on common hardware. 13 | Building from source is much longer. 14 | * The repository contains some security hardening out-of-the-box - separate users for services, use of [btc-rpc-proxy](https://github.com/Kixunil/btc-rpc-proxy), etc. 15 | 16 | And two disadvantages: 17 | 18 | * It's currently not trivial to independently verify the built packages, so you may need to trust the author of the repository. 19 | The build is now deterministic but nobody verified it independently yet. 20 | * The repository is considered beta. 21 | electrs seems to work well so far but was not tested heavily. 22 | The author of the repository is also a contributor to `electrs` and appreciates [bug reports](https://github.com/Kixunil/cryptoanarchy-deb-repo-builder/issues), 23 | [test reports](https://github.com/Kixunil/cryptoanarchy-deb-repo-builder/issues/61), and other contributions. 
24 | -------------------------------------------------------------------------------- /doc/config.md: -------------------------------------------------------------------------------- 1 | ## Manual configuration 2 | 3 | This applies only if you do **not** use some other automated systems such as Debian packages. 4 | If you use automated systems, refer to their documentation first! 5 | 6 | ### Bitcoind configuration 7 | 8 | Pruning must be turned **off** for `electrs` to work. 9 | `txindex` is allowed but unnecessary for `electrs`. 10 | However, you might still need it if you run other services (e.g.`eclair`). 11 | The option `maxconnections` (if used) should be set to 12 or more for bitcoind to accept inbound p2p connections. 12 | Note that setting `maxuploadtarget` may cause p2p-based sync to fail - so consider using `-whitelist=download@127.0.0.1` to disable the limit for local p2p connections. 13 | 14 | The highly recommended way of authenticating `electrs` is using cookie file. 15 | It's the most [secure](https://github.com/Kixunil/security_writings/blob/master/cookie_files.md) and robust method. 16 | Set `rpccookiefile` option of `bitcoind` to a file within an existing directory which it can access. 17 | You can skip it if you're running both daemons under the same user and with the default directories. 18 | 19 | `electrs` will wait for `bitcoind` to sync, however, you will be unable to use it until the syncing is done. 20 | 21 | Example command for running `bitcoind` (assuming same user, default dirs): 22 | 23 | ```bash 24 | $ bitcoind -server=1 -txindex=0 -prune=0 25 | ``` 26 | ### Electrs configuration 27 | 28 | **Note:** this documentation may occasionally become stale. We recommend running `electrs --help` to get an up-to-date list of options. 29 | 30 | Electrs can be configured using command line, environment variables and configuration files (or their combination). 
31 | It is highly recommended to use configuration files for any non-trivial setups since it's easier to manage. 32 | If you're setting the password manually instead of using cookie files, a configuration file is the only way to set it due to security reasons. 33 | 34 | **Important:** you must configure `db_dir` to be either an empty directory or previously used by `electrs`! 35 | The contents of this directory are considered **internal to `electrs`** and any tampering that is **not** explicitly allowed by documentation 36 | can lead to serious problems! Currently the *only* permitted operation is *deleting the whole `mainnet` subdirectory when upgrading to version 0.9.0* - see the upgrading section. 37 | 38 | #### Configuration files and priorities 39 | 40 | The Toml-formatted config files ([an example here](config_example.toml)) are (from lowest priority to highest): `/etc/electrs/config.toml`, `~/.electrs/config.toml`, `./electrs.toml`. 41 | They are loaded if they *exist* and ignored if not; however, to aid debugging, any other error when opening them (such as a permission error) will make electrs exit with an error. 42 | 43 | The options in highest-priority config files override options set in lowest-priority config files. 44 | If loading these files is undesirable (common in case of protected systemd services), use the `--skip-default-conf-files` argument to prevent it. 45 | 46 | **Environment variables** override options in config files and finally **arguments** override everything else. 47 | 48 | There are two special arguments: `--conf`, which reads the specified file, and `--conf-dir`, which reads all the files in the specified directory. 49 | 50 | The options in those files override **everything** that was set previously, **including arguments** that were passed before these two special arguments. 51 | 52 | In general, later arguments override previous ones. 53 | It is a good practice to use these special arguments at the beginning of the command line in order to avoid confusion. 
54 | 55 | **Naming convention** 56 | 57 | For each command line argument an **environment variable** of the same name with `ELECTRS_` prefix, upper case letters and underscores instead of hyphens exists 58 | (e.g. you can use `ELECTRS_ELECTRUM_RPC_ADDR` instead of `--electrum-rpc-addr`). 59 | 60 | Similarly, for each such argument an option in config file exists with underscores instead of hyphens (e.g. `electrum_rpc_addr`). 61 | 62 | You need to use `true` value in case of flags (e.g. `timestamp = true`). 63 | 64 | **Authentication** 65 | 66 | In addition, config files support `auth` option to specify username and password. 67 | This is not available using command line or environment variables for security reasons (other applications could read it otherwise). 68 | **Important note**: `auth` is different from `cookie_file`, which points to a file containing the cookie instead of being the cookie itself! 69 | 70 | If you are using `-rpcuser=USER` and `-rpcpassword=PASSWORD` of `bitcoind` for authentication, please use `auth="USER:PASSWORD"` option in one of the [config files](config.md#configuration-files-and-priorities). 71 | Otherwise, [`~/.bitcoin/.cookie`](https://github.com/bitcoin/bitcoin/blob/0212187fc624ea4a02fc99bc57ebd413499a9ee1/contrib/debian/examples/bitcoin.conf#L70-L72) will be used as the default cookie file, 72 | allowing this server to use bitcoind JSONRPC interface. 73 | 74 | Note: there was a `cookie` option in the version 0.8.7 and below, it's now deprecated - do **not** use, it will be removed. 75 | Please read upgrade notes if you're upgrading to a newer version. 76 | 77 | ## Connecting an Electrum client ## 78 | 79 | To connect to your Electrs server, you will need to point Electrum to your server using the `ip_address:port` syntax. You will notice that most default servers in Electrum use the `50002` port (which is for SSL connections), while Electrs serves port `50001` and does not provide SSL out of the box. 
80 | 81 | You would need to either use a webserver to provide SSL (see _SSL connection_ below), or connect without SSL. To tell Electrum to connect to your server without SSL, you need to add `:t` after the port (ie: `localhost:50001:t`). Please note that this is not secure and therefore recommended only for local connections. 82 | 83 | Electrs will listen by default on `127.0.0.1:50001`, which means it will only serve clients in the local machine. This is configured via the `electrum_rpc_addr` setting and if you wish to connect from another machine, you need to change it to `0.0.0.0:50001`. This is less secure though, and the recommended way to access Electrs remotely is to keep listening on `127.0.0.1` and tunnel to your server. 84 | 85 | ## Extra configuration suggestions 86 | 87 | ### SSL connection 88 | 89 | In order to use a secure connection, you can also use [NGINX as an SSL endpoint](https://docs.nginx.com/nginx/admin-guide/security-controls/terminating-ssl-tcp/#) 90 | by placing the following block in `nginx.conf`. 91 | Notice that while electrs doesn't use HTTP the configuration below uses raw TCP stream which works. 92 | 93 | ```nginx 94 | stream { 95 | upstream electrs { 96 | server 127.0.0.1:50001; 97 | } 98 | 99 | server { 100 | listen 50002 ssl; 101 | proxy_pass electrs; 102 | 103 | ssl_certificate /path/to/example.crt; 104 | ssl_certificate_key /path/to/example.key; 105 | ssl_session_cache shared:SSL:1m; 106 | ssl_session_timeout 4h; 107 | ssl_protocols TLSv1 TLSv1.1 TLSv1.2 TLSv1.3; 108 | ssl_prefer_server_ciphers on; 109 | } 110 | } 111 | ``` 112 | 113 | ```bash 114 | $ sudo systemctl restart nginx 115 | $ electrum --oneserver --server=example:50002:s 116 | ``` 117 | 118 | Note: If you are connecting to electrs from Eclair Mobile or another similar client which does not allow self-signed SSL certificates, you can obtain a free SSL certificate as follows: 119 | 120 | 1. 
Follow the instructions at https://certbot.eff.org/ to install the certbot on your system. 121 | 2. When certbot obtains the SSL certificates for you, change the SSL paths in the nginx template above as follows: 122 | ``` 123 | ssl_certificate /etc/letsencrypt/live/<your-domain>/fullchain.pem; 124 | ssl_certificate_key /etc/letsencrypt/live/<your-domain>/privkey.pem; 125 | ``` 126 | 127 | ### Tor hidden service 128 | 129 | Install Tor on your server and client machines (assuming Ubuntu/Debian): 130 | 131 | ``` 132 | $ sudo apt install tor 133 | ``` 134 | 135 | Add the following config to `/etc/tor/torrc`: 136 | ``` 137 | HiddenServiceDir /var/lib/tor/electrs_hidden_service/ 138 | HiddenServiceVersion 3 139 | HiddenServicePort 50001 127.0.0.1:50001 140 | ``` 141 | 142 | If you use [the *beta* Debian repository](binaries.md#native-os-packages), 143 | it is cleaner to install `tor-hs-patch-config` using `apt` and then placing the configuration into a file inside `/etc/tor/hidden-services.d`. 144 | 145 | Restart the service: 146 | ``` 147 | $ sudo systemctl restart tor 148 | ``` 149 | 150 | Note: your server's onion address is stored under: 151 | ``` 152 | $ sudo cat /var/lib/tor/electrs_hidden_service/hostname 153 | <your-onion-address>.onion 154 | ``` 155 | 156 | On your client machine, run the following command (assuming Tor proxy service runs on port 9050): 157 | ``` 158 | $ electrum --oneserver --server <your-onion-address>.onion:50001:t --proxy socks5:127.0.0.1:9050 159 | ``` 160 | 161 | For more details, see http://docs.electrum.org/en/latest/tor.html. 162 | 163 | ### Sample Systemd Unit File 164 | 165 | If you use [the *beta* Debian repository](binaries.md#native-os-packages), you should skip this section, 166 | as the appropriate systemd unit file is installed automatically. 167 | 168 | You may wish to have systemd manage electrs so that it's "always on". 
169 | Here is a sample unit file (which assumes that the bitcoind unit file is `bitcoind.service`): 170 | 171 | ``` 172 | [Unit] 173 | Description=Electrs 174 | After=bitcoind.service 175 | 176 | [Service] 177 | WorkingDirectory=/home/bitcoin/electrs 178 | ExecStart=/home/bitcoin/electrs/target/release/electrs --log-filters INFO --db-dir ./db --electrum-rpc-addr="127.0.0.1:50001" 179 | User=bitcoin 180 | Group=bitcoin 181 | Type=simple 182 | KillMode=process 183 | TimeoutSec=60 184 | Restart=always 185 | RestartSec=60 186 | 187 | Environment="RUST_BACKTRACE=1" 188 | 189 | # Hardening measures 190 | PrivateTmp=true 191 | ProtectSystem=full 192 | NoNewPrivileges=true 193 | MemoryDenyWriteExecute=true 194 | 195 | [Install] 196 | WantedBy=multi-user.target 197 | ``` 198 | -------------------------------------------------------------------------------- /doc/config_example.toml: -------------------------------------------------------------------------------- 1 | # DO NOT EDIT THIS FILE DIRECTLY - COPY IT FIRST! 2 | # If you edit this, you will cry a lot during update and will not want to live anymore! 3 | 4 | # This is an EXAMPLE of how configuration file should look like. 5 | # Do NOT blindly copy this and expect it to work for you! 6 | # If you don't know what you're doing consider using automated setup or ask an experienced friend. 7 | 8 | # This example contains only the most important settings. 9 | # See docs or electrs man page for advanced settings. 10 | 11 | # File where bitcoind stores the cookie, usually file .cookie in its datadir 12 | cookie_file = "/var/run/bitcoin-mainnet/cookie" 13 | 14 | # The listening RPC address of bitcoind, port is usually 8332 15 | daemon_rpc_addr = "127.0.0.1:8332" 16 | 17 | # The listening P2P address of bitcoind, port is usually 8333 18 | daemon_p2p_addr = "127.0.0.1:8333" 19 | 20 | # Directory where the index should be stored. It should have at least 70GB of free space. 
21 | db_dir = "/some/fast/storage/with/big/size" 22 | 23 | # bitcoin means mainnet. Don't set to anything else unless you're a developer. 24 | network = "bitcoin" 25 | 26 | # The address on which electrs should listen. Warning: 0.0.0.0 is probably a bad idea! 27 | # Tunneling is the recommended way to access electrs remotely. 28 | electrum_rpc_addr = "127.0.0.1:50001" 29 | 30 | # How much information about internal workings should electrs print. Increase before reporting a bug. 31 | log_filters = "INFO" 32 | -------------------------------------------------------------------------------- /doc/cookie_deprecation.md: -------------------------------------------------------------------------------- 1 | # Deprecation of cookie option 2 | 3 | ## What? 4 | 5 | As of 0.8.8 the `cookie` option is deprecated and it will be removed. 6 | A new `auth` option was added. 7 | If you don't use the `cookie` option, you're not affected and don't need to read this. 8 | Note that this is different from `cookie_file`. 9 | 10 | ## Why? 11 | 12 | The option was confusing: 13 | 14 | * If you entered the path to cookie file (usually `~/.bitcoin/.cookie`), it wouldn't work. 15 | * If you copied the contents of cookie file into it, `electrs` would break at the next restart of the system. 16 | * If you used a script to fix the above run before `electrs` starts, it'd still break if `bitcoind` restarted for any reason. 17 | * If you used `BindsTo` option of systemd, you'd solve the issue but introduce needless downtime and waste of performance. 18 | * Entering `username:password` was the only valid use of `cookie` but it had nothing to do with cookie. 19 | 20 | ## What to do? 21 | 22 | If you're installing `electrs` for the first time, just don't use `cookie`. 23 | If you're updating, reconsider the motivation above. 24 | If you used a copying script, just use `cookie_file` to get the cookie directly. 25 | If you also used `BindsTo`, we recommend removing it. 
26 | If you used fixed username and password because you didn't know about cookie or did it before `cookie_file` was implemented, reconsider using cookie authentication. 27 | If you really have to use fixed username and password, specify them using `auth` option (`username:password` like before) and remove the `cookie` option. 28 | 29 | ## When the option will be removed? 30 | 31 | Probably in a few months. 32 | It'll still be detected and turned into explicit error for a while to make sure people really see the message and know what's going on. 33 | You can see [the tracking issue #371](https://github.com/romanz/electrs/issues/371) to monitor the progress of the change. 34 | -------------------------------------------------------------------------------- /doc/install.md: -------------------------------------------------------------------------------- 1 | ## Quickstart 2 | 3 |
4 | Building from source on an Ubuntu 21.10 VM: 5 | 6 | ```bash 7 | $ sudo apt update 8 | $ sudo apt install -y clang cmake build-essential git cargo 9 | $ git clone https://github.com/romanz/electrs 10 | $ cd electrs 11 | $ cargo build --locked --release 12 | $ ./target/release/electrs --version # should print the latest version 13 | ``` 14 | 15 |
16 | 17 | [![asciicast](https://asciinema.org/a/XKznxilP4O7lCZiVZ9vZNd5vx.svg)](https://asciinema.org/a/XKznxilP4O7lCZiVZ9vZNd5vx?speed=3) 18 | 19 | ## Manual installation from source 20 | 21 | **See below for automated/binary installation options.** 22 | 23 | ### Build dependencies 24 | 25 | Note for Raspberry Pi 4 owners: the old versions of OS/toolchains produce broken binaries. 26 | Make sure to use latest OS! (see #226) 27 | 28 | Install [recent Rust](https://rustup.rs/) (1.63.0+, `apt install cargo` is preferred for Debian 12), 29 | [latest Bitcoin Core](https://bitcoincore.org/en/download/) (0.21+) 30 | and [latest Electrum wallet](https://electrum.org/#download) (4.0+). 31 | 32 | Also, install the following packages (on Debian or Ubuntu): 33 | ```bash 34 | $ sudo apt update 35 | $ sudo apt install clang cmake build-essential # for building 'rust-rocksdb' 36 | ``` 37 | 38 | There are two ways to compile `electrs`: by statically linking to `librocksdb` or dynamically linking. 39 | 40 | The advantages of static linking: 41 | 42 | * The binary is self-contained and doesn't need other dependencies, it can be transferred to other machine without worrying 43 | * The binary should work pretty much with every common distro 44 | * Different library installed elsewhere doesn't affect the behavior of `electrs` 45 | 46 | The advantages of dynamic linking: 47 | 48 | * If a (security) bug is found in the library, you only need to upgrade/recompile the library to fix it, no need to recompile `electrs` 49 | * Updating rocksdb can be as simple as `apt upgrade` 50 | * The build is significantly faster (if you already have the binary version of the library from packages) 51 | * The build is deterministic 52 | * Cross compilation is more reliable 53 | * If another application is also using `rocksdb`, you don't store it on disk and in RAM twice 54 | 55 | If you decided to use dynamic linking, you will also need to install the library ([7.8.3 
release](https://github.com/facebook/rocksdb/releases/tag/v7.8.3) is required). 56 | On [Debian 12 (bookworm)](https://packages.debian.org/bookworm/librocksdb-dev) and [Ubuntu 23.04 (lunar)](https://packages.ubuntu.com/lunar/librocksdb-dev): 57 | 58 | ```bash 59 | $ sudo apt install librocksdb-dev=7.8.3-2 60 | ``` 61 | 62 | For other versions of Debian or Ubuntu, you can build librocksdb and install inside `/usr/local` directory using following command. 63 | 64 | ```bash 65 | $ sudo apt install -y libgflags-dev libsnappy-dev zlib1g-dev libbz2-dev liblz4-dev libzstd-dev 66 | $ git clone -b v7.8.3 --depth 1 https://github.com/facebook/rocksdb && cd rocksdb 67 | $ make shared_lib -j $(nproc) && sudo make install-shared 68 | $ cd .. && rm -r rocksdb 69 | ``` 70 | 71 | #### Preparing for cross compilation 72 | 73 | Cross compilation can save you some time since you can compile `electrs` for a slower computer (like Raspberry Pi) on a faster machine 74 | even with different CPU architecture. 75 | Skip this if it's not your case. 76 | 77 | If you want to cross-compile, you need to install some additional packages. 78 | These cross compilation instructions use `aarch64`/`arm64` + Linux as an example. 79 | (The resulting binary should work on RPi 4 with aarch64-enabled OS). 80 | Change to your desired architecture/OS. 
81 | 82 | If you use Debian (or a derived distribution) you need to enable the target architecture: 83 | 84 | ``` 85 | $ sudo dpkg --add-architecture arm64 86 | $ sudo apt update 87 | ``` 88 | 89 | If you use `cargo` from the repository 90 | 91 | ```bash 92 | $ sudo apt install gcc-aarch64-linux-gnu g++-aarch64-linux-gnu libc6-dev:arm64 libstd-rust-dev:arm64 93 | ``` 94 | 95 | If you use Rustup: 96 | 97 | ```bash 98 | $ sudo apt install gcc-aarch64-linux-gnu g++-aarch64-linux-gnu libc6-dev:arm64 99 | $ rustup target add aarch64-unknown-linux-gnu 100 | ``` 101 | 102 | If you decided to use the system rocksdb (recommended if the target OS supports it), you need the version from the other architecture: 103 | 104 | ```bash 105 | $ sudo apt install librocksdb-dev:arm64 106 | ``` 107 | 108 | #### Preparing for cross compilation on a different (Debian-based) OS distribution/version 109 | *Note: Unless you run into the below mentioned libc (GLIBC) version issue, avoiding the approach described in this section is faster and requires less disk space on the build host. You may want to try the above cross compilation approach first and only use this one here as the last resort.* 110 | 111 | If your build system runs on a different OS distribution and/or release than the target system electrs is going to run on, you may run into `GLIBC` version issues like: 112 | ``` 113 | $ ./electrs --help 114 | ./electrs: /lib/arm-linux-gnueabihf/libm.so.6: version `GLIBC_2.29' not found (required by ./electrs) 115 | ``` 116 | 117 | To cross-compile electrs for a different (Debian based) target distribution you can use a [debootstrap](https://wiki.debian.org/Debootstrap) based approach. For example your build system may be a 64-bit Debian `stable` (bullseye) system and you want to cross-compile for an armv7l (32-bit) Debian `oldstable` (buster) target, like an Odroid HC1/HC2. 
118 | 119 | Install and setup debootstrap: 120 | 121 | ``` 122 | sudo apt install debootstrap 123 | ``` 124 | 125 | Next, create a working directory for a `buster` based system and set it up: 126 | ``` 127 | mkdir debootstrap-buster 128 | sudo debootstrap buster debootstrap-buster http://deb.debian.org/debian/ 129 | ``` 130 | (This takes a while to download.) 131 | 132 | Next, mount proc, sys and dev to the target system: 133 | ``` 134 | sudo mount -t proc /proc debootstrap-buster/proc 135 | sudo mount --rbind /sys debootstrap-buster/sys 136 | sudo mount --rbind /dev debootstrap-buster/dev 137 | ``` 138 | 139 | If you have checked out the electrs git repository somewhere already and don't want to have a duplicate copy inside the debootstrap working directory, just bind mount the existing directory into the chroot: 140 | ``` 141 | sudo mkdir -p debootstrap-buster/mnt/electrs 142 | sudo mount --rbind ./electrs debootstrap-buster/mnt/electrs 143 | ``` 144 | 145 | chroot into the `buster` system and install the required dependencies to build electrs with a statically linked rocksdb: 146 | ``` 147 | sudo chroot debootstrap-buster /bin/bash 148 | 149 | apt install curl 150 | curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh 151 | source "$HOME/.cargo/env" 152 | apt install clang cmake build-essential 153 | 154 | # install target specific cross compiler (armhf/gnueabihf) 155 | apt install gcc-arm-linux-gnueabihf g++-arm-linux-gnueabihf libc6-dev-armhf-cross 156 | rustup target add arm-unknown-linux-gnueabihf 157 | ``` 158 | 159 | Cross-compile `electrs` release with a statically linked rocksdb for armv7l (armhf) with libatomic inside `buster` chroot: *(bindgen needs an include path to the sources (header files) provided by the libc6-dev-armhf-cross package)* 160 | ``` 161 | cd /mnt/electrs/ 162 | BINDGEN_EXTRA_CLANG_ARGS="-target arm-linux-gnueabihf -I/usr/arm-linux-gnueabihf/include/"\ 163 | RUSTFLAGS="-C linker=arm-linux-gnueabihf-gcc -C linker-args=-latomic"\ 
164 | cargo build --locked --release --target arm-unknown-linux-gnueabihf 165 | ``` 166 | 167 | The built electrs binary will be in `/mnt/electrs/target/arm-unknown-linux-gnueabihf/release` within the chroot or can be accessed from outside the chroot respectively. 168 | 169 | #### Preparing man page generation (optional) 170 | 171 | Optionally, you may install [`cfg_me`](https://github.com/Kixunil/cfg_me) tool for generating the manual page. 172 | The easiest way is to run `cargo install cfg_me`. 173 | 174 | #### Download electrs 175 | 176 | ```bash 177 | $ git clone https://github.com/romanz/electrs 178 | $ cd electrs 179 | ``` 180 | 181 | ### Build 182 | 183 | Note: you need to have enough free RAM to build `electrs`. 184 | The build will fail otherwise. 185 | Close those 100 old tabs in the browser. ;) 186 | 187 | #### Cargo features 188 | 189 | By default `electrs` builds with Prometheus support. 190 | However this causes problems on some platforms. 191 | If you don't need Prometheus you may disable it using `--no-default-features` argument to `cargo build`/`cargo install`. 192 | 193 | #### Static linking 194 | 195 | First build should take ~20 minutes: 196 | ```bash 197 | $ cargo build --locked --release 198 | ``` 199 | 200 | If RocksDB build fails with "`undefined reference to __atomic_*`" linker errors 201 | (usually happens on a 32-bit OS), set the following environment variable: 202 | ```bash 203 | $ RUSTFLAGS="-C link-args=-latomic" cargo build --locked --release 204 | ``` 205 | Relevant issues: [#134](https://github.com/romanz/electrs/issues/134) and [#391](https://github.com/romanz/electrs/issues/391). 206 | 207 | #### Dynamic linking 208 | 209 | Note that if you have previously done a static linking build, it is recommended to clean the build artifacts to avoid build errors (e.g. 
https://github.com/romanz/electrs/issues/1001): 210 | ``` 211 | $ cargo clean 212 | ``` 213 | 214 | ``` 215 | $ ROCKSDB_INCLUDE_DIR=/usr/include ROCKSDB_LIB_DIR=/usr/lib cargo build --locked --release 216 | ``` 217 | 218 | Or if you have installed librocksdb from source 219 | 220 | ``` 221 | $ ROCKSDB_INCLUDE_DIR=/usr/local/include ROCKSDB_LIB_DIR=/usr/local/lib cargo build --locked --release 222 | ``` 223 | 224 | #### Cross compilation 225 | 226 | Run one of the commands above (depending on linking type) with argument `--target aarch64-unknown-linux-gnu` and prepended with env vars: `BINDGEN_EXTRA_CLANG_ARGS="-target gcc-aarch64-linux-gnu" RUSTFLAGS="-C linker=aarch64-linux-gnu-gcc"` 227 | 228 | E.g. for dynamic linking case: 229 | 230 | ``` 231 | $ ROCKSDB_INCLUDE_DIR=/usr/include ROCKSDB_LIB_DIR=/usr/lib BINDGEN_EXTRA_CLANG_ARGS="-target gcc-aarch64-linux-gnu" RUSTFLAGS="-C linker=aarch64-linux-gnu-gcc" cargo build --locked --release --target aarch64-unknown-linux-gnu 232 | ``` 233 | 234 | It's a bit long but sufficient! You will find the resulting binary in `target/aarch64-unknown-linux-gnu/release/electrs` - copy it to your target machine. 235 | 236 | #### Generating man pages 237 | 238 | If you installed `cfg_me` to generate the man page, you can run `cfg_me man` to see it right away or `cfg_me -o electrs.1 man` to save it into a file (`electrs.1`). 239 | 240 | ## Docker-based installation from source 241 | 242 | **Important**: The `Dockerfile` is provided for demonstration purposes and may NOT be suitable for production use. 243 | The maintainers of electrs are not deeply familiar with Docker, so you should DYOR. 244 | If you are not familiar with Docker either, it's probably safer to NOT use it. 245 | 246 | Note: currently Docker installation links statically 247 | 248 | Note: health check only works if Prometheus is running on port 4224 inside container 249 | 250 | ```bash 251 | $ docker build -t electrs-app . 
252 | $ mkdir db 253 | $ docker run --network host \ 254 | --volume $HOME/.bitcoin:/home/user/.bitcoin:ro \ 255 | --volume $PWD/db:/home/user/db \ 256 | --env ELECTRS_DB_DIR=/home/user/db \ 257 | --rm -i -t electrs-app 258 | ``` 259 | 260 | If not using the host-network, you probably want to expose the ports for electrs and Prometheus like so: 261 | 262 | ```bash 263 | $ docker run --volume $HOME/.bitcoin:/home/user/.bitcoin:ro \ 264 | --volume $PWD/db:/home/user/db \ 265 | --env ELECTRS_DB_DIR=/home/user/db \ 266 | --env ELECTRS_ELECTRUM_RPC_ADDR=0.0.0.0:50001 \ 267 | --env ELECTRS_MONITORING_ADDR=0.0.0.0:4224 \ 268 | --rm -i -t electrs-app 269 | ``` 270 | 271 | To access the server from outside Docker, add `-p 50001:50001 -p 4224:4224` but be aware of the security risks. Good practice is to group containers that needs access to the server inside the same Docker network and not expose the ports to the outside world. 272 | -------------------------------------------------------------------------------- /doc/monitoring.md: -------------------------------------------------------------------------------- 1 | ## Monitoring 2 | 3 | Indexing and serving metrics are exported via [Prometheus](https://github.com/tikv/rust-prometheus): 4 | 5 | ```bash 6 | $ sudo apt install prometheus 7 | ``` 8 | 9 | Add `electrs` job to `scrape_configs` section in `/etc/prometheus/prometheus.yml`: 10 | 11 | ``` 12 | - job_name: electrs 13 | static_configs: 14 | - targets: ['localhost:4224'] 15 | ``` 16 | 17 | Restart and check the collected metrics: 18 | 19 | ``` 20 | $ sudo systemctl restart prometheus 21 | $ firefox 'http://localhost:9090/graph?g0.range_input=1h&g0.expr=index_height&g0.tab=0' 22 | ``` 23 | -------------------------------------------------------------------------------- /doc/schema.md: -------------------------------------------------------------------------------- 1 | # Index Schema 2 | 3 | The index is stored at a single RocksDB database using the following column 
families. 4 | Most of the data is stored in key-only DB rows (i.e. having empty values). 5 | 6 | ## Transaction outputs' index (`funding`) 7 | 8 | Allows efficiently finding all funding transactions for a specific address: 9 | 10 | | Script Hash Prefix | Confirmed Block Height | 11 | | -------------------- | ---------------------- | 12 | | `SHA256(script)[:8]` | `height as u32` | 13 | 14 | ## Transaction inputs' index (`spending`) 15 | 16 | Allows efficiently finding spending transaction of a specific output: 17 | 18 | | Previous Outpoint Prefix | Confirmed Block Height | 19 | | ------------------------ | ---------------------- | 20 | | `txid[:8] as u64 + vout` | `height as u32` | 21 | 22 | 23 | ## Transaction ID index (`txid`) 24 | 25 | In order to save storage space, we map the 8-byte transaction ID prefix to its confirmed block height: 26 | 27 | | Txid Prefix | Confirmed height | 28 | | ----------- | ---------------- | 29 | | `txid[:8]` | `height as u32` | 30 | 31 | Note that this mapping allows us to use `getrawtransaction` RPC to retrieve actual transaction data from bitcoind without `-txindex` enabled 32 | (by explicitly specifying the [blockhash](https://github.com/bitcoin/bitcoin/commit/497d0e014cc79d46531d570e74e4aeae72db602d)). 
33 | 34 | ## Headers (`headers`) 35 | 36 | For faster loading, we store all block headers in RocksDB: 37 | 38 | | Serialized header | 39 | | ----------------------- | 40 | | `header as BlockHeader` | 41 | 42 | In addition, we also store the chain tip: 43 | 44 | | Key | | Value | 45 | | --- | - | ------------------------ | 46 | | `T` | | `blockhash as BlockHash` | 47 | 48 | ## Configuration (`config`) 49 | 50 | | Key | | Value | 51 | | --- | - | --------------------------- | 52 | | `C` | | `serialized config as JSON` | 53 | 54 | -------------------------------------------------------------------------------- /doc/upgrading.md: -------------------------------------------------------------------------------- 1 | ### Important changes from versions older than 0.9.3 2 | 3 | * If you use `verbose` (or `-v` argument), switch to `log_filters` (or `RUST_LOG` environment variable). 4 | Please note that it allows setting per-module filters, but module naming is considered unstable. 5 | If you have used `-vv` (the value suggested in the documentation), switch to `--log-filters INFO`: 6 | 7 | 8 | |Log filter|Old `verbose` value|Description | 9 | |----------|-------------------|----------------------------------------------------------------------| 10 | |ERROR | 0|Only fatal errors | 11 | |WARN | 1|Things that could indicate serious problems | 12 | |INFO | 2|Various significant events and suggestions | 13 | |DEBUG | 3|Details that could be useful when debugging - only use when debugging!| 14 | |TRACE | 4|**Very** detailed information - only use when debugging! | 15 | 16 | 17 | ### Important changes from versions older than 0.9.0 18 | 19 | In 0.9.0 we have changed the RocksDB index format to optimize electrs performance. 20 | We also use Bitcoin P2P protocol instead of reading blocks from disk or JSON RPC. 21 | Some guides were suggesting trace log level and we started to trace much more information. 
22 | 23 | Upgrading checklist: 24 | 25 | * Make sure you upgrade at a time when you don't need to use electrs for a while. 26 | Because of reindex electrs will be unable to serve your requests for a few hours. 27 | (The exact time depends on your hardware.) 28 | If you wish to check the database without reindexing run electrs with `--no-auto-reindex`. 29 | * If you have less than 60 GB of free space delete `mainnet` subdirectory inside your `db_dir` *before* running the new version. 30 | Note however if you have less than 60 GB of free space you should consider extending your storage soon 31 | since in the worst case scenario you will run out of space in ~100 days. 32 | * Make sure to allow accesses to bitcoind from local address, ideally whitelist it using `whitelist=download@127.0.0.1` bitcoind option. 33 | Either don't use `maxconnections` bitcoind option or set it to 12 or more. 34 | * If you use non-default P2P port (or address) for bitcoind adjust `electrs` configuration. 35 | * If you still didn't migrate `cookie` electrs option you have to now - see below. 36 | * Remove unsupported options from configuration (`blocks_dir`, `jsonrpc_import`, `bulk_index_threads`, `tx_cache_size_mb`, `blocktxids_cache_size_mb`) 37 | * Rename `txid_limit` to `index_lookup_limit` if used 38 | * If you use `verbose = 4` (or `-vvvv` argument) lower it down to `2` (`-vv`) for production use. 39 | Keeping it would waste resources because we utilize it more now. 40 | * **After reindexing**, if you did **not** delete `mainnet` subdirectory within `db_dir` check that `electrs` works as expected and then *delete whole `mainnet` subdirectory*. 41 | * If you are using our Dockerfile, please make sure to re-map the DB volume (see [the section above](install.md#docker-based-installation-from-source)). 
42 | 43 | ### Important changes from version older than 0.8.8 44 | 45 | **If you're upgrading from version 0.8.7 to a higher version and used `cookie` option you should change your configuration!** 46 | The `cookie` option was deprecated and **will be removed eventually**! 47 | If you had actual cookie (from `~/bitcoin/.cookie` file) specified in `cookie` option, this was wrong as it wouldn't get updated when needed. 48 | It's strongly recommended to use proper cookie authentication using `cookie_file`. 49 | If you really have to use fixed username and password, explicitly specified in `bitcoind` config, use `auth` option instead. 50 | Users of `btc-rpc-proxy` using `public:public` need to use `auth` too. 51 | You can read [a detailed explanation of cookie deprecation with motivation explained](cookie_deprecation.md). 52 | 53 | ### General upgrading guide 54 | 55 | As with any other application, you need to remember how you installed `electrs` to upgrade it. 56 | If you don't then here's a little help: run `which electrs` and compare the output 57 | 58 | * If you got an error you didn't install `electrs` into your system in any way, it's probably sitting in the `target/release` directory of source 59 | * If the path starts with `/bin/` then either you have used packaging system or you made a mistake the first time (non-packaged binaries must go to `/usr/local/bin`) 60 | * If the path starts with `/usr/local/bin` you most likely copied electrs there after building 61 | * If the path starts with `/home/YOUR_USERNAME/.cargo/bin` you most likely ran `cargo install` 62 | 63 | ### Upgrading distribution package 64 | 65 | If you used Debian packaging system you only need this: 66 | 67 | ``` 68 | sudo apt update 69 | sudo apt upgrade 70 | ``` 71 | 72 | Similarly for other distributions - use their respective commands. 
73 | If a new version of `electrs` is not yet in the package system, try waiting a few days or contact the maintainers of the packages if it has been a long time. 74 | 75 | ### Upgrading manual installation 76 | 77 | 1. Enter your `electrs` source directory, usually in `~/` but some people like to put it in something like `~/sources`. 78 | If you've deleted it, you need to `git clone` again. 79 | 2. `git checkout master` 80 | 3. `git pull` 81 | 4. Strongly recommended: `git verify-tag v0.9.1` (fix the version number if we've forgotten to update the docs ;)) should show "Good signature from 15C8 C357 4AE4 F1E2 5F3F 35C5 87CA E5FA 4691 7CBB" 82 | 5. `git checkout v0.9.1` 83 | 6. If you used static linking: `cargo build --locked --release`. 84 | If you used dynamic linking `ROCKSDB_INCLUDE_DIR=/usr/include ROCKSDB_LIB_DIR=/usr/lib cargo build --locked --release`. 85 | If you don't remember which linking you used, you probably used static. 86 | This step will take a few tens of minutes (but dynamic linking is a bit faster), go grab a coffee. 87 | Also remember that you need enough free RAM, the build will die otherwise 88 | 7. If you've previously copied `electrs` into `/usr/local/bin` run: sudo `cp target/release/electrs /usr/local/bin` 89 | If you've previously installed `electrs` using `cargo install`: `cargo install --locked --path . -f` 90 | 8. If you've manually configured systemd service: `sudo systemctl restart electrs` 91 | -------------------------------------------------------------------------------- /doc/usage.md: -------------------------------------------------------------------------------- 1 | ## Quickstart 2 | 3 |
4 | Assuming Bitcoin Core 0.21+ is installed on the same machine (with the standard configuration at `~/.bitcoin/bitcoin.conf`): 5 | 6 | ```bash 7 | $ bitcoind -server=1 -prune=0 & 8 | $ # ... wait until the chain is synced (e.g. using `bitcoin-cli getblockchaininfo`) 9 | $ electrs --log-filters=INFO --db-dir ./db --daemon-dir ~/.bitcoin --network bitcoin 10 | ``` 11 | 12 |
13 | 14 | [![asciicast](https://asciinema.org/a/zRNZp5HsBDi5rAlGWU7470Pzl.svg)](https://asciinema.org/a/zRNZp5HsBDi5rAlGWU7470Pzl?speed=3) 15 | 16 | ## Usage 17 | 18 | First index sync should take ~6.5 hours for ~504GB @ August 2023 (on a dual core Intel CPU @ 3.3 GHz, 8 GB RAM, 1TB WD Blue HDD): 19 | ```bash 20 | $ du -ch ~/.bitcoin/blocks/blk*.dat | tail -n1 21 | 336G total 22 | 23 | $ ./target/release/electrs --network bitcoin --db-dir ./db --daemon-dir /home/user/.bitcoin 24 | Starting electrs 0.10.0 on x86_64 linux with Config { network: Bitcoin, db_path: "./db/bitcoin", daemon_dir: "/home/user/.bitcoin", daemon_auth: CookieFile("/home/user/.bitcoin/.cookie"), daemon_rpc_addr: 127.0.0.1:8332, daemon_p2p_addr: 127.0.0.1:8333, electrum_rpc_addr: 127.0.0.1:50001, monitoring_addr: 127.0.0.1:4224, wait_duration: 10s, jsonrpc_timeout: 15s, index_batch_size: 10, index_lookup_limit: None, reindex_last_blocks: 0, auto_reindex: true, ignore_mempool: false, sync_once: false, skip_block_download_wait: false, disable_electrum_rpc: false, server_banner: "Welcome to electrs 0.10.0 (Electrum Rust Server)!", signet_magic: f9beb4d9, args: [] } 25 | [2023-08-16T19:17:11.193Z INFO electrs::metrics::metrics_impl] serving Prometheus metrics on 127.0.0.1:4224 26 | [2023-08-16T19:17:11.193Z INFO electrs::server] serving Electrum RPC on 127.0.0.1:50001 27 | [2023-08-16T19:17:12.355Z INFO electrs::db] "./db/bitcoin": 0 SST files, 0 GB, 0 Grows 28 | [2023-08-16T19:17:12.446Z INFO electrs::index] indexing 2000 blocks: [1..2000] 29 | [2023-08-16T19:17:12.866Z INFO electrs::chain] chain updated: tip=00000000dfd5d65c9d8561b4b8f60a63018fe3933ecb131fb37f905f87da951a, height=2000 30 | [2023-08-16T19:17:12.879Z INFO electrs::index] indexing 2000 blocks: [2001..4000] 31 | [2023-08-16T19:17:13.227Z INFO electrs::chain] chain updated: tip=00000000922e2aa9e84a474350a3555f49f06061fd49df50a9352f156692a842, height=4000 32 | [2023-08-16T19:17:13.238Z INFO electrs::index] indexing 2000 blocks: 
[4001..6000] 33 | [2023-08-16T19:17:13.587Z INFO electrs::chain] chain updated: tip=00000000dbbb79792303bdd1c6c4d7ab9c21bba0667213c2eca955e11230c5a5, height=6000 34 | [2023-08-16T19:17:13.598Z INFO electrs::index] indexing 2000 blocks: [6001..8000] 35 | [2023-08-16T19:17:13.950Z INFO electrs::chain] chain updated: tip=0000000094fbacdffec05aea9847000522a258c269ae37a74a818afb96fc27d9, height=8000 36 | [2023-08-16T19:17:13.961Z INFO electrs::index] indexing 2000 blocks: [8001..10000] 37 | <...> 38 | [2023-08-17T00:13:16.443Z INFO electrs::index] indexing 2000 blocks: [798001..800000] 39 | [2023-08-17T00:14:58.310Z INFO electrs::chain] chain updated: tip=00000000000000000002a7c4c1e48d76c5a37902165a270156b7a8d72728a054, height=800000 40 | [2023-08-17T00:14:58.325Z INFO electrs::index] indexing 2000 blocks: [800001..802000] 41 | [2023-08-17T00:16:36.425Z INFO electrs::chain] chain updated: tip=0000000000000000000311b41f1d611f977b024b947568c1dd760704360f148a, height=802000 42 | [2023-08-17T00:16:36.437Z INFO electrs::index] indexing 1534 blocks: [802001..803534] 43 | [2023-08-17T00:17:51.338Z INFO electrs::chain] chain updated: tip=00000000000000000003c0cd1b62ed8bb502e24bcbfeee16e81d6ea33d026263, height=803534 44 | [2023-08-17T00:18:00.592Z INFO electrs::db] starting config compaction 45 | [2023-08-17T00:18:00.778Z INFO electrs::db] starting headers compaction 46 | [2023-08-17T00:18:00.870Z INFO electrs::db] starting txid compaction 47 | [2023-08-17T00:33:34.370Z INFO electrs::db] starting funding compaction 48 | [2023-08-17T01:03:56.784Z INFO electrs::db] starting spending compaction 49 | [2023-08-17T01:35:05.983Z INFO electrs::db] finished full compaction 50 | [2023-08-17T01:36:23.300Z INFO electrs::index] indexing 5 blocks: [803535..803539] 51 | [2023-08-17T01:36:23.646Z INFO electrs::chain] chain updated: tip=000000000000000000006a3aaddd4b643607b33e000f1200d35005c330ecfa88, height=803539 52 | [2023-08-17T01:41:26.009Z INFO electrs::index] indexing 1 blocks: 
[803540..803540] 53 | [2023-08-17T01:41:26.143Z INFO electrs::chain] chain updated: tip=00000000000000000003266d31db92629b64241eef7ce708244f6d6283b080b4, height=803540 54 | [2023-08-17T01:42:42.999Z INFO electrs::index] indexing 1 blocks: [803541..803541] 55 | [2023-08-17T01:42:43.153Z INFO electrs::chain] chain updated: tip=00000000000000000000884a77c8b8ad2fb0c25510a3251bf5ef57f0db275146, height=803541 56 | ``` 57 | You can specify options via command-line parameters, environment variables or using config files. 58 | See the documentation above. 59 | 60 | Note that the final DB size should be ~10% of the `blk*.dat` files, but it may increase to ~20% at the end of the initial sync (just before the [full compaction is invoked](https://github.com/facebook/rocksdb/wiki/Manual-Compaction)). 61 | 62 | It should take roughly 18 hours to sync and compact the index on an ODROID-HC1 with 8 CPU cores @ 2GHz, 2GB RAM, and an SSD using the command above. 63 | 64 | The index database is stored here: 65 | ```bash 66 | $ du db/ 67 | 42G db/mainnet/ 68 | ``` 69 | 70 | See [extra configuration suggestions](config.md#extra-configuration-suggestions) that you might want to consider. 71 | 72 | ## Electrum client 73 | 74 | If you happen to use the Electrum client from [the *beta* Debian repository](binaries.md#cnative-os-packages), it's pre-configured out-of-the-box already 75 | Read below otherwise. 76 | 77 | There's a prepared script for launching `electrum` in such way to connect only to the local `electrs` instance to protect your privacy. 
78 | 79 | ```bash 80 | $ ./contrib/local-electrum.bash 81 | + ADDR=127.0.0.1 82 | + PORT=50001 83 | + PROTOCOL=t 84 | + electrum --oneserver --server=127.0.0.1:50001:t 85 | 86 | ``` 87 | 88 | You can persist Electrum configuration (see `~/.electrum/config`) using: 89 | ```bash 90 | $ electrum setconfig oneserver true 91 | $ electrum setconfig server 127.0.0.1:50001:t 92 | $ electrum # will connect only to the local server 93 | ``` 94 | 95 | ## RPC examples 96 | 97 | You can invoke any supported RPC using `netcat`, for example: 98 | 99 | ``` 100 | $ echo '{"jsonrpc": "2.0", "method": "server.version", "params": ["", "1.4"], "id": 0}' | netcat 127.0.0.1 50001 101 | {"id":0,"jsonrpc":"2.0","result":["electrs 0.9.0","1.4"]} 102 | ``` 103 | 104 | For more complex tasks, you may need to convert addresses to 105 | [script hashes](https://electrumx-spesmilo.readthedocs.io/en/latest/protocol-basics.html#script-hashes) - see 106 | [contrib/history.py](https://github.com/romanz/electrs/blob/master/contrib/history.py) for getting an address balance and history: 107 | 108 | ``` 109 | $ ./contrib/history.sh --venv 144STc7gcb9XCp6t4hvrcUEKg9KemivsCR 110 | [2021-08-18 13:56:40.254317] INFO: electrum: connecting to localhost:50001 111 | [2021-08-18 13:56:40.574461] INFO: electrum: subscribed to 1 scripthashes 112 | [2021-08-18 13:56:40.645072] DEBUG: electrum: 0.00000 mBTC (total) 113 | [2021-08-18 13:56:40.710279] INFO: electrum: got history of 2 transactions 114 | [2021-08-18 13:56:40.769064] INFO: electrum: loaded 2 transactions 115 | [2021-08-18 13:56:40.835569] INFO: electrum: loaded 2 header timestamps 116 | [2021-08-18 13:56:40.900560] INFO: electrum: loaded 2 merkle proofs 117 | +------------------------------------------------------------------+----------------------+--------+---------------+--------------+--------------+ 118 | | txid | block timestamp | height | confirmations | delta (mBTC) | total (mBTC) | 119 | 
+------------------------------------------------------------------+----------------------+--------+---------------+--------------+--------------+ 120 | | 34b6411d004f279622d0a45a4558746e1fa74323c5c01e9c0bb0a3277781a0d0 | 2020-07-25T08:33:57Z | 640699 | 55689 | 126.52436 | 126.52436 | 121 | | e58916ca945639c657de137b30bd29e213e4c9fc8e04652c1abc2922909fb8fd | 2020-07-25T21:20:35Z | 640775 | 55613 | -126.52436 | 0.00000 | 122 | +------------------------------------------------------------------+----------------------+--------+---------------+--------------+--------------+ 123 | [2021-08-18 13:56:40.902677] INFO: electrum: tip=00000000000000000009d7590d32ca52ad0b8a4cdfee43e28e6dfcd11cafeaac, height=696387 @ 2021-08-18T13:47:19Z 124 | ``` 125 | -------------------------------------------------------------------------------- /examples/tx_collisions.rs: -------------------------------------------------------------------------------- 1 | use anyhow::{Context, Result}; 2 | use electrs_rocksdb::{ColumnFamilyDescriptor, IteratorMode, Options, DB}; 3 | 4 | fn main() -> Result<()> { 5 | let path = std::env::args().nth(1).context("missing DB path")?; 6 | let cf_names = DB::list_cf(&Options::default(), &path)?; 7 | let cfs: Vec<_> = cf_names 8 | .iter() 9 | .map(|name| ColumnFamilyDescriptor::new(name, Options::default())) 10 | .collect(); 11 | let db = DB::open_cf_descriptors(&Options::default(), &path, cfs)?; 12 | let cf = db.cf_handle("txid").context("missing column family")?; 13 | 14 | let mut state: Option<(u64, u32)> = None; 15 | for row in db.iterator_cf(cf, IteratorMode::Start) { 16 | let (curr, _value) = row?; 17 | let curr_prefix = u64::from_le_bytes(curr[..8].try_into()?); 18 | let curr_height = u32::from_le_bytes(curr[8..].try_into()?); 19 | 20 | if let Some((prev_prefix, prev_height)) = state { 21 | if prev_prefix == curr_prefix { 22 | eprintln!( 23 | "prefix={:x} heights: {} {}", 24 | curr_prefix, prev_height, curr_height 25 | ); 26 | }; 27 | } 28 | state = 
Some((curr_prefix, curr_height)); 29 | } 30 | Ok(()) 31 | } 32 | -------------------------------------------------------------------------------- /internal/README.md: -------------------------------------------------------------------------------- 1 | # electrs-internal files 2 | 3 | **Nothing for users here, just for developers. ;)** 4 | -------------------------------------------------------------------------------- /internal/config_specification.toml: -------------------------------------------------------------------------------- 1 | [general] 2 | env_prefix = "ELECTRS" 3 | conf_file_param = "conf" 4 | conf_dir_param = "conf_dir" 5 | skip_default_conf_files_switch = "skip_default_conf_files" 6 | doc = """ 7 | An efficient re-implementation of Electrum Server, inspired by ElectrumX, Electrum Personal Server and bitcoincore-indexd. 8 | 9 | The motivation behind this project is to enable a user to run his own Electrum server, with required hardware resources not much beyond those of a full node. The server indexes the entire Bitcoin blockchain, and the resulting index enables fast queries for any given user wallet, allowing the user to keep real-time track of his balances and his transaction history using the Electrum wallet. 
Since it runs on the user's own machine, there is no need for the wallet to communicate with external Electrum servers, thus preserving the privacy of the user's addresses and balances.""" 10 | 11 | [[switch]] 12 | name = "verbose" 13 | abbr = "v" 14 | doc = "Increase logging verbosity" 15 | count = true 16 | 17 | [[switch]] 18 | name = "timestamp" 19 | doc = "Prepend log lines with a timestamp" 20 | 21 | [[switch]] 22 | name = "auto_reindex" 23 | doc = "Automatically reindex the database if it's inconsistent or in old format" 24 | default = true 25 | 26 | [[param]] 27 | name = "db_dir" 28 | type = "std::path::PathBuf" 29 | doc = "Directory to store index database (default: ./db/)" 30 | default = "\"./db\".into()" 31 | 32 | [[param]] 33 | name = "db_log_dir" 34 | type = "std::path::PathBuf" 35 | doc = "Directory to store index database internal log (default: same as specified by `db_dir`)" 36 | 37 | [[param]] 38 | name = "db_parallelism" 39 | type = "u8" 40 | doc = "Max threads to use for DB background operations (flushes and compactions)" 41 | default = "1" 42 | 43 | [[param]] 44 | name = "daemon_dir" 45 | type = "std::path::PathBuf" 46 | doc = "Data directory of Bitcoind (default: ~/.bitcoin/)" 47 | default = "crate::config::default_daemon_dir()" 48 | 49 | [[param]] 50 | name = "auth" 51 | type = "String" 52 | doc = "JSONRPC authentication ('USER:PASSWORD', default: use cookie file)" 53 | # Force the user to use config file in order to avoid password leaks 54 | argument = false 55 | env_var = false 56 | 57 | [[param]] 58 | name = "cookie_file" 59 | type = "std::path::PathBuf" 60 | doc = "JSONRPC authentication cookie file (default: ~/.bitcoin/.cookie)" 61 | # This is safe to configure on command line. 
62 | 63 | [[param]] 64 | name = "network" 65 | type = "crate::config::BitcoinNetwork" 66 | convert_into = "::bitcoin::Network" 67 | doc = "Select Bitcoin network type ('bitcoin', 'testnet', 'testnet4', 'regtest' or 'signet')" 68 | default = "Default::default()" 69 | 70 | [[param]] 71 | name = "electrum_rpc_addr" 72 | type = "crate::config::ResolvAddr" 73 | doc = "Electrum server JSONRPC 'addr:port' to listen on (default: '127.0.0.1:50001' for mainnet, '127.0.0.1:60001' for testnet, '127.0.0.1:60401' for regtest and '127.0.0.1:60601' for signet)" 74 | 75 | [[param]] 76 | name = "daemon_rpc_addr" 77 | type = "crate::config::ResolvAddr" 78 | doc = "Bitcoin daemon JSONRPC 'addr:port' to connect (default: 127.0.0.1:8332 for mainnet, 127.0.0.1:18332 for testnet, 127.0.0.1:18443 for regtest and 127.0.0.1:18554 for signet)" 79 | [[param]] 80 | name = "daemon_p2p_addr" 81 | type = "crate::config::ResolvAddr" 82 | doc = "Bitcoin daemon p2p 'addr:port' to connect (default: 127.0.0.1:8333 for mainnet, 127.0.0.1:18333 for testnet, 127.0.0.1:18444 for regtest and 127.0.0.1:38333 for signet)" 83 | 84 | [[param]] 85 | name = "monitoring_addr" 86 | type = "crate::config::ResolvAddr" 87 | doc = "Prometheus monitoring 'addr:port' to listen on (default: 127.0.0.1:4224 for mainnet, 127.0.0.1:14224 for testnet, 127.0.0.1:24224 for regtest and 127.0.0.1:34224 for signet)" 88 | 89 | [[param]] 90 | name = "wait_duration_secs" 91 | type = "u64" 92 | doc = "Duration to wait between bitcoind polling" 93 | default = "10" 94 | 95 | [[param]] 96 | name = "jsonrpc_timeout_secs" 97 | type = "u64" 98 | doc = "Duration to wait until bitcoind JSON-RPC timeouts (must be greater than wait_duration_secs)." 
99 | default = "15" 100 | 101 | [[param]] 102 | name = "index_batch_size" 103 | type = "usize" 104 | doc = "Number of blocks to get in a single p2p protocol request from bitcoind" 105 | default = "10" 106 | 107 | [[switch]] 108 | name = "ignore_mempool" 109 | doc = "Don't sync mempool - queries will show only confirmed transactions." 110 | 111 | [[switch]] 112 | name = "disable_electrum_rpc" 113 | doc = "Disable Electrum RPC server - only sync and index blocks." 114 | 115 | [[switch]] 116 | name = "sync_once" 117 | doc = "Exit after the initial sync is over (don't start Electrum server)." 118 | 119 | [[switch]] 120 | name = "skip_block_download_wait" 121 | doc = "Don't wait for block download to finish before starting sync." 122 | 123 | [[switch]] 124 | name = "version" 125 | doc = "Print out the program version." 126 | 127 | [[param]] 128 | name = "index_lookup_limit" 129 | type = "usize" 130 | doc = "Number of transactions to lookup before returning an error, to prevent 'too popular' addresses from causing the RPC server to get stuck (0 - disable the limit)" 131 | default = "0" 132 | 133 | [[param]] 134 | name = "reindex_last_blocks" 135 | type = "usize" 136 | doc = "Number of last blocks to reindex (used for testing)" 137 | default = "0" 138 | 139 | [[param]] 140 | name = "server_banner" 141 | type = "String" 142 | doc = "The banner to be shown in the Electrum console" 143 | default = "concat!(\"Welcome to electrs \", env!(\"CARGO_PKG_VERSION\"), \" (Electrum Rust Server)!\").to_owned()" 144 | 145 | [[param]] 146 | name = "log_filters" 147 | type = "String" 148 | doc = "Logging filters, overriding `RUST_LOG` environment variable (see https://docs.rs/env_logger/ for details)" 149 | 150 | [[param]] 151 | name = "signet_magic" 152 | type = "String" 153 | doc = "network magic for custom signet network in hex format, as found in Bitcoin Core logs (signet only)" 154 | -------------------------------------------------------------------------------- /logo/icon.svg: 
-------------------------------------------------------------------------------- 1 | 2 | 3 | 5 | 12 | 13 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | 53 | 54 | 55 | 56 | 57 | 63 | 69 | 70 | 71 | 72 | 73 | -------------------------------------------------------------------------------- /logo/logo.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 5 | 13 | 14 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | 53 | 54 | 55 | 56 | 62 | 68 | 69 | 70 | 71 | 72 | 73 | 79 | 80 | 86 | 93 | 97 | 100 | 110 | 111 | 112 | 113 | -------------------------------------------------------------------------------- /logo/manual.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/romanz/electrs/785ae687e4279c8682488c10221d4fa3cde62b23/logo/manual.pdf -------------------------------------------------------------------------------- /server.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -eux 3 | cd `dirname $0` 4 | 5 | cargo fmt --all 6 | cargo build --all --features "metrics_process" --release 7 | 8 | NETWORK=$1 9 | shift 10 | 11 | DB=${DB-./db} 12 | export RUST_LOG=${RUST_LOG-electrs=INFO} 13 | target/release/electrs --network $NETWORK --db-dir $DB --daemon-dir $HOME/.bitcoin $* 14 | 15 | # use SIGINT to quit 16 | -------------------------------------------------------------------------------- /src/bin/electrs.rs: -------------------------------------------------------------------------------- 1 | use anyhow::Result; 2 | 3 | fn main() -> Result<()> { 4 | electrs::run() 5 | } 6 | -------------------------------------------------------------------------------- /src/cache.rs: -------------------------------------------------------------------------------- 1 | use bitcoin::Txid; 2 | use parking_lot::RwLock; 3 | 4 | use std::collections::HashMap; 5 | use std::sync::Arc; 6 | 7 | use crate::metrics::{self, Histogram, Metrics}; 
8 | 9 | pub(crate) struct Cache { 10 | txs: Arc>>>, 11 | 12 | // stats 13 | txs_size: Histogram, 14 | } 15 | 16 | impl Cache { 17 | pub fn new(metrics: &Metrics) -> Self { 18 | Cache { 19 | txs: Default::default(), 20 | txs_size: metrics.histogram_vec( 21 | "cache_txs_size", 22 | "Cached transactions' size (in bytes)", 23 | "type", 24 | metrics::default_size_buckets(), 25 | ), 26 | } 27 | } 28 | 29 | pub fn add_tx(&self, txid: Txid, f: impl FnOnce() -> Box<[u8]>) { 30 | self.txs.write().entry(txid).or_insert_with(|| { 31 | let tx = f(); 32 | self.txs_size.observe("serialized", tx.len() as f64); 33 | tx 34 | }); 35 | } 36 | 37 | pub fn get_tx(&self, txid: &Txid, f: F) -> Option 38 | where 39 | F: FnOnce(&[u8]) -> T, 40 | { 41 | self.txs.read().get(txid).map(|tx_bytes| f(tx_bytes)) 42 | } 43 | } 44 | -------------------------------------------------------------------------------- /src/chain.rs: -------------------------------------------------------------------------------- 1 | use std::collections::HashMap; 2 | 3 | use bitcoin::blockdata::block::Header as BlockHeader; 4 | use bitcoin::{BlockHash, Network}; 5 | 6 | /// A new header found, to be added to the chain at specific height 7 | pub(crate) struct NewHeader { 8 | header: BlockHeader, 9 | hash: BlockHash, 10 | height: usize, 11 | } 12 | 13 | impl NewHeader { 14 | pub(crate) fn from((header, height): (BlockHeader, usize)) -> Self { 15 | Self { 16 | header, 17 | hash: header.block_hash(), 18 | height, 19 | } 20 | } 21 | 22 | pub(crate) fn height(&self) -> usize { 23 | self.height 24 | } 25 | 26 | pub(crate) fn hash(&self) -> BlockHash { 27 | self.hash 28 | } 29 | } 30 | 31 | /// Current blockchain headers' list 32 | pub struct Chain { 33 | headers: Vec<(BlockHash, BlockHeader)>, 34 | heights: HashMap, 35 | } 36 | 37 | impl Chain { 38 | // create an empty chain 39 | pub fn new(network: Network) -> Self { 40 | let genesis = bitcoin::blockdata::constants::genesis_block(network); 41 | let genesis_hash = 
genesis.block_hash(); 42 | Self { 43 | headers: vec![(genesis_hash, genesis.header)], 44 | heights: std::iter::once((genesis_hash, 0)).collect(), // genesis header @ zero height 45 | } 46 | } 47 | 48 | pub(crate) fn drop_last_headers(&mut self, n: usize) { 49 | if n == 0 { 50 | return; 51 | } 52 | let new_height = self.height().saturating_sub(n); 53 | self.update(vec![NewHeader::from(( 54 | self.headers[new_height].1, 55 | new_height, 56 | ))]) 57 | } 58 | 59 | /// Load the chain from a collection of headers, up to the given tip 60 | pub(crate) fn load(&mut self, headers: impl Iterator, tip: BlockHash) { 61 | let genesis_hash = self.headers[0].0; 62 | 63 | let header_map: HashMap = 64 | headers.map(|h| (h.block_hash(), h)).collect(); 65 | let mut blockhash = tip; 66 | let mut new_headers: Vec<&BlockHeader> = Vec::with_capacity(header_map.len()); 67 | while blockhash != genesis_hash { 68 | let header = match header_map.get(&blockhash) { 69 | Some(header) => header, 70 | None => panic!("missing header {} while loading from DB", blockhash), 71 | }; 72 | blockhash = header.prev_blockhash; 73 | new_headers.push(header); 74 | } 75 | info!("loading {} headers, tip={}", new_headers.len(), tip); 76 | let new_headers = new_headers.into_iter().rev().copied(); // order by height 77 | self.update(new_headers.zip(1..).map(NewHeader::from).collect()) 78 | } 79 | 80 | /// Get the block hash at specified height (if exists) 81 | pub(crate) fn get_block_hash(&self, height: usize) -> Option { 82 | self.headers.get(height).map(|(hash, _header)| *hash) 83 | } 84 | 85 | /// Get the block header at specified height (if exists) 86 | pub(crate) fn get_block_header(&self, height: usize) -> Option<&BlockHeader> { 87 | self.headers.get(height).map(|(_hash, header)| header) 88 | } 89 | 90 | /// Get the block height given the specified hash (if exists) 91 | pub(crate) fn get_block_height(&self, blockhash: &BlockHash) -> Option { 92 | self.heights.get(blockhash).copied() 93 | } 94 | 95 | /// 
Update the chain with a list of new headers (possibly a reorg) 96 | pub(crate) fn update(&mut self, headers: Vec) { 97 | if let Some(first_height) = headers.first().map(|h| h.height) { 98 | for (hash, _header) in self.headers.drain(first_height..) { 99 | assert!(self.heights.remove(&hash).is_some()); 100 | } 101 | for (h, height) in headers.into_iter().zip(first_height..) { 102 | assert_eq!(h.height, height); 103 | assert_eq!(h.hash, h.header.block_hash()); 104 | assert!(self.heights.insert(h.hash, h.height).is_none()); 105 | self.headers.push((h.hash, h.header)); 106 | } 107 | info!( 108 | "chain updated: tip={}, height={}", 109 | self.headers.last().unwrap().0, 110 | self.headers.len() - 1 111 | ); 112 | } 113 | } 114 | 115 | /// Best block hash 116 | pub(crate) fn tip(&self) -> BlockHash { 117 | self.headers.last().expect("empty chain").0 118 | } 119 | 120 | /// Number of blocks (excluding genesis block) 121 | pub(crate) fn height(&self) -> usize { 122 | self.headers.len() - 1 123 | } 124 | 125 | /// List of block hashes for efficient fork detection and block/header sync 126 | /// see https://en.bitcoin.it/wiki/Protocol_documentation#getblocks 127 | pub(crate) fn locator(&self) -> Vec { 128 | let mut result = vec![]; 129 | let mut index = self.headers.len() - 1; 130 | let mut step = 1; 131 | loop { 132 | if result.len() >= 10 { 133 | step *= 2; 134 | } 135 | result.push(self.headers[index].0); 136 | if index == 0 { 137 | break; 138 | } 139 | index = index.saturating_sub(step); 140 | } 141 | result 142 | } 143 | } 144 | 145 | #[cfg(test)] 146 | mod tests { 147 | use super::{Chain, NewHeader}; 148 | use bitcoin::blockdata::block::Header as BlockHeader; 149 | use bitcoin::consensus::deserialize; 150 | use bitcoin::Network::Regtest; 151 | use hex_lit::hex; 152 | 153 | #[test] 154 | fn test_genesis() { 155 | let regtest = Chain::new(Regtest); 156 | assert_eq!(regtest.height(), 0); 157 | assert_eq!( 158 | regtest.tip(), 159 | 
"0f9188f13cb7b2c71f2a335e3a4fc328bf5beb436012afca590b1a11466e2206" 160 | .parse() 161 | .unwrap() 162 | ); 163 | } 164 | 165 | #[test] 166 | fn test_updates() { 167 | let byte_headers = [ 168 | hex!("0000002006226e46111a0b59caaf126043eb5bbf28c34f3a5e332a1fc7b2b73cf188910f1d14d3c7ff12d6adf494ebbcfba69baa915a066358b68a2b8c37126f74de396b1d61cc60ffff7f2000000000"), 169 | hex!("00000020d700ae5d3c705702e0a5d9ababd22ded079f8a63b880b1866321d6bfcb028c3fc816efcf0e84ccafa1dda26be337f58d41b438170c357cda33a68af5550590bc1e61cc60ffff7f2004000000"), 170 | hex!("00000020d13731bc59bc0989e06a5e7cab9843a4e17ad65c7ca47cd77f50dfd24f1f55793f7f342526aca9adb6ce8f33d8a07662c97d29d83b9e18117fb3eceecb2ab99b1e61cc60ffff7f2001000000"), 171 | hex!("00000020a603def3e1255cadfb6df072946327c58b344f9bfb133e8e3e280d1c2d55b31c731a68f70219472864a7cb010cd53dc7e0f67e57f7d08b97e5e092b0c3942ad51f61cc60ffff7f2001000000"), 172 | hex!("0000002041dd202b3b2edcdd3c8582117376347d48ff79ff97c95e5ac814820462012e785142dc360975b982ca43eecd14b4ba6f019041819d4fc5936255d7a2c45a96651f61cc60ffff7f2000000000"), 173 | hex!("0000002072e297a2d6b633c44f3c9b1a340d06f3ce4e6bcd79ebd4c4ff1c249a77e1e37c59c7be1ca0964452e1735c0d2740f0d98a11445a6140c36b55770b5c0bcf801f1f61cc60ffff7f2000000000"), 174 | hex!("000000200c9eb5889a8e924d1c4e8e79a716514579e41114ef37d72295df8869d6718e4ac5840f28de43ff25c7b9200aaf7873b20587c92827eaa61943484ca828bdd2e11f61cc60ffff7f2000000000"), 175 | hex!("000000205873f322b333933e656b07881bb399dae61a6c0fa74188b5fb0e3dd71c9e2442f9e2f433f54466900407cf6a9f676913dd54aad977f7b05afcd6dcd81e98ee752061cc60ffff7f2004000000"), 176 | hex!("00000020fd1120713506267f1dba2e1856ca1d4490077d261cde8d3e182677880df0d856bf94cfa5e189c85462813751ab4059643759ed319a81e0617113758f8adf67bc2061cc60ffff7f2000000000"), 177 | hex!("000000200030d7f9c11ef35b89a0eefb9a5e449909339b5e7854d99804ea8d6a49bf900a0304d2e55fe0b6415949cff9bca0f88c0717884a5e5797509f89f856af93624a2061cc60ffff7f2002000000"), 178 | ]; 179 | let headers: Vec = byte_headers 180 
| .iter() 181 | .map(|byte_header| deserialize(byte_header).unwrap()) 182 | .collect(); 183 | 184 | for chunk_size in 1..headers.len() { 185 | let mut regtest = Chain::new(Regtest); 186 | let mut height = 0; 187 | let mut tip = regtest.tip(); 188 | for chunk in headers.chunks(chunk_size) { 189 | let mut update = vec![]; 190 | for header in chunk { 191 | height += 1; 192 | tip = header.block_hash(); 193 | update.push(NewHeader::from((*header, height))) 194 | } 195 | regtest.update(update); 196 | assert_eq!(regtest.tip(), tip); 197 | assert_eq!(regtest.height(), height); 198 | } 199 | assert_eq!(regtest.tip(), headers.last().unwrap().block_hash()); 200 | assert_eq!(regtest.height(), headers.len()); 201 | } 202 | 203 | // test loading from a list of headers and tip 204 | let mut regtest = Chain::new(Regtest); 205 | regtest.load( 206 | headers.iter().copied(), 207 | headers.last().unwrap().block_hash(), 208 | ); 209 | assert_eq!(regtest.height(), headers.len()); 210 | 211 | // test getters 212 | for (header, height) in headers.iter().zip(1usize..) 
{ 213 | assert_eq!(regtest.get_block_header(height), Some(header)); 214 | assert_eq!(regtest.get_block_hash(height), Some(header.block_hash())); 215 | assert_eq!(regtest.get_block_height(&header.block_hash()), Some(height)); 216 | } 217 | 218 | // test chain shortening 219 | for i in (0..=headers.len()).rev() { 220 | let hash = regtest.get_block_hash(i).unwrap(); 221 | assert_eq!(regtest.get_block_height(&hash), Some(i)); 222 | assert_eq!(regtest.height(), i); 223 | assert_eq!(regtest.tip(), hash); 224 | regtest.drop_last_headers(1); 225 | } 226 | assert_eq!(regtest.height(), 0); 227 | assert_eq!( 228 | regtest.tip(), 229 | "0f9188f13cb7b2c71f2a335e3a4fc328bf5beb436012afca590b1a11466e2206" 230 | .parse() 231 | .unwrap() 232 | ); 233 | 234 | regtest.drop_last_headers(1); 235 | assert_eq!(regtest.height(), 0); 236 | assert_eq!( 237 | regtest.tip(), 238 | "0f9188f13cb7b2c71f2a335e3a4fc328bf5beb436012afca590b1a11466e2206" 239 | .parse() 240 | .unwrap() 241 | ); 242 | 243 | // test reorg 244 | let mut regtest = Chain::new(Regtest); 245 | regtest.load( 246 | headers.iter().copied(), 247 | headers.last().unwrap().block_hash(), 248 | ); 249 | let height = regtest.height(); 250 | 251 | let new_header: BlockHeader = deserialize(&hex!("000000200030d7f9c11ef35b89a0eefb9a5e449909339b5e7854d99804ea8d6a49bf900a0304d2e55fe0b6415949cff9bca0f88c0717884a5e5797509f89f856af93624a7a6bcc60ffff7f2000000000")).unwrap(); 252 | regtest.update(vec![NewHeader::from((new_header, height))]); 253 | assert_eq!(regtest.height(), height); 254 | assert_eq!( 255 | regtest.tip(), 256 | "0e16637fe0700a7c52e9a6eaa58bd6ac7202652103be8f778680c66f51ad2e9b" 257 | .parse() 258 | .unwrap() 259 | ); 260 | } 261 | } 262 | -------------------------------------------------------------------------------- /src/config.rs: -------------------------------------------------------------------------------- 1 | use bitcoin::p2p::Magic; 2 | use bitcoin::Network; 3 | use bitcoincore_rpc::Auth; 4 | use dirs_next::home_dir; 5 
| 6 | use std::ffi::{OsStr, OsString}; 7 | use std::fmt; 8 | use std::net::SocketAddr; 9 | use std::net::ToSocketAddrs; 10 | use std::path::PathBuf; 11 | use std::str::FromStr; 12 | 13 | use std::env::consts::{ARCH, OS}; 14 | use std::time::Duration; 15 | 16 | pub const ELECTRS_VERSION: &str = env!("CARGO_PKG_VERSION"); 17 | const DEFAULT_SERVER_ADDRESS: [u8; 4] = [127, 0, 0, 1]; // by default, serve on IPv4 localhost 18 | 19 | mod internal { 20 | include!(concat!(env!("OUT_DIR"), "/configure_me_config.rs")); 21 | } 22 | 23 | /// A simple error type representing invalid UTF-8 input. 24 | pub struct InvalidUtf8(OsString); 25 | 26 | impl fmt::Display for InvalidUtf8 { 27 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 28 | write!(f, "{:?} isn't a valid UTF-8 sequence", self.0) 29 | } 30 | } 31 | 32 | /// An error that might happen when resolving an address 33 | pub enum AddressError { 34 | ResolvError { addr: String, err: std::io::Error }, 35 | NoAddrError(String), 36 | } 37 | 38 | impl fmt::Display for AddressError { 39 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 40 | match self { 41 | AddressError::ResolvError { addr, err } => { 42 | write!(f, "Failed to resolve address {}: {}", addr, err) 43 | } 44 | AddressError::NoAddrError(addr) => write!(f, "No address found for {}", addr), 45 | } 46 | } 47 | } 48 | 49 | /// Newtype for an address that is parsed as `String` 50 | /// 51 | /// The main point of this newtype is to provide better description than what `String` type 52 | /// provides. 
53 | #[derive(Deserialize)] 54 | pub struct ResolvAddr(String); 55 | 56 | impl ::configure_me::parse_arg::ParseArg for ResolvAddr { 57 | type Error = InvalidUtf8; 58 | 59 | fn parse_arg(arg: &OsStr) -> std::result::Result { 60 | Self::parse_owned_arg(arg.to_owned()) 61 | } 62 | 63 | fn parse_owned_arg(arg: OsString) -> std::result::Result { 64 | arg.into_string().map_err(InvalidUtf8).map(ResolvAddr) 65 | } 66 | 67 | fn describe_type(mut writer: W) -> fmt::Result { 68 | write!(writer, "a network address (will be resolved if needed)") 69 | } 70 | } 71 | 72 | impl ResolvAddr { 73 | /// Resolves the address. 74 | fn resolve(self) -> std::result::Result { 75 | match self.0.to_socket_addrs() { 76 | Ok(mut iter) => iter.next().ok_or(AddressError::NoAddrError(self.0)), 77 | Err(err) => Err(AddressError::ResolvError { addr: self.0, err }), 78 | } 79 | } 80 | 81 | /// Resolves the address, but prints error and exits in case of failure. 82 | fn resolve_or_exit(self) -> SocketAddr { 83 | self.resolve().unwrap_or_else(|err| { 84 | eprintln!("Error: {}", err); 85 | std::process::exit(1) 86 | }) 87 | } 88 | } 89 | 90 | /// This newtype implements `ParseArg` for `Network`. 
91 | #[derive(Deserialize)] 92 | pub struct BitcoinNetwork(Network); 93 | 94 | impl Default for BitcoinNetwork { 95 | fn default() -> Self { 96 | BitcoinNetwork(Network::Bitcoin) 97 | } 98 | } 99 | 100 | impl FromStr for BitcoinNetwork { 101 | type Err = ::Err; 102 | 103 | fn from_str(string: &str) -> std::result::Result { 104 | Network::from_str(string).map(BitcoinNetwork) 105 | } 106 | } 107 | 108 | impl ::configure_me::parse_arg::ParseArgFromStr for BitcoinNetwork { 109 | fn describe_type(mut writer: W) -> fmt::Result { 110 | write!( 111 | writer, 112 | "either 'bitcoin', 'testnet', 'testnet4', 'regtest' or 'signet'" 113 | ) 114 | } 115 | } 116 | 117 | impl From for Network { 118 | fn from(network: BitcoinNetwork) -> Network { 119 | network.0 120 | } 121 | } 122 | 123 | /// Parsed and post-processed configuration 124 | #[derive(Debug)] 125 | pub struct Config { 126 | // See below for the documentation of each field: 127 | pub network: Network, 128 | pub db_path: PathBuf, 129 | pub db_log_dir: Option, 130 | pub db_parallelism: u8, 131 | pub daemon_auth: SensitiveAuth, 132 | pub daemon_rpc_addr: SocketAddr, 133 | pub daemon_p2p_addr: SocketAddr, 134 | pub electrum_rpc_addr: SocketAddr, 135 | pub monitoring_addr: SocketAddr, 136 | pub wait_duration: Duration, 137 | pub jsonrpc_timeout: Duration, 138 | pub index_batch_size: usize, 139 | pub index_lookup_limit: Option, 140 | pub reindex_last_blocks: usize, 141 | pub auto_reindex: bool, 142 | pub ignore_mempool: bool, 143 | pub sync_once: bool, 144 | pub skip_block_download_wait: bool, 145 | pub disable_electrum_rpc: bool, 146 | pub server_banner: String, 147 | pub signet_magic: Magic, 148 | } 149 | 150 | pub struct SensitiveAuth(pub Auth); 151 | 152 | impl SensitiveAuth { 153 | pub(crate) fn get_auth(&self) -> Auth { 154 | self.0.clone() 155 | } 156 | } 157 | 158 | impl fmt::Debug for SensitiveAuth { 159 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 160 | match self.0 { 161 | Auth::UserPass(ref user, 
_) => f 162 | .debug_tuple("UserPass") 163 | .field(&user) 164 | .field(&"") 165 | .finish(), 166 | _ => write!(f, "{:?}", self.0), 167 | } 168 | } 169 | } 170 | 171 | /// Returns default daemon directory 172 | fn default_daemon_dir() -> PathBuf { 173 | let mut home = home_dir().unwrap_or_else(|| { 174 | eprintln!("Error: unknown home directory"); 175 | std::process::exit(1) 176 | }); 177 | home.push(".bitcoin"); 178 | home 179 | } 180 | 181 | fn default_config_files() -> Vec { 182 | let mut files = vec![OsString::from("electrs.toml")]; // cwd 183 | if let Some(mut path) = home_dir() { 184 | path.extend([".electrs", "config.toml"]); 185 | files.push(OsString::from(path)) // home directory 186 | } 187 | files.push(OsString::from("/etc/electrs/config.toml")); // system-wide 188 | files 189 | } 190 | 191 | impl Config { 192 | /// Parses args, env vars, config files and post-processes them 193 | pub fn from_args() -> Config { 194 | use internal::prelude::ResultExt; 195 | 196 | let (mut config, _args) = 197 | internal::prelude::Config::including_optional_config_files(default_config_files()) 198 | .unwrap_or_exit(); 199 | 200 | fn unsupported_network(network: Network) -> ! 
{ 201 | eprintln!("Error: unsupported network: {}", network); 202 | std::process::exit(1); 203 | } 204 | 205 | let db_subdir = match config.network { 206 | Network::Bitcoin => "bitcoin", 207 | Network::Testnet => "testnet", 208 | Network::Testnet4 => "testnet4", 209 | Network::Regtest => "regtest", 210 | Network::Signet => "signet", 211 | unsupported => unsupported_network(unsupported), 212 | }; 213 | 214 | config.db_dir.push(db_subdir); 215 | 216 | let default_daemon_rpc_port = match config.network { 217 | Network::Bitcoin => 8332, 218 | Network::Testnet => 18332, 219 | Network::Testnet4 => 48332, 220 | Network::Regtest => 18443, 221 | Network::Signet => 38332, 222 | unsupported => unsupported_network(unsupported), 223 | }; 224 | let default_daemon_p2p_port = match config.network { 225 | Network::Bitcoin => 8333, 226 | Network::Testnet => 18333, 227 | Network::Testnet4 => 48333, 228 | Network::Regtest => 18444, 229 | Network::Signet => 38333, 230 | unsupported => unsupported_network(unsupported), 231 | }; 232 | let default_electrum_port = match config.network { 233 | Network::Bitcoin => 50001, 234 | Network::Testnet => 60001, 235 | Network::Testnet4 => 40001, 236 | Network::Regtest => 60401, 237 | Network::Signet => 60601, 238 | unsupported => unsupported_network(unsupported), 239 | }; 240 | let default_monitoring_port = match config.network { 241 | Network::Bitcoin => 4224, 242 | Network::Testnet => 14224, 243 | Network::Testnet4 => 44224, 244 | Network::Regtest => 24224, 245 | Network::Signet => 34224, 246 | unsupported => unsupported_network(unsupported), 247 | }; 248 | 249 | let magic = match (config.network, config.signet_magic) { 250 | (Network::Signet, Some(magic)) => magic.parse().unwrap_or_else(|error| { 251 | eprintln!( 252 | "Error: signet magic '{}' is not a valid hex string: {}", 253 | magic, error 254 | ); 255 | std::process::exit(1); 256 | }), 257 | (network, None) => network.magic(), 258 | (_, Some(_)) => { 259 | eprintln!("Error: signet magic only 
available on signet"); 260 | std::process::exit(1); 261 | } 262 | }; 263 | 264 | let daemon_rpc_addr: SocketAddr = config.daemon_rpc_addr.map_or( 265 | (DEFAULT_SERVER_ADDRESS, default_daemon_rpc_port).into(), 266 | ResolvAddr::resolve_or_exit, 267 | ); 268 | let daemon_p2p_addr: SocketAddr = config.daemon_p2p_addr.map_or( 269 | (DEFAULT_SERVER_ADDRESS, default_daemon_p2p_port).into(), 270 | ResolvAddr::resolve_or_exit, 271 | ); 272 | let electrum_rpc_addr: SocketAddr = config.electrum_rpc_addr.map_or( 273 | (DEFAULT_SERVER_ADDRESS, default_electrum_port).into(), 274 | ResolvAddr::resolve_or_exit, 275 | ); 276 | #[cfg(not(feature = "metrics"))] 277 | { 278 | if config.monitoring_addr.is_some() { 279 | eprintln!("Error: enable \"metrics\" feature to specify monitoring_addr"); 280 | std::process::exit(1); 281 | } 282 | } 283 | let monitoring_addr: SocketAddr = config.monitoring_addr.map_or( 284 | (DEFAULT_SERVER_ADDRESS, default_monitoring_port).into(), 285 | ResolvAddr::resolve_or_exit, 286 | ); 287 | 288 | match config.network { 289 | Network::Bitcoin => (), 290 | Network::Testnet => config.daemon_dir.push("testnet3"), 291 | Network::Testnet4 => config.daemon_dir.push("testnet4"), 292 | Network::Regtest => config.daemon_dir.push("regtest"), 293 | Network::Signet => config.daemon_dir.push("signet"), 294 | unsupported => unsupported_network(unsupported), 295 | } 296 | 297 | let mut deprecated_options_used = false; 298 | 299 | if config.timestamp { 300 | eprintln!( 301 | "Error: `timestamp` is deprecated, timestamps on logs is (and was) always \ 302 | enabled, please remove this option." 
303 | ); 304 | deprecated_options_used = true; 305 | } 306 | 307 | if config.verbose > 0 { 308 | eprintln!("Error: please use `log_filters` to set logging verbosity",); 309 | deprecated_options_used = true; 310 | } 311 | 312 | if deprecated_options_used { 313 | std::process::exit(1); 314 | } 315 | 316 | let daemon_dir = &config.daemon_dir; 317 | let daemon_auth = SensitiveAuth(match (config.auth, config.cookie_file) { 318 | (None, None) => Auth::CookieFile(daemon_dir.join(".cookie")), 319 | (None, Some(cookie_file)) => Auth::CookieFile(cookie_file), 320 | (Some(auth), None) => { 321 | let parts: Vec<&str> = auth.splitn(2, ':').collect(); 322 | if parts.len() != 2 { 323 | eprintln!("Error: auth cookie doesn't contain colon"); 324 | std::process::exit(1); 325 | } 326 | Auth::UserPass(parts[0].to_owned(), parts[1].to_owned()) 327 | } 328 | (Some(_), Some(_)) => { 329 | eprintln!("Error: ambiguous configuration - auth and cookie_file can't be specified at the same time"); 330 | std::process::exit(1); 331 | } 332 | }); 333 | 334 | let log_filters = config.log_filters; 335 | 336 | let index_lookup_limit = match config.index_lookup_limit { 337 | 0 => None, 338 | _ => Some(config.index_lookup_limit), 339 | }; 340 | 341 | if config.jsonrpc_timeout_secs <= config.wait_duration_secs { 342 | eprintln!( 343 | "Error: jsonrpc_timeout_secs ({}) must be higher than wait_duration_secs ({})", 344 | config.jsonrpc_timeout_secs, config.wait_duration_secs 345 | ); 346 | std::process::exit(1); 347 | } 348 | 349 | if config.version { 350 | println!("v{}", ELECTRS_VERSION); 351 | std::process::exit(0); 352 | } 353 | 354 | let config = Config { 355 | network: config.network, 356 | db_path: config.db_dir, 357 | db_log_dir: config.db_log_dir, 358 | db_parallelism: config.db_parallelism, 359 | daemon_auth, 360 | daemon_rpc_addr, 361 | daemon_p2p_addr, 362 | electrum_rpc_addr, 363 | monitoring_addr, 364 | wait_duration: Duration::from_secs(config.wait_duration_secs), 365 | jsonrpc_timeout: 
Duration::from_secs(config.jsonrpc_timeout_secs), 366 | index_batch_size: config.index_batch_size, 367 | index_lookup_limit, 368 | reindex_last_blocks: config.reindex_last_blocks, 369 | auto_reindex: config.auto_reindex, 370 | ignore_mempool: config.ignore_mempool, 371 | sync_once: config.sync_once, 372 | skip_block_download_wait: config.skip_block_download_wait, 373 | disable_electrum_rpc: config.disable_electrum_rpc, 374 | server_banner: config.server_banner, 375 | signet_magic: magic, 376 | }; 377 | eprintln!( 378 | "Starting electrs {} on {} {} with {:?}", 379 | ELECTRS_VERSION, ARCH, OS, config 380 | ); 381 | let mut builder = env_logger::Builder::from_default_env(); 382 | builder.default_format().format_timestamp_millis(); 383 | if let Some(log_filters) = &log_filters { 384 | builder.parse_filters(log_filters); 385 | } 386 | builder.init(); 387 | 388 | config 389 | } 390 | } 391 | 392 | #[cfg(test)] 393 | mod tests { 394 | use super::{Auth, SensitiveAuth}; 395 | use std::path::Path; 396 | 397 | #[test] 398 | fn test_auth_debug() { 399 | let auth = Auth::None; 400 | assert_eq!(format!("{:?}", SensitiveAuth(auth)), "None"); 401 | 402 | let auth = Auth::CookieFile(Path::new("/foo/bar/.cookie").to_path_buf()); 403 | assert_eq!( 404 | format!("{:?}", SensitiveAuth(auth)), 405 | "CookieFile(\"/foo/bar/.cookie\")" 406 | ); 407 | 408 | let auth = Auth::UserPass("user".to_owned(), "pass".to_owned()); 409 | assert_eq!( 410 | format!("{:?}", SensitiveAuth(auth)), 411 | "UserPass(\"user\", \"\")" 412 | ); 413 | } 414 | } 415 | -------------------------------------------------------------------------------- /src/daemon.rs: -------------------------------------------------------------------------------- 1 | use anyhow::{Context, Result}; 2 | 3 | use bitcoin::{consensus::deserialize, hashes::hex::FromHex}; 4 | use bitcoin::{Amount, BlockHash, Transaction, Txid}; 5 | use bitcoincore_rpc::{json, jsonrpc, Auth, Client, RpcApi}; 6 | use crossbeam_channel::Receiver; 7 | use 
parking_lot::Mutex; 8 | use serde::Serialize; 9 | use serde_json::{json, value::RawValue, Value}; 10 | 11 | use std::fs::File; 12 | use std::io::Read; 13 | use std::path::Path; 14 | 15 | use crate::{ 16 | chain::{Chain, NewHeader}, 17 | config::Config, 18 | metrics::Metrics, 19 | p2p::Connection, 20 | signals::ExitFlag, 21 | types::SerBlock, 22 | }; 23 | 24 | enum PollResult { 25 | Done(Result<()>), 26 | Retry, 27 | } 28 | 29 | fn rpc_poll(client: &mut Client, skip_block_download_wait: bool) -> PollResult { 30 | match client.get_blockchain_info() { 31 | Ok(info) => { 32 | if skip_block_download_wait { 33 | // bitcoind RPC is available, don't wait for block download to finish 34 | return PollResult::Done(Ok(())); 35 | } 36 | let left_blocks = info.headers - info.blocks; 37 | if info.initial_block_download || left_blocks > 0 { 38 | info!( 39 | "waiting for {} blocks to download{}", 40 | left_blocks, 41 | if info.initial_block_download { 42 | " (IBD)" 43 | } else { 44 | "" 45 | } 46 | ); 47 | return PollResult::Retry; 48 | } 49 | PollResult::Done(Ok(())) 50 | } 51 | Err(err) => { 52 | if let Some(e) = extract_bitcoind_error(&err) { 53 | if e.code == -28 { 54 | debug!("waiting for RPC warmup: {}", e.message); 55 | return PollResult::Retry; 56 | } 57 | } 58 | PollResult::Done(Err(err).context("daemon not available")) 59 | } 60 | } 61 | } 62 | 63 | fn read_cookie(path: &Path) -> Result<(String, String)> { 64 | // Load username and password from bitcoind cookie file: 65 | // * https://github.com/bitcoin/bitcoin/pull/6388/commits/71cbeaad9a929ba6a7b62d9b37a09b214ae00c1a 66 | // * https://bitcoin.stackexchange.com/questions/46782/rpc-cookie-authentication 67 | let mut file = File::open(path) 68 | .with_context(|| format!("failed to open bitcoind cookie file: {}", path.display()))?; 69 | let mut contents = String::new(); 70 | file.read_to_string(&mut contents) 71 | .with_context(|| format!("failed to read bitcoind cookie from {}", path.display()))?; 72 | 73 | let parts: 
Vec<&str> = contents.splitn(2, ':').collect(); 74 | ensure!( 75 | parts.len() == 2, 76 | "failed to parse bitcoind cookie - missing ':' separator" 77 | ); 78 | Ok((parts[0].to_owned(), parts[1].to_owned())) 79 | } 80 | 81 | fn rpc_connect(config: &Config) -> Result { 82 | let rpc_url = format!("http://{}", config.daemon_rpc_addr); 83 | // Allow `wait_for_new_block` to take a bit longer before timing out. 84 | // See https://github.com/romanz/electrs/issues/495 for more details. 85 | let builder = jsonrpc::simple_http::SimpleHttpTransport::builder() 86 | .url(&rpc_url)? 87 | .timeout(config.jsonrpc_timeout); 88 | let builder = match config.daemon_auth.get_auth() { 89 | Auth::None => builder, 90 | Auth::UserPass(user, pass) => builder.auth(user, Some(pass)), 91 | Auth::CookieFile(path) => { 92 | let (user, pass) = read_cookie(&path)?; 93 | builder.auth(user, Some(pass)) 94 | } 95 | }; 96 | Ok(Client::from_jsonrpc(jsonrpc::Client::with_transport( 97 | builder.build(), 98 | ))) 99 | } 100 | 101 | pub struct Daemon { 102 | p2p: Mutex, 103 | rpc: Client, 104 | } 105 | 106 | impl Daemon { 107 | pub(crate) fn connect( 108 | config: &Config, 109 | exit_flag: &ExitFlag, 110 | metrics: &Metrics, 111 | ) -> Result { 112 | let mut rpc = rpc_connect(config)?; 113 | 114 | loop { 115 | exit_flag 116 | .poll() 117 | .context("bitcoin RPC polling interrupted")?; 118 | match rpc_poll(&mut rpc, config.skip_block_download_wait) { 119 | PollResult::Done(result) => { 120 | result.context("bitcoind RPC polling failed")?; 121 | break; // on success, finish polling 122 | } 123 | PollResult::Retry => { 124 | std::thread::sleep(std::time::Duration::from_secs(1)); // wait a bit before polling 125 | } 126 | } 127 | } 128 | 129 | let network_info = rpc.get_network_info()?; 130 | if network_info.version < 21_00_00 { 131 | bail!("electrs requires bitcoind 0.21+"); 132 | } 133 | if !network_info.network_active { 134 | bail!("electrs requires active bitcoind p2p network"); 135 | } 136 | let info = 
rpc.get_blockchain_info()?; 137 | if info.pruned { 138 | bail!("electrs requires non-pruned bitcoind node"); 139 | } 140 | 141 | let p2p = Mutex::new(Connection::connect( 142 | config.network, 143 | config.daemon_p2p_addr, 144 | metrics, 145 | config.signet_magic, 146 | )?); 147 | Ok(Self { p2p, rpc }) 148 | } 149 | 150 | pub(crate) fn estimate_fee(&self, nblocks: u16) -> Result> { 151 | let res = self.rpc.estimate_smart_fee(nblocks, None); 152 | if let Err(bitcoincore_rpc::Error::JsonRpc(jsonrpc::Error::Rpc(RpcError { 153 | code: -32603, 154 | .. 155 | }))) = res 156 | { 157 | return Ok(None); // don't fail when fee estimation is disabled (e.g. with `-blocksonly=1`) 158 | } 159 | Ok(res.context("failed to estimate fee")?.fee_rate) 160 | } 161 | 162 | pub(crate) fn get_relay_fee(&self) -> Result { 163 | Ok(self 164 | .rpc 165 | .get_network_info() 166 | .context("failed to get relay fee")? 167 | .relay_fee) 168 | } 169 | 170 | pub(crate) fn broadcast(&self, tx: &Transaction) -> Result { 171 | self.rpc 172 | .send_raw_transaction(tx) 173 | .context("failed to broadcast transaction") 174 | } 175 | 176 | pub(crate) fn get_transaction_info( 177 | &self, 178 | txid: &Txid, 179 | blockhash: Option, 180 | ) -> Result { 181 | // No need to parse the resulting JSON, just return it as-is to the client. 
182 | self.rpc 183 | .call( 184 | "getrawtransaction", 185 | &[json!(txid), json!(true), json!(blockhash)], 186 | ) 187 | .context("failed to get transaction info") 188 | } 189 | 190 | pub(crate) fn get_transaction_hex( 191 | &self, 192 | txid: &Txid, 193 | blockhash: Option, 194 | ) -> Result { 195 | use bitcoin::consensus::serde::{hex::Lower, Hex, With}; 196 | 197 | let tx = self.get_transaction(txid, blockhash)?; 198 | #[derive(serde::Serialize)] 199 | #[serde(transparent)] 200 | struct TxAsHex(#[serde(with = "With::>")] Transaction); 201 | serde_json::to_value(TxAsHex(tx)).map_err(Into::into) 202 | } 203 | 204 | pub(crate) fn get_transaction( 205 | &self, 206 | txid: &Txid, 207 | blockhash: Option, 208 | ) -> Result { 209 | self.rpc 210 | .get_raw_transaction(txid, blockhash.as_ref()) 211 | .context("failed to get transaction") 212 | } 213 | 214 | pub(crate) fn get_block_txids(&self, blockhash: BlockHash) -> Result> { 215 | Ok(self 216 | .rpc 217 | .get_block_info(&blockhash) 218 | .context("failed to get block txids")? 
219 | .tx) 220 | } 221 | 222 | pub(crate) fn get_mempool_info(&self) -> Result { 223 | self.rpc 224 | .get_mempool_info() 225 | .context("failed to get mempool info") 226 | } 227 | 228 | pub(crate) fn get_mempool_txids(&self) -> Result> { 229 | self.rpc 230 | .get_raw_mempool() 231 | .context("failed to get mempool txids") 232 | } 233 | 234 | pub(crate) fn get_mempool_entries( 235 | &self, 236 | txids: &[Txid], 237 | ) -> Result>> { 238 | let results = batch_request(self.rpc.get_jsonrpc_client(), "getmempoolentry", txids)?; 239 | Ok(results 240 | .into_iter() 241 | .map(|r| match r?.result::() { 242 | Ok(entry) => Some(entry), 243 | Err(err) => { 244 | debug!("failed to get mempool entry: {}", err); // probably due to RBF 245 | None 246 | } 247 | }) 248 | .collect()) 249 | } 250 | 251 | pub(crate) fn get_mempool_transactions( 252 | &self, 253 | txids: &[Txid], 254 | ) -> Result>> { 255 | let results = batch_request(self.rpc.get_jsonrpc_client(), "getrawtransaction", txids)?; 256 | Ok(results 257 | .into_iter() 258 | .map(|r| -> Option { 259 | let tx_hex = match r?.result::() { 260 | Ok(tx_hex) => Some(tx_hex), 261 | Err(err) => { 262 | debug!("failed to get mempool tx: {}", err); // probably due to RBF 263 | None 264 | } 265 | }?; 266 | let tx_bytes = match Vec::from_hex(&tx_hex) { 267 | Ok(tx_bytes) => Some(tx_bytes), 268 | Err(err) => { 269 | warn!("got non-hex transaction {}: {}", tx_hex, err); 270 | None 271 | } 272 | }?; 273 | match deserialize(&tx_bytes) { 274 | Ok(tx) => Some(tx), 275 | Err(err) => { 276 | warn!("got invalid tx {}: {}", tx_hex, err); 277 | None 278 | } 279 | } 280 | }) 281 | .collect()) 282 | } 283 | 284 | pub(crate) fn get_new_headers(&self, chain: &Chain) -> Result> { 285 | self.p2p.lock().get_new_headers(chain) 286 | } 287 | 288 | pub(crate) fn for_blocks(&self, blockhashes: B, func: F) -> Result<()> 289 | where 290 | B: IntoIterator, 291 | F: FnMut(BlockHash, SerBlock), 292 | { 293 | self.p2p.lock().for_blocks(blockhashes, func) 294 | } 
295 | 296 | pub(crate) fn new_block_notification(&self) -> Receiver<()> { 297 | self.p2p.lock().new_block_notification() 298 | } 299 | } 300 | 301 | pub(crate) type RpcError = bitcoincore_rpc::jsonrpc::error::RpcError; 302 | 303 | pub(crate) fn extract_bitcoind_error(err: &bitcoincore_rpc::Error) -> Option<&RpcError> { 304 | use bitcoincore_rpc::{ 305 | jsonrpc::error::Error::Rpc as ServerError, Error::JsonRpc as JsonRpcError, 306 | }; 307 | match err { 308 | JsonRpcError(ServerError(e)) => Some(e), 309 | _ => None, 310 | } 311 | } 312 | 313 | fn batch_request( 314 | client: &jsonrpc::Client, 315 | name: &str, 316 | items: &[T], 317 | ) -> Result>> 318 | where 319 | T: Serialize, 320 | { 321 | debug!("calling {} on {} items", name, items.len()); 322 | let args: Vec> = items 323 | .iter() 324 | .map(|item| jsonrpc::try_arg([item]).context("failed to serialize into JSON")) 325 | .collect::>>()?; 326 | let reqs: Vec = args 327 | .iter() 328 | .map(|arg| client.build_request(name, Some(arg))) 329 | .collect(); 330 | match client.send_batch(&reqs) { 331 | Ok(values) => { 332 | assert_eq!(items.len(), values.len()); 333 | Ok(values) 334 | } 335 | Err(err) => bail!("batch {} request failed: {}", name, err), 336 | } 337 | } 338 | -------------------------------------------------------------------------------- /src/index.rs: -------------------------------------------------------------------------------- 1 | use anyhow::{Context, Result}; 2 | use bitcoin::consensus::{deserialize, Decodable, Encodable}; 3 | use bitcoin::hashes::Hash; 4 | use bitcoin::{BlockHash, OutPoint, Txid}; 5 | use bitcoin_slices::{bsl, Visit, Visitor}; 6 | use std::ops::ControlFlow; 7 | use std::thread; 8 | 9 | use crate::{ 10 | chain::{Chain, NewHeader}, 11 | daemon::Daemon, 12 | db::{DBStore, WriteBatch}, 13 | metrics::{self, Gauge, Histogram, Metrics}, 14 | signals::ExitFlag, 15 | types::{ 16 | bsl_txid, HashPrefixRow, HeaderRow, ScriptHash, ScriptHashRow, SerBlock, SpendingPrefixRow, 17 | TxidRow, 
18 | }, 19 | }; 20 | 21 | #[derive(Clone)] 22 | struct Stats { 23 | update_duration: Histogram, 24 | update_size: Histogram, 25 | height: Gauge, 26 | db_properties: Gauge, 27 | } 28 | 29 | impl Stats { 30 | fn new(metrics: &Metrics) -> Self { 31 | Self { 32 | update_duration: metrics.histogram_vec( 33 | "index_update_duration", 34 | "Index update duration (in seconds)", 35 | "step", 36 | metrics::default_duration_buckets(), 37 | ), 38 | update_size: metrics.histogram_vec( 39 | "index_update_size", 40 | "Index update size (in bytes)", 41 | "step", 42 | metrics::default_size_buckets(), 43 | ), 44 | height: metrics.gauge("index_height", "Indexed block height", "type"), 45 | db_properties: metrics.gauge("index_db_properties", "Index DB properties", "name"), 46 | } 47 | } 48 | 49 | fn observe_duration(&self, label: &str, f: impl FnOnce() -> T) -> T { 50 | self.update_duration.observe_duration(label, f) 51 | } 52 | 53 | fn observe_size(&self, label: &str, rows: &[[u8; N]]) { 54 | self.update_size.observe(label, (rows.len() * N) as f64); 55 | } 56 | 57 | fn observe_batch(&self, batch: &WriteBatch) { 58 | self.observe_size("write_funding_rows", &batch.funding_rows); 59 | self.observe_size("write_spending_rows", &batch.spending_rows); 60 | self.observe_size("write_txid_rows", &batch.txid_rows); 61 | self.observe_size("write_header_rows", &batch.header_rows); 62 | debug!( 63 | "writing {} funding and {} spending rows from {} transactions, {} blocks", 64 | batch.funding_rows.len(), 65 | batch.spending_rows.len(), 66 | batch.txid_rows.len(), 67 | batch.header_rows.len() 68 | ); 69 | } 70 | 71 | fn observe_chain(&self, chain: &Chain) { 72 | self.height.set("tip", chain.height() as f64); 73 | } 74 | 75 | fn observe_db(&self, store: &DBStore) { 76 | for (cf, name, value) in store.get_properties() { 77 | self.db_properties 78 | .set(&format!("{}:{}", name, cf), value as f64); 79 | } 80 | } 81 | } 82 | 83 | /// Confirmed transactions' address index 84 | pub struct Index { 85 | 
store: DBStore, 86 | batch_size: usize, 87 | lookup_limit: Option, 88 | chain: Chain, 89 | stats: Stats, 90 | is_ready: bool, 91 | flush_needed: bool, 92 | } 93 | 94 | impl Index { 95 | pub(crate) fn load( 96 | store: DBStore, 97 | mut chain: Chain, 98 | metrics: &Metrics, 99 | batch_size: usize, 100 | lookup_limit: Option, 101 | reindex_last_blocks: usize, 102 | ) -> Result { 103 | if let Some(row) = store.get_tip() { 104 | let tip = deserialize(&row).expect("invalid tip"); 105 | let headers = store 106 | .iter_headers() 107 | .map(|row| HeaderRow::from_db_row(row).header); 108 | chain.load(headers, tip); 109 | chain.drop_last_headers(reindex_last_blocks); 110 | }; 111 | let stats = Stats::new(metrics); 112 | stats.observe_chain(&chain); 113 | stats.observe_db(&store); 114 | Ok(Index { 115 | store, 116 | batch_size, 117 | lookup_limit, 118 | chain, 119 | stats, 120 | is_ready: false, 121 | flush_needed: false, 122 | }) 123 | } 124 | 125 | pub(crate) fn chain(&self) -> &Chain { 126 | &self.chain 127 | } 128 | 129 | pub(crate) fn limit_result(&self, entries: impl Iterator) -> Result> { 130 | let mut entries = entries.fuse(); 131 | let result: Vec = match self.lookup_limit { 132 | Some(lookup_limit) => entries.by_ref().take(lookup_limit).collect(), 133 | None => entries.by_ref().collect(), 134 | }; 135 | if entries.next().is_some() { 136 | bail!(">{} index entries, query may take too long", result.len()) 137 | } 138 | Ok(result) 139 | } 140 | 141 | pub(crate) fn filter_by_txid(&self, txid: Txid) -> impl Iterator + '_ { 142 | self.store 143 | .iter_txid(TxidRow::scan_prefix(txid)) 144 | .map(|row| HashPrefixRow::from_db_row(row).height()) 145 | .filter_map(move |height| self.chain.get_block_hash(height)) 146 | } 147 | 148 | pub(crate) fn filter_by_funding( 149 | &self, 150 | scripthash: ScriptHash, 151 | ) -> impl Iterator + '_ { 152 | self.store 153 | .iter_funding(ScriptHashRow::scan_prefix(scripthash)) 154 | .map(|row| HashPrefixRow::from_db_row(row).height()) 155 
| .filter_map(move |height| self.chain.get_block_hash(height)) 156 | } 157 | 158 | pub(crate) fn filter_by_spending( 159 | &self, 160 | outpoint: OutPoint, 161 | ) -> impl Iterator + '_ { 162 | self.store 163 | .iter_spending(SpendingPrefixRow::scan_prefix(outpoint)) 164 | .map(|row| HashPrefixRow::from_db_row(row).height()) 165 | .filter_map(move |height| self.chain.get_block_hash(height)) 166 | } 167 | 168 | // Return `Ok(true)` when the chain is fully synced and the index is compacted. 169 | pub(crate) fn sync(&mut self, daemon: &Daemon, exit_flag: &ExitFlag) -> Result { 170 | let new_headers = self 171 | .stats 172 | .observe_duration("headers", || daemon.get_new_headers(&self.chain))?; 173 | match (new_headers.first(), new_headers.last()) { 174 | (Some(first), Some(last)) => { 175 | let count = new_headers.len(); 176 | info!( 177 | "indexing {} blocks: [{}..{}]", 178 | count, 179 | first.height(), 180 | last.height() 181 | ); 182 | } 183 | _ => { 184 | if self.flush_needed { 185 | self.store.flush(); // full compaction is performed on the first flush call 186 | self.flush_needed = false; 187 | } 188 | self.is_ready = true; 189 | return Ok(true); // no more blocks to index (done for now) 190 | } 191 | } 192 | 193 | thread::scope(|scope| -> Result<()> { 194 | let (tx, rx) = crossbeam_channel::bounded(1); 195 | 196 | let chunks = new_headers.chunks(self.batch_size); 197 | let index = &self; // to be moved into reader thread 198 | let reader = thread::Builder::new() 199 | .name("index_build".into()) 200 | .spawn_scoped(scope, move || -> Result<()> { 201 | for chunk in chunks { 202 | exit_flag.poll().with_context(|| { 203 | format!( 204 | "indexing interrupted at height: {}", 205 | chunk.first().unwrap().height() 206 | ) 207 | })?; 208 | let batch = index.index_blocks(daemon, chunk)?; 209 | tx.send(batch).context("writer disconnected")?; 210 | } 211 | Ok(()) // `tx` is dropped, to stop the iteration on `rx` 212 | }) 213 | .expect("spawn failed"); 214 | 215 | let 
/// Index a single serialized block at `height`, appending txid, funding
/// (output scripthash), spending (prevout) and header rows to `batch`,
/// and storing the block hash into `batch.tip_row`.
fn index_single_block(
    block_hash: BlockHash,
    block: SerBlock,
    height: usize,
    batch: &mut WriteBatch,
) {
    // Visitor that collects index rows while `bsl` walks the raw block.
    struct IndexBlockVisitor<'a> {
        batch: &'a mut WriteBatch, // rows are appended here
        height: usize,             // height of the block being indexed
    }

    impl Visitor for IndexBlockVisitor<'_> {
        // Map the transaction's txid to this block's height.
        fn visit_transaction(&mut self, tx: &bsl::Transaction) -> ControlFlow<()> {
            let txid = bsl_txid(tx);
            self.batch
                .txid_rows
                .push(TxidRow::row(txid, self.height).to_db_row());
            ControlFlow::Continue(())
        }

        // Map the funded output's scripthash to this block's height.
        fn visit_tx_out(&mut self, _vout: usize, tx_out: &bsl::TxOut) -> ControlFlow<()> {
            let script = bitcoin::Script::from_bytes(tx_out.script_pubkey());
            // skip indexing unspendable outputs
            if !script.is_op_return() {
                let row = ScriptHashRow::row(ScriptHash::new(script), self.height);
                self.batch.funding_rows.push(row.to_db_row());
            }
            ControlFlow::Continue(())
        }

        // Map the spent prevout to this block's height.
        fn visit_tx_in(&mut self, _vin: usize, tx_in: &bsl::TxIn) -> ControlFlow<()> {
            let prevout: OutPoint = tx_in.prevout().into();
            // skip indexing coinbase transactions' input
            if !prevout.is_null() {
                let row = SpendingPrefixRow::row(prevout, self.height);
                self.batch.spending_rows.push(row.to_db_row());
            }
            ControlFlow::Continue(())
        }

        fn visit_block_header(&mut self, header: &bsl::BlockHeader) -> ControlFlow<()> {
            let header = bitcoin::block::Header::consensus_decode(&mut header.as_ref())
                .expect("block header was already validated");
            self.batch
                .header_rows
                .push(HeaderRow::new(header).to_db_row());
            ControlFlow::Continue(())
        }
    }

    let mut index_block = IndexBlockVisitor { batch, height };
    bsl::Block::visit(&block, &mut index_block).expect("core returned invalid block");

    // Record this block's hash as the DB tip row.
    let len = block_hash
        .consensus_encode(&mut (&mut batch.tip_row as &mut [u8]))
        .expect("in-memory writers don't error");
    debug_assert_eq!(len, BlockHash::LEN);
}
| extern crate serde_derive; 9 | 10 | mod cache; 11 | mod chain; 12 | mod config; 13 | mod daemon; 14 | mod db; 15 | mod electrum; 16 | mod index; 17 | mod mempool; 18 | mod merkle; 19 | mod metrics; 20 | mod p2p; 21 | mod server; 22 | mod signals; 23 | mod status; 24 | mod thread; 25 | mod tracker; 26 | mod types; 27 | 28 | pub use server::run; 29 | -------------------------------------------------------------------------------- /src/mempool.rs: -------------------------------------------------------------------------------- 1 | use anyhow::{Context, Result}; 2 | 3 | use std::collections::{BTreeSet, HashMap, HashSet}; 4 | use std::convert::TryFrom; 5 | use std::iter::FromIterator; 6 | use std::ops::Bound; 7 | 8 | use bitcoin::hashes::Hash; 9 | use bitcoin::{Amount, OutPoint, Transaction, Txid}; 10 | use serde::ser::{Serialize, SerializeSeq, Serializer}; 11 | 12 | use crate::{ 13 | daemon::Daemon, 14 | metrics::{Gauge, Metrics}, 15 | signals::ExitFlag, 16 | types::ScriptHash, 17 | }; 18 | 19 | pub(crate) struct Entry { 20 | pub txid: Txid, 21 | pub tx: Transaction, 22 | pub fee: Amount, 23 | pub vsize: u64, 24 | pub has_unconfirmed_inputs: bool, 25 | } 26 | 27 | /// Mempool current state 28 | pub(crate) struct Mempool { 29 | entries: HashMap, 30 | by_funding: BTreeSet<(ScriptHash, Txid)>, 31 | by_spending: BTreeSet<(OutPoint, Txid)>, 32 | fees: FeeHistogram, 33 | // stats 34 | vsize: Gauge, 35 | count: Gauge, 36 | } 37 | 38 | /// An update to [`Mempool`]'s internal state. This can be fetched 39 | /// asynchronously using [`MempoolSyncUpdate::poll`], and applied 40 | /// using [`Mempool::apply_sync_update`]. 41 | pub(crate) struct MempoolSyncUpdate { 42 | new_entries: Vec, 43 | removed_entries: HashSet, 44 | } 45 | 46 | impl MempoolSyncUpdate { 47 | /// Poll the bitcoin node and compute a [`MempoolSyncUpdate`] based on the given set of 48 | /// `old_txids` which are already cached. 
49 | pub fn poll( 50 | daemon: &Daemon, 51 | old_txids: HashSet, 52 | exit_flag: &ExitFlag, 53 | ) -> Result { 54 | let txids = daemon.get_mempool_txids()?; 55 | debug!("loading {} mempool transactions", txids.len()); 56 | 57 | let new_txids = HashSet::::from_iter(txids); 58 | 59 | let to_add = &new_txids - &old_txids; 60 | let to_remove = &old_txids - &new_txids; 61 | 62 | let to_add: Vec = to_add.into_iter().collect(); 63 | let mut new_entries = Vec::with_capacity(to_add.len()); 64 | 65 | for txids_chunk in to_add.chunks(1000) { 66 | exit_flag.poll().context("mempool update interrupted")?; 67 | let entries = daemon.get_mempool_entries(txids_chunk)?; 68 | ensure!( 69 | txids_chunk.len() == entries.len(), 70 | "got {} mempools entries, expected {}", 71 | entries.len(), 72 | txids_chunk.len() 73 | ); 74 | let txs = daemon.get_mempool_transactions(txids_chunk)?; 75 | ensure!( 76 | txids_chunk.len() == txs.len(), 77 | "got {} mempools transactions, expected {}", 78 | txs.len(), 79 | txids_chunk.len() 80 | ); 81 | let chunk_entries: Vec = txids_chunk 82 | .iter() 83 | .zip(entries.into_iter().zip(txs.into_iter())) 84 | .filter_map(|(txid, (entry, tx))| { 85 | let entry = match entry { 86 | Some(entry) => entry, 87 | None => { 88 | debug!("missing mempool entry: {}", txid); 89 | return None; 90 | } 91 | }; 92 | let tx = match tx { 93 | Some(tx) => tx, 94 | None => { 95 | debug!("missing mempool tx: {}", txid); 96 | return None; 97 | } 98 | }; 99 | Some(Entry { 100 | txid: *txid, 101 | tx, 102 | vsize: entry.vsize, 103 | fee: entry.fees.base, 104 | has_unconfirmed_inputs: !entry.depends.is_empty(), 105 | }) 106 | }) 107 | .collect(); 108 | 109 | new_entries.extend(chunk_entries); 110 | } 111 | 112 | let update = MempoolSyncUpdate { 113 | new_entries, 114 | removed_entries: to_remove, 115 | }; 116 | Ok(update) 117 | } 118 | } 119 | 120 | // Smallest possible txid 121 | fn txid_min() -> Txid { 122 | Txid::all_zeros() 123 | } 124 | 125 | // Largest possible txid 126 | fn 
txid_max() -> Txid { 127 | Txid::from_byte_array([0xFF; 32]) 128 | } 129 | 130 | impl Mempool { 131 | pub fn new(metrics: &Metrics) -> Self { 132 | Self { 133 | entries: Default::default(), 134 | by_funding: Default::default(), 135 | by_spending: Default::default(), 136 | fees: FeeHistogram::default(), 137 | vsize: metrics.gauge( 138 | "mempool_txs_vsize", 139 | "Total vsize of mempool transactions (in bytes)", 140 | "fee_rate", 141 | ), 142 | count: metrics.gauge( 143 | "mempool_txs_count", 144 | "Total number of mempool transactions", 145 | "fee_rate", 146 | ), 147 | } 148 | } 149 | 150 | pub(crate) fn fees_histogram(&self) -> &FeeHistogram { 151 | &self.fees 152 | } 153 | 154 | pub(crate) fn get(&self, txid: &Txid) -> Option<&Entry> { 155 | self.entries.get(txid) 156 | } 157 | 158 | pub(crate) fn filter_by_funding(&self, scripthash: &ScriptHash) -> Vec<&Entry> { 159 | let range = ( 160 | Bound::Included((*scripthash, txid_min())), 161 | Bound::Included((*scripthash, txid_max())), 162 | ); 163 | self.by_funding 164 | .range(range) 165 | .map(|(_, txid)| self.get(txid).expect("missing funding mempool tx")) 166 | .collect() 167 | } 168 | 169 | pub(crate) fn filter_by_spending(&self, outpoint: &OutPoint) -> Vec<&Entry> { 170 | let range = ( 171 | Bound::Included((*outpoint, txid_min())), 172 | Bound::Included((*outpoint, txid_max())), 173 | ); 174 | self.by_spending 175 | .range(range) 176 | .map(|(_, txid)| self.get(txid).expect("missing spending mempool tx")) 177 | .collect() 178 | } 179 | 180 | /// Apply a [`MempoolSyncUpdate`] to the mempool state. 
181 | pub fn apply_sync_update(&mut self, update: MempoolSyncUpdate) { 182 | let removed = update.removed_entries.len(); 183 | let added = update.new_entries.len(); 184 | 185 | for txid_to_remove in update.removed_entries { 186 | self.remove_entry(txid_to_remove); 187 | } 188 | 189 | for entry in update.new_entries { 190 | self.add_entry(entry); 191 | } 192 | 193 | self.update_metrics(); 194 | 195 | debug!( 196 | "{} mempool txs: {} added, {} removed", 197 | self.entries.len(), 198 | added, 199 | removed, 200 | ); 201 | } 202 | 203 | fn update_metrics(&mut self) { 204 | for i in 0..FeeHistogram::BINS { 205 | let bin_index = FeeHistogram::BINS - i - 1; // from 63 to 0 206 | let (lower, upper) = FeeHistogram::bin_range(bin_index); 207 | let label = format!("[{:20.0}, {:20.0})", lower, upper); 208 | self.vsize.set(&label, self.fees.vsize[bin_index] as f64); 209 | self.count.set(&label, self.fees.count[bin_index] as f64); 210 | } 211 | } 212 | 213 | pub fn sync(&mut self, daemon: &Daemon, exit_flag: &ExitFlag) { 214 | let loaded = match daemon.get_mempool_info() { 215 | Ok(info) => info.loaded.unwrap_or(true), 216 | Err(e) => { 217 | warn!("mempool sync failed: {}", e); 218 | return; 219 | } 220 | }; 221 | if !loaded { 222 | warn!("mempool not loaded"); 223 | return; 224 | } 225 | 226 | let old_txids = HashSet::::from_iter(self.entries.keys().copied()); 227 | 228 | let poll_result = MempoolSyncUpdate::poll(daemon, old_txids, exit_flag); 229 | 230 | let sync_update = match poll_result { 231 | Ok(sync_update) => sync_update, 232 | Err(e) => { 233 | warn!("mempool sync failed: {}", e); 234 | return; 235 | } 236 | }; 237 | 238 | self.apply_sync_update(sync_update); 239 | } 240 | 241 | /// Add a transaction entry to the mempool and update the fee histogram. 
    fn add_entry(&mut self, entry: Entry) {
        // Index each input by the outpoint it spends.
        for txi in &entry.tx.input {
            self.by_spending.insert((txi.previous_output, entry.txid));
        }
        // Index each output by the scripthash it funds.
        for txo in &entry.tx.output {
            let scripthash = ScriptHash::new(&txo.script_pubkey);
            self.by_funding.insert((scripthash, entry.txid)); // may have duplicates
        }

        // Positive vsize change: grow the matching fee-histogram bin.
        self.modify_fee_histogram(entry.fee, entry.vsize as i64);

        // `entry` is moved into the map last, after its fields were used above.
        assert!(
            self.entries.insert(entry.txid, entry).is_none(),
            "duplicate mempool txid"
        );
    }
290 | /// bins[64] = [0, 1) 291 | /// bins[63] = [1, 2) 292 | /// bins[62] = [2, 4) 293 | /// bins[61] = [4, 8) 294 | /// bins[60] = [8, 16) 295 | /// ... 296 | /// bins[1] = [2**62, 2**63) 297 | /// bins[0] = [2**63, 2**64) 298 | vsize: [u64; FeeHistogram::BINS], 299 | count: [u64; FeeHistogram::BINS], 300 | } 301 | 302 | impl Default for FeeHistogram { 303 | fn default() -> Self { 304 | Self { 305 | vsize: [0; FeeHistogram::BINS], 306 | count: [0; FeeHistogram::BINS], 307 | } 308 | } 309 | } 310 | 311 | impl FeeHistogram { 312 | const BINS: usize = 65; // 0..=64 313 | 314 | fn bin_index(fee: Amount, vsize: u64) -> usize { 315 | let fee_rate = fee.to_sat() / vsize; 316 | usize::try_from(fee_rate.leading_zeros()).unwrap() 317 | } 318 | 319 | fn bin_range(bin_index: usize) -> (u128, u128) { 320 | let limit = 1u128 << (FeeHistogram::BINS - bin_index - 1); 321 | (limit / 2, limit) 322 | } 323 | 324 | fn insert(&mut self, bin_index: usize, vsize: u64) { 325 | // skip transactions with too low fee rate (<1 sat/vB) 326 | if let Some(bin) = self.vsize.get_mut(bin_index) { 327 | *bin += vsize 328 | } 329 | if let Some(bin) = self.count.get_mut(bin_index) { 330 | *bin += 1 331 | } 332 | } 333 | 334 | fn remove(&mut self, bin_index: usize, vsize: u64) { 335 | // skip transactions with too low fee rate (<1 sat/vB) 336 | if let Some(bin) = self.vsize.get_mut(bin_index) { 337 | *bin = bin.checked_sub(vsize).unwrap_or_else(|| { 338 | warn!("removing TX from mempool caused bin count to unexpectedly drop below zero"); 339 | 0 340 | }); 341 | } 342 | if let Some(bin) = self.count.get_mut(bin_index) { 343 | *bin = bin.checked_sub(1).unwrap_or_else(|| { 344 | warn!("removing TX from mempool caused bin vsize to unexpectedly drop below zero"); 345 | 0 346 | }); 347 | } 348 | } 349 | } 350 | 351 | impl Serialize for FeeHistogram { 352 | fn serialize(&self, serializer: S) -> Result 353 | where 354 | S: Serializer, 355 | { 356 | let mut seq = 
serializer.serialize_seq(Some(self.vsize.len()))?; 357 | // https://electrum-protocol.readthedocs.io/en/latest/protocol-methods.html#mempool-get-fee-histogram 358 | let fee_rates = 359 | (0..FeeHistogram::BINS).map(|i| u64::MAX.checked_shr(i as u32).unwrap_or(0)); 360 | fee_rates 361 | .zip(self.vsize.iter().copied()) 362 | .skip_while(|(_fee_rate, vsize)| *vsize == 0) 363 | .try_for_each(|element| seq.serialize_element(&element))?; 364 | seq.end() 365 | } 366 | } 367 | 368 | #[cfg(test)] 369 | mod tests { 370 | use super::FeeHistogram; 371 | use bitcoin::Amount; 372 | use serde_json::json; 373 | 374 | #[test] 375 | fn test_histogram() { 376 | let items = vec![ 377 | (Amount::from_sat(20), 10), 378 | (Amount::from_sat(10), 10), 379 | (Amount::from_sat(60), 10), 380 | (Amount::from_sat(30), 10), 381 | (Amount::from_sat(70), 10), 382 | (Amount::from_sat(50), 10), 383 | (Amount::from_sat(40), 10), 384 | (Amount::from_sat(80), 10), 385 | (Amount::from_sat(1), 100), 386 | ]; 387 | let mut hist = FeeHistogram::default(); 388 | for (amount, vsize) in items { 389 | let bin_index = FeeHistogram::bin_index(amount, vsize); 390 | hist.insert(bin_index, vsize); 391 | } 392 | assert_eq!( 393 | json!(hist), 394 | json!([[15, 10], [7, 40], [3, 20], [1, 10], [0, 100]]) 395 | ); 396 | 397 | { 398 | let bin_index = FeeHistogram::bin_index(Amount::from_sat(5), 1); // 5 sat/byte 399 | hist.remove(bin_index, 11); 400 | assert_eq!( 401 | json!(hist), 402 | json!([[15, 10], [7, 29], [3, 20], [1, 10], [0, 100]]) 403 | ); 404 | } 405 | 406 | { 407 | let bin_index = FeeHistogram::bin_index(Amount::from_sat(13), 1); // 13 sat/byte 408 | hist.insert(bin_index, 80); 409 | assert_eq!( 410 | json!(hist), 411 | json!([[15, 90], [7, 29], [3, 20], [1, 10], [0, 100]]) 412 | ); 413 | } 414 | 415 | { 416 | let bin_index = FeeHistogram::bin_index(Amount::from_sat(99), 1); // 99 sat/byte 417 | hist.insert(bin_index, 15); 418 | assert_eq!( 419 | json!(hist), 420 | json!([ 421 | [127, 15], 422 | [63, 0], 
423 | [31, 0], 424 | [15, 90], 425 | [7, 29], 426 | [3, 20], 427 | [1, 10], 428 | [0, 100] 429 | ]) 430 | ); 431 | } 432 | } 433 | } 434 | -------------------------------------------------------------------------------- /src/merkle.rs: -------------------------------------------------------------------------------- 1 | use bitcoin::{hash_types::TxMerkleNode, hashes::Hash, Txid}; 2 | 3 | pub(crate) struct Proof { 4 | proof: Vec, 5 | position: usize, 6 | } 7 | 8 | impl Proof { 9 | pub(crate) fn create(txids: &[Txid], position: usize) -> Self { 10 | assert!(position < txids.len()); 11 | let mut offset = position; 12 | let mut hashes: Vec = txids 13 | .iter() 14 | .map(|txid| TxMerkleNode::from_raw_hash(txid.to_raw_hash())) 15 | .collect(); 16 | 17 | let mut proof = vec![]; 18 | while hashes.len() > 1 { 19 | if hashes.len() % 2 != 0 { 20 | let last = *hashes.last().unwrap(); 21 | hashes.push(last); 22 | } 23 | offset = if offset % 2 == 0 { 24 | offset + 1 25 | } else { 26 | offset - 1 27 | }; 28 | proof.push(hashes[offset]); 29 | offset /= 2; 30 | hashes = hashes 31 | .chunks(2) 32 | .map(|pair| { 33 | let left = pair[0]; 34 | let right = pair[1]; 35 | let input = [&left[..], &right[..]].concat(); 36 | TxMerkleNode::hash(&input) 37 | }) 38 | .collect() 39 | } 40 | Self { proof, position } 41 | } 42 | 43 | pub(crate) fn to_hex(&self) -> Vec { 44 | self.proof 45 | .iter() 46 | .map(|node| format!("{:x}", node)) 47 | .collect() 48 | } 49 | 50 | pub(crate) fn position(&self) -> usize { 51 | self.position 52 | } 53 | } 54 | 55 | #[cfg(test)] 56 | mod tests { 57 | use bitcoin::{consensus::encode::deserialize, Block, Txid}; 58 | use std::path::Path; 59 | 60 | use super::Proof; 61 | 62 | #[test] 63 | fn test_merkle() { 64 | let proof = Proof::create( 65 | &load_block_txids("00000000000000001203c1ea455e38612bdf36e9967fdead11935c8e22283ecc"), 66 | 157, 67 | ); 68 | assert_eq!( 69 | proof.to_hex(), 70 | vec![ 71 | 
"5d8cfb001d9ec17861ad9c158244239cb6e3298a619b2a5f7b176ddd54459c75", 72 | "06811172e13312f2e496259d2c8a7262f1192be5223fcf4d6a9ed7f58a2175ba", 73 | "cbcec841dea3294706809d1510c72b4424d141fac89106af65b70399b1d79f3f", 74 | "a24d6c3601a54d40f4350e6c8887bf82a873fe8619f95c772b573ec0373119d3", 75 | "2015c1bb133ee2c972e55fdcd205a9aee7b0122fd74c2f5d5d27b24a562c7790", 76 | "f379496fef2e603c4e1c03e2179ebaf5153d6463b8d61aa16d41db3321a18165", 77 | "7a798d6529663fd472d26cc90c434b64f78955747ac2f93c8dcd35b8f684946e", 78 | "ad3811062b8db664f2342cbff1b491865310b74416dd7b901f14d980886821f8" 79 | ] 80 | ); 81 | 82 | let proof = Proof::create( 83 | &load_block_txids("000000000000000002d249a3d89f63ef3fee203adcca7c24008c13fd854513f2"), 84 | 6, 85 | ); 86 | assert_eq!( 87 | proof.to_hex(), 88 | vec![ 89 | "d29769df672657689fd6d293b416ee9211c77fbe243ab7820813f327b0e8dd47", 90 | "d71f0947b47cab0f64948acfe52d41c293f492fe9627690c330d4004f2852ce4", 91 | "5f36c4330c727d7c8d98cc906cb286f13a61b5b4cab2124c5d041897834b42d8", 92 | "e77d181f83355ed38d0e6305fdb87c9637373fd90d1dfb911262ac55d260181e", 93 | "a8f83ca44dc486d9d45c4cff9567839c254bda96e6960d310a5e471c70c6a95b", 94 | "e9a5ff7f74cb060b451ed2cd27de038efff4df911f4e0f99e2661b46ebcc7e1c", 95 | "6b0144095e3f0e0d0551cbaa6c5dfc89387024f836528281b6d290e356e196cf", 96 | "bb0761b0636ffd387e0ce322289a3579e926b6813e090130a88228bd80cff982", 97 | "ac327124304cccf6739da308a25bb365a6b63e9344bad2be139b0b02c042567c", 98 | "42e11f2d67050cd31295f85507ebc7706fc4c1fddf1e5a45b98ae3f7c63d2592", 99 | "52657042fcfc88067524bf6c5f9a66414c7de4f4fcabcb65bca56fa84cf309b4" 100 | ] 101 | ); 102 | } 103 | 104 | fn load_block_txids(block_hash_hex: &str) -> Vec { 105 | let path = Path::new("src") 106 | .join("tests") 107 | .join("blocks") 108 | .join(block_hash_hex); 109 | let data = std::fs::read(path).unwrap(); 110 | let block: Block = deserialize(&data).unwrap(); 111 | block.txdata.iter().map(|tx| tx.compute_txid()).collect() 112 | } 113 | } 114 | 
#[cfg(feature = "metrics")]
mod metrics_impl {
    use anyhow::{Context, Result};

    #[cfg(feature = "metrics_process")]
    use prometheus::process_collector::ProcessCollector;

    use prometheus::{self, Encoder, HistogramOpts, HistogramVec, Registry, TEXT_FORMAT};
    use tiny_http::{Header as HttpHeader, Response, Server};

    use std::net::SocketAddr;

    use crate::thread::spawn;

    /// Prometheus metrics registry, served over HTTP from a dedicated thread.
    pub struct Metrics {
        reg: Registry,
    }

    impl Metrics {
        /// Start serving metrics on `addr` and return the registry handle.
        ///
        /// Fails if the HTTP server cannot bind `addr`.
        pub fn new(addr: SocketAddr) -> Result<Self> {
            let reg = Registry::new();

            #[cfg(feature = "metrics_process")]
            reg.register(Box::new(ProcessCollector::for_self()))
                .expect("failed to register ProcessCollector");

            let result = Self { reg };
            let reg = result.reg.clone();

            let server = match Server::http(addr) {
                Ok(server) => server,
                Err(err) => bail!("failed to start HTTP server on {}: {}", addr, err),
            };

            spawn("metrics", move || {
                let content_type = HttpHeader::from_bytes(&b"Content-Type"[..], TEXT_FORMAT)
                    .expect("failed to create HTTP header for Prometheus text format");
                for request in server.incoming_requests() {
                    let mut buffer = vec![];
                    if let Err(e) = prometheus::TextEncoder::new()
                        .encode(&reg.gather(), &mut buffer)
                        .context("failed to encode metrics")
                    {
                        warn!("skipping metrics request: {:#}", e);
                        continue;
                    }
                    // A single failed response (e.g. a scraper dropping the
                    // connection mid-reply) used to abort this thread via `?`,
                    // permanently disabling the metrics endpoint. Log the
                    // failure and keep serving subsequent requests instead.
                    if let Err(e) = request
                        .respond(Response::from_data(buffer).with_header(content_type.clone()))
                        .context("failed to send HTTP response")
                    {
                        warn!("skipping metrics request: {:#}", e);
                    }
                }
                Ok(())
            });

            info!("serving Prometheus metrics on {}", addr);
            Ok(result)
        }

        /// Register a labeled histogram named `electrs_{name}` with the given
        /// buckets. Panics on duplicate registration.
        pub fn histogram_vec(
            &self,
            name: &str,
            desc: &str,
            label: &str,
            buckets: Vec<f64>,
        ) -> Histogram {
            let name = String::from("electrs_") + name;
            let opts = HistogramOpts::new(name, desc).buckets(buckets);
            let hist = HistogramVec::new(opts, &[label]).unwrap();
            self.reg
                .register(Box::new(hist.clone()))
                .expect("failed to register Histogram");
            Histogram { hist }
        }

        /// Register a labeled gauge named `electrs_{name}`.
        /// Panics on duplicate registration.
        pub fn gauge(&self, name: &str, desc: &str, label: &str) -> Gauge {
            let name = String::from("electrs_") + name;
            let opts = prometheus::Opts::new(name, desc);
            let gauge = prometheus::GaugeVec::new(opts, &[label]).unwrap();
            self.reg
                .register(Box::new(gauge.clone()))
                .expect("failed to register Gauge");
            Gauge { gauge }
        }
    }

    /// Cheaply cloneable handle to a registered labeled gauge.
    #[derive(Clone)]
    pub struct Gauge {
        gauge: prometheus::GaugeVec,
    }

    impl Gauge {
        /// Set the gauge value for `label`.
        pub fn set(&self, label: &str, value: f64) {
            self.gauge.with_label_values(&[label]).set(value)
        }
    }

    /// Cheaply cloneable handle to a registered labeled histogram.
    #[derive(Clone)]
    pub struct Histogram {
        hist: HistogramVec,
    }

    impl Histogram {
        /// Record a single observation under `label`.
        pub fn observe(&self, label: &str, value: f64) {
            self.hist.with_label_values(&[label]).observe(value);
        }

        /// Run `func`, recording its wall-clock duration under `label`.
        pub fn observe_duration<F, T>(&self, label: &str, func: F) -> T
        where
            F: FnOnce() -> T,
        {
            self.hist
                .with_label_values(&[label])
                .observe_closure_duration(func)
        }
    }
}
/// Default histogram buckets for durations, in seconds:
/// a 1-2-5 decade series from 1 microsecond up to 100 seconds.
pub(crate) fn default_duration_buckets() -> Vec<f64> {
    let buckets: &[f64] = &[
        1e-6, 2e-6, 5e-6, // microseconds
        1e-5, 2e-5, 5e-5,
        1e-4, 2e-4, 5e-4,
        1e-3, 2e-3, 5e-3, // milliseconds
        1e-2, 2e-2, 5e-2,
        1e-1, 2e-1, 5e-1,
        1.0, 2.0, 5.0, // seconds
        10.0, 20.0, 50.0, 100.0,
    ];
    buckets.to_vec()
}

/// Default histogram buckets for sizes/counts:
/// a 1-2-5 decade series from 1 up to 10 million.
pub(crate) fn default_size_buckets() -> Vec<f64> {
    let buckets: &[f64] = &[
        1.0, 2.0, 5.0, //
        1e1, 2e1, 5e1, //
        1e2, 2e2, 5e2, //
        1e3, 2e3, 5e3, //
        1e4, 2e4, 5e4, //
        1e5, 2e5, 5e5, //
        1e6, 2e6, 5e6, //
        1e7,
    ];
    buckets.to_vec()
}
self.id, value); 35 | value += "\n"; 36 | self.stream 37 | .write_all(value.as_bytes()) 38 | .with_context(|| format!("failed to send response: {:?}", value))?; 39 | } 40 | Ok(()) 41 | } 42 | 43 | fn disconnect(self) { 44 | if let Err(e) = self.stream.shutdown(Shutdown::Both) { 45 | warn!("{}: failed to shutdown TCP connection {}", self.id, e) 46 | } 47 | } 48 | } 49 | 50 | pub fn run() -> Result<()> { 51 | let result = serve(); 52 | if let Err(e) = &result { 53 | for cause in e.chain() { 54 | if cause.downcast_ref::().is_some() { 55 | info!("electrs stopped: {:?}", e); 56 | return Ok(()); 57 | } 58 | } 59 | } 60 | result.context("electrs failed") 61 | } 62 | 63 | fn serve() -> Result<()> { 64 | let config = Config::from_args(); 65 | let metrics = Metrics::new(config.monitoring_addr)?; 66 | 67 | let (server_tx, server_rx) = unbounded(); 68 | if !config.disable_electrum_rpc { 69 | let listener = TcpListener::bind(config.electrum_rpc_addr)?; 70 | info!("serving Electrum RPC on {}", listener.local_addr()?); 71 | spawn("accept_loop", || accept_loop(listener, server_tx)); // detach accepting thread 72 | }; 73 | 74 | let server_batch_size = metrics.histogram_vec( 75 | "server_batch_size", 76 | "# of server events handled in a single batch", 77 | "type", 78 | metrics::default_size_buckets(), 79 | ); 80 | let duration = metrics.histogram_vec( 81 | "server_loop_duration", 82 | "server loop duration", 83 | "step", 84 | metrics::default_duration_buckets(), 85 | ); 86 | let mut rpc = Rpc::new(&config, metrics)?; 87 | 88 | let new_block_rx = rpc.new_block_notification(); 89 | let mut peers = HashMap::::new(); 90 | loop { 91 | // initial sync and compaction may take a few hours 92 | while server_rx.is_empty() { 93 | let done = duration.observe_duration("sync", || rpc.sync().context("sync failed"))?; // sync a batch of blocks 94 | peers = duration.observe_duration("notify", || notify_peers(&rpc, peers)); // peers are disconnected on error 95 | if !done { 96 | continue; // more 
blocks to sync 97 | } 98 | if config.sync_once { 99 | return Ok(()); // exit after initial sync is done 100 | } 101 | break; 102 | } 103 | duration.observe_duration("select", || -> Result<()> { 104 | select! { 105 | // Handle signals for graceful shutdown 106 | recv(rpc.signal().receiver()) -> result => { 107 | result.context("signal channel disconnected")?; 108 | rpc.signal().exit_flag().poll().context("RPC server interrupted")?; 109 | }, 110 | // Handle new blocks' notifications 111 | recv(new_block_rx) -> result => match result { 112 | Ok(_) => (), // sync and update 113 | Err(_) => { 114 | info!("disconnected from bitcoind"); 115 | return Ok(()); 116 | } 117 | }, 118 | // Handle Electrum RPC requests 119 | recv(server_rx) -> event => { 120 | let first = once(event.context("server disconnected")?); 121 | let rest = server_rx.iter().take(server_rx.len()); 122 | let events: Vec = first.chain(rest).collect(); 123 | server_batch_size.observe("recv", events.len() as f64); 124 | duration.observe_duration("handle", || handle_events(&rpc, &mut peers, events)); 125 | }, 126 | default(config.wait_duration) => (), // sync and update 127 | }; 128 | Ok(()) 129 | })?; 130 | } 131 | } 132 | 133 | fn notify_peers(rpc: &Rpc, peers: HashMap) -> HashMap { 134 | peers 135 | .into_par_iter() 136 | .filter_map(|(_, mut peer)| match notify_peer(rpc, &mut peer) { 137 | Ok(()) => Some((peer.id, peer)), 138 | Err(e) => { 139 | error!("failed to notify peer {}: {}", peer.id, e); 140 | peer.disconnect(); 141 | None 142 | } 143 | }) 144 | .collect() 145 | } 146 | 147 | fn notify_peer(rpc: &Rpc, peer: &mut Peer) -> Result<()> { 148 | let notifications = rpc 149 | .update_client(&mut peer.client) 150 | .context("failed to generate notifications")?; 151 | peer.send(notifications) 152 | .context("failed to send notifications") 153 | } 154 | 155 | struct Event { 156 | peer_id: usize, 157 | msg: Message, 158 | } 159 | 160 | enum Message { 161 | New(TcpStream), 162 | Request(String), 163 | Done, 
/// Group a batch of events by their peer and handle each peer's messages,
/// preserving per-peer message order.
fn handle_events(rpc: &Rpc, peers: &mut HashMap<usize, Peer>, events: Vec<Event>) {
    let mut events_by_peer = HashMap::<usize, Vec<Message>>::new();
    events
        .into_iter()
        .for_each(|e| events_by_peer.entry(e.peer_id).or_default().push(e.msg));
    for (peer_id, messages) in events_by_peer {
        handle_peer_events(rpc, peers, peer_id, messages);
    }
}

/// Apply one peer's ordered messages: register a new connection, batch up
/// request lines, and note disconnection; then serve the batched requests.
fn handle_peer_events(
    rpc: &Rpc,
    peers: &mut HashMap<usize, Peer>,
    peer_id: usize,
    messages: Vec<Message>,
) {
    let mut lines = vec![];
    let mut done = false;
    for msg in messages {
        match msg {
            Message::New(stream) => {
                debug!("{}: connected", peer_id);
                peers.insert(peer_id, Peer::new(peer_id, stream));
            }
            Message::Request(line) => lines.push(line),
            Message::Done => {
                // Peer hung up; stop collecting — any later messages are stale.
                done = true;
                break;
            }
        }
    }
    let result = match peers.get_mut(&peer_id) {
        Some(peer) => {
            let responses = rpc.handle_requests(&mut peer.client, &lines);
            peer.send(responses)
        }
        None => return, // unknown peer
    };
    if let Err(e) = result {
        error!("{}: disconnecting due to {}", peer_id, e);
        peers.remove(&peer_id).unwrap().disconnect();
    } else if done {
        peers.remove(&peer_id); // already disconnected, just remove from peers' map
    }
}

/// Accept TCP connections forever, spawning one receiving thread per peer.
/// Peer IDs are assigned from the connection counter.
fn accept_loop(listener: TcpListener, server_tx: Sender<Event>) -> Result<()> {
    for (peer_id, conn) in listener.incoming().enumerate() {
        let stream = conn.context("failed to accept")?;
        let tx = server_tx.clone();
        spawn("recv_loop", move || {
            let result = recv_loop(peer_id, &stream, tx);
            // Shut down the read side so the peer observes the close.
            if let Err(e) = stream.shutdown(Shutdown::Read) {
                warn!("{}: failed to shutdown TCP receiving {}", peer_id, e)
            }
            result
        });
    }
    Ok(())
}
/// Error used to unwind long-running loops once a shutdown signal arrived.
#[derive(Debug)]
pub struct ExitError;

impl fmt::Display for ExitError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "exiting due to signal")
    }
}

impl error::Error for ExitError {}

/// Cloneable, thread-safe shutdown flag shared between the signal-handling
/// thread and the main loop.
#[derive(Clone)]
pub(crate) struct ExitFlag {
    flag: Arc<AtomicBool>,
}

impl ExitFlag {
    fn new() -> Self {
        ExitFlag {
            flag: Arc::new(AtomicBool::new(false)),
        }
    }

    /// Return `Err(ExitError)` once shutdown has been requested.
    pub fn poll(&self) -> Result<(), ExitError> {
        if self.flag.load(Ordering::Relaxed) {
            Err(ExitError)
        } else {
            Ok(())
        }
    }

    /// Mark shutdown as requested (one-way; never cleared).
    fn set(&self) {
        self.flag.store(true, Ordering::Relaxed)
    }
}

/// OS signal listener: exposes a channel that fires on every handled signal,
/// and an `ExitFlag` raised only by terminating signals.
pub(crate) struct Signal {
    rx: Receiver<()>,
    exit: ExitFlag,
}

impl Signal {
    #[cfg(not(windows))]
    pub fn new() -> Signal {
        let ids = vec![
            SIGINT, SIGTERM,
            SIGUSR1, // allow external triggering (e.g. via bitcoind `blocknotify`)
        ];
        let (tx, rx) = unbounded();
        let result = Signal {
            rx,
            exit: ExitFlag::new(),
        };

        let exit_flag = result.exit.clone();
        let mut signals = Signals::new(ids).expect("failed to register signal hook");
        spawn("signal", move || {
            for id in &mut signals {
                info!("notified via SIG{}", id);
                // SIGUSR1 only wakes the main loop; the others also
                // request shutdown by raising the exit flag.
                match id {
                    SIGUSR1 => (),
                    _ => exit_flag.set(),
                };
                tx.send(()).context("failed to send signal")?;
            }
            Ok(())
        });
        result
    }

    #[cfg(windows)]
    pub fn new() -> Signal {
        let (tx, rx) = unbounded();
        let result = Signal {
            rx,
            exit: ExitFlag::new(),
        };

        let exit_flag = result.exit.clone();

        // Handle Ctrl-C
        ctrlc::set_handler(move || {
            info!("notified via Ctrl-C");
            exit_flag.set();
            // The receiver may already be gone during shutdown — ignore send errors.
            let _ = tx.send(());
        })
        .expect("failed to set Ctrl-C handler");

        result
    }

    /// Channel receiving `()` for every handled signal.
    pub fn receiver(&self) -> &Receiver<()> {
        &self.rx
    }

    /// Flag raised by terminating signals.
    pub fn exit_flag(&self) -> &ExitFlag {
        &self.exit
    }
}
-------------------------------------------------------------------------------- /src/tests/blocks/00000000000000001203c1ea455e38612bdf36e9967fdead11935c8e22283ecc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/romanz/electrs/785ae687e4279c8682488c10221d4fa3cde62b23/src/tests/blocks/00000000000000001203c1ea455e38612bdf36e9967fdead11935c8e22283ecc -------------------------------------------------------------------------------- /src/thread.rs: -------------------------------------------------------------------------------- 1 | use anyhow::Result; 2 | 3 | pub(crate) fn spawn(name: &'static str, f: F) -> std::thread::JoinHandle<()> 4 | where 5 | F: 'static + Send + FnOnce() -> Result<()>, 6 | { 7 | std::thread::Builder::new() 8 | .name(name.to_owned()) 9 | .spawn(move || { 10 | if let Err(e) = f() { 11 | warn!("{} thread failed: {}", name, e); 12 | e.chain().skip(1).for_each(|e| warn!("because: {}", e)); 13 | } 14 | }) 15 | .expect("failed to spawn a thread") 16 | } 17 | -------------------------------------------------------------------------------- /src/tracker.rs: -------------------------------------------------------------------------------- 1 | use std::ops::ControlFlow; 2 | 3 | use anyhow::{Context, Result}; 4 | use bitcoin::{BlockHash, Txid}; 5 | use bitcoin_slices::{bsl, Error::VisitBreak, Visit, Visitor}; 6 | 7 | use crate::{ 8 | cache::Cache, 9 | chain::Chain, 10 | config::Config, 11 | daemon::Daemon, 12 | db::DBStore, 13 | index::Index, 14 | mempool::{FeeHistogram, Mempool}, 15 | metrics::Metrics, 16 | signals::ExitFlag, 17 | status::{Balance, ScriptHashStatus, UnspentEntry}, 18 | types::bsl_txid, 19 | }; 20 | 21 | /// Electrum protocol subscriptions' tracker 22 | pub struct Tracker { 23 | index: Index, 24 | mempool: Mempool, 25 | metrics: Metrics, 26 | ignore_mempool: bool, 27 | } 28 | 29 | pub(crate) enum Error { 30 | NotReady, 31 | } 32 | 33 | impl Tracker { 34 | pub fn new(config: 
    /// Best-known (indexed) chain.
    pub(crate) fn chain(&self) -> &Chain {
        self.index.chain()
    }

    /// Current mempool fee histogram.
    pub(crate) fn fees_histogram(&self) -> &FeeHistogram {
        self.mempool.fees_histogram()
    }

    /// Metrics registry handle.
    pub(crate) fn metrics(&self) -> &Metrics {
        &self.metrics
    }

    /// Unspent outputs of `status`' scripthash, on the indexed chain.
    pub(crate) fn get_unspent(&self, status: &ScriptHashStatus) -> Vec<UnspentEntry> {
        status.get_unspent(self.index.chain())
    }

    /// Index a batch of blocks; returns `true` once the index caught up with
    /// bitcoind. The mempool is synced only after the chain index is done
    /// (and unless `ignore_mempool` is configured).
    pub(crate) fn sync(&mut self, daemon: &Daemon, exit_flag: &ExitFlag) -> Result<bool> {
        let done = self.index.sync(daemon, exit_flag)?;
        if done && !self.ignore_mempool {
            self.mempool.sync(daemon, exit_flag);
            // TODO: double check tip - and retry on diff
        }
        Ok(done)
    }

    /// `Ok(())` once the index is ready to serve; `Err(Error::NotReady)` before.
    pub(crate) fn status(&self) -> Result<(), Error> {
        if self.index.is_ready() {
            return Ok(());
        }
        Err(Error::NotReady)
    }

    /// Re-sync `status` and report whether its Electrum status hash changed
    /// (i.e. whether subscribers must be notified).
    pub(crate) fn update_scripthash_status(
        &self,
        status: &mut ScriptHashStatus,
        daemon: &Daemon,
        cache: &Cache,
    ) -> Result<bool> {
        let prev_statushash = status.statushash();
        status.sync(&self.index, &self.mempool, daemon, cache)?;
        Ok(prev_statushash != status.statushash())
    }

    /// Confirmed/unconfirmed balance of `status`' scripthash.
    pub(crate) fn get_balance(&self, status: &ScriptHashStatus) -> Balance {
        status.get_balance(self.chain())
    }
{ 110 | // Note: there are two blocks with coinbase transactions having same txid (see BIP-30) 111 | let blockhashes = self.index.filter_by_txid(txid); 112 | let mut result = None; 113 | daemon.for_blocks(blockhashes, |blockhash, block| { 114 | if result.is_some() { 115 | return; // keep first matching transaction 116 | } 117 | let mut visitor = FindTransaction::new(txid); 118 | result = match bsl::Block::visit(&block, &mut visitor) { 119 | Ok(_) | Err(VisitBreak) => visitor.found.map(|tx| (blockhash, tx)), 120 | Err(e) => panic!("core returned invalid block: {:?}", e), 121 | }; 122 | })?; 123 | Ok(result) 124 | } 125 | } 126 | 127 | pub struct FindTransaction { 128 | txid: bitcoin::Txid, 129 | found: Option>, // no need to deserialize 130 | } 131 | 132 | impl FindTransaction { 133 | pub fn new(txid: bitcoin::Txid) -> Self { 134 | Self { txid, found: None } 135 | } 136 | } 137 | impl Visitor for FindTransaction { 138 | fn visit_transaction(&mut self, tx: &bsl::Transaction) -> ControlFlow<()> { 139 | if self.txid == bsl_txid(tx) { 140 | self.found = Some(tx.as_ref().into()); 141 | ControlFlow::Break(()) 142 | } else { 143 | ControlFlow::Continue(()) 144 | } 145 | } 146 | } 147 | -------------------------------------------------------------------------------- /src/types.rs: -------------------------------------------------------------------------------- 1 | use anyhow::Result; 2 | 3 | use std::convert::TryFrom; 4 | 5 | use bitcoin::blockdata::block::Header as BlockHeader; 6 | use bitcoin::{ 7 | consensus::encode::{deserialize, Decodable, Encodable}, 8 | hashes::{hash_newtype, sha256, Hash}, 9 | io, OutPoint, Script, Txid, 10 | }; 11 | use bitcoin_slices::bsl; 12 | 13 | macro_rules! 
/// Implement bitcoin consensus (de)serialization for a struct by
/// (de)coding each listed field in declaration order.
macro_rules! impl_consensus_encoding {
    ($thing:ident, $($field:ident),+) => (
        impl Encodable for $thing {
            #[inline]
            fn consensus_encode<S: io::Write + ?Sized>(
                &self,
                s: &mut S,
            ) -> Result<usize, io::Error> {
                let mut len = 0;
                $(len += self.$field.consensus_encode(s)?;)+
                Ok(len)
            }
        }

        impl Decodable for $thing {
            #[inline]
            fn consensus_decode<D: io::Read + ?Sized>(
                d: &mut D,
            ) -> Result<$thing, bitcoin::consensus::encode::Error> {
                Ok($thing {
                    $($field: Decodable::consensus_decode(d)?),+
                })
            }
        }
    );
}

/// Number of hash bytes kept in a DB key prefix.
pub const HASH_PREFIX_LEN: usize = 8;
const HEIGHT_SIZE: usize = 4;

pub(crate) type HashPrefix = [u8; HASH_PREFIX_LEN];
pub(crate) type SerializedHashPrefixRow = [u8; HASH_PREFIX_ROW_SIZE];
type Height = u32;
// Consensus-serialized block bytes.
pub(crate) type SerBlock = Vec<u8>;

/// Index row: a truncated hash prefix plus the confirmation height.
#[derive(Debug, Serialize, Deserialize, PartialEq)]
pub(crate) struct HashPrefixRow {
    prefix: HashPrefix,
    height: Height, // transaction confirmed height
}

pub const HASH_PREFIX_ROW_SIZE: usize = HASH_PREFIX_LEN + HEIGHT_SIZE;

impl HashPrefixRow {
    /// Consensus-encode into the fixed 12-byte (8 prefix + 4 height) DB form.
    pub(crate) fn to_db_row(&self) -> SerializedHashPrefixRow {
        let mut row = [0; HASH_PREFIX_ROW_SIZE];
        let len = self
            .consensus_encode(&mut (&mut row as &mut [u8]))
            .expect("in-memory writers don't error");
        debug_assert_eq!(len, HASH_PREFIX_ROW_SIZE);
        row
    }

    /// Inverse of `to_db_row`; panics on malformed input.
    pub(crate) fn from_db_row(row: SerializedHashPrefixRow) -> Self {
        deserialize(&row).expect("bad HashPrefixRow")
    }

    /// Confirmation height as `usize`; panics if it doesn't fit.
    pub fn height(&self) -> usize {
        usize::try_from(self.height).expect("invalid height")
    }
}

impl_consensus_encoding!(HashPrefixRow, prefix, height);
{ 78 | /// https://electrum-protocol.readthedocs.io/en/latest/protocol-basics.html#script-hashes 79 | #[hash_newtype(backward)] 80 | pub struct ScriptHash(sha256::Hash); 81 | } 82 | 83 | impl ScriptHash { 84 | pub fn new(script: &Script) -> Self { 85 | ScriptHash::hash(script.as_bytes()) 86 | } 87 | 88 | fn prefix(&self) -> HashPrefix { 89 | let mut prefix = HashPrefix::default(); 90 | prefix.copy_from_slice(&self.0[..HASH_PREFIX_LEN]); 91 | prefix 92 | } 93 | } 94 | 95 | pub(crate) struct ScriptHashRow; 96 | 97 | impl ScriptHashRow { 98 | pub(crate) fn scan_prefix(scripthash: ScriptHash) -> HashPrefix { 99 | scripthash.0[..HASH_PREFIX_LEN].try_into().unwrap() 100 | } 101 | 102 | pub(crate) fn row(scripthash: ScriptHash, height: usize) -> HashPrefixRow { 103 | HashPrefixRow { 104 | prefix: scripthash.prefix(), 105 | height: Height::try_from(height).expect("invalid height"), 106 | } 107 | } 108 | } 109 | 110 | // *************************************************************************** 111 | 112 | hash_newtype! 
// ***************************************************************************

/// Compact DB key prefix for a spent outpoint: the big-endian u64 formed by
/// the txid's first 8 bytes, plus `vout` (wrapping). The addition may carry
/// into higher bytes — see `test_spending_prefix`.
fn spending_prefix(prev: OutPoint) -> HashPrefix {
    let txid_prefix = HashPrefix::try_from(&prev.txid[..HASH_PREFIX_LEN]).unwrap();
    let value = u64::from_be_bytes(txid_prefix);
    let value = value.wrapping_add(prev.vout.into());
    value.to_be_bytes()
}

/// DB row builder for the spending (outpoint → height) index.
pub(crate) struct SpendingPrefixRow;

impl SpendingPrefixRow {
    /// Prefix used to scan rows spending `outpoint`.
    pub(crate) fn scan_prefix(outpoint: OutPoint) -> HashPrefix {
        spending_prefix(outpoint)
    }

    /// Row for `outpoint` spent at `height`; panics if the height doesn't fit in u32.
    pub(crate) fn row(outpoint: OutPoint, height: usize) -> HashPrefixRow {
        HashPrefixRow {
            prefix: spending_prefix(outpoint),
            height: Height::try_from(height).expect("invalid height"),
        }
    }
}

// ***************************************************************************

/// First `HASH_PREFIX_LEN` bytes of the txid (its internal byte order).
fn txid_prefix(txid: &Txid) -> HashPrefix {
    let mut prefix = [0u8; HASH_PREFIX_LEN];
    prefix.copy_from_slice(&txid[..HASH_PREFIX_LEN]);
    prefix
}

/// DB row builder for the txid → confirmation-height index.
pub(crate) struct TxidRow;

impl TxidRow {
    /// Prefix used to scan rows of `txid`.
    pub(crate) fn scan_prefix(txid: Txid) -> HashPrefix {
        txid_prefix(&txid)
    }

    /// Row for `txid` confirmed at `height`; panics if the height doesn't fit in u32.
    pub(crate) fn row(txid: Txid, height: usize) -> HashPrefixRow {
        HashPrefixRow {
            prefix: txid_prefix(&txid),
            height: Height::try_from(height).expect("invalid height"),
        }
    }
}

// ***************************************************************************

/// Serialized form of `HeaderRow`: a bare consensus-encoded block header.
pub(crate) type SerializedHeaderRow = [u8; HEADER_ROW_SIZE];

/// DB row holding one block header.
#[derive(Debug, Serialize, Deserialize)]
pub(crate) struct HeaderRow {
    pub(crate) header: BlockHeader,
}

// Size of a consensus-encoded block header.
pub const HEADER_ROW_SIZE: usize = 80;
// A HeaderRow serializes as exactly its consensus-encoded header.
impl_consensus_encoding!(HeaderRow, header);

impl HeaderRow {
    pub(crate) fn new(header: BlockHeader) -> Self {
        Self { header }
    }

    /// Consensus-encode into the fixed 80-byte DB representation.
    pub(crate) fn to_db_row(&self) -> SerializedHeaderRow {
        let mut row = [0; HEADER_ROW_SIZE];
        let len = self
            .consensus_encode(&mut (&mut row as &mut [u8]))
            .expect("in-memory writers don't error");
        debug_assert_eq!(len, HEADER_ROW_SIZE);
        row
    }

    /// Inverse of `to_db_row`; panics on malformed input.
    pub(crate) fn from_db_row(row: SerializedHeaderRow) -> Self {
        deserialize(&row).expect("bad HeaderRow")
    }
}

/// Txid of a `bsl` transaction, built from its `txid_sha2()` digest.
pub(crate) fn bsl_txid(tx: &bsl::Transaction) -> Txid {
    bitcoin::Txid::from_slice(tx.txid_sha2().as_slice()).expect("invalid txid")
}
    #[test]
    fn test_txid1_prefix() {
        // Duplicate txids from BIP-30: the same txid was confirmed at two
        // different heights, so both rows must serialize distinctly.
        let hex = "d5d27987d2a3dfc724e359870c6644b40e497bdc0589a033220fe15429d88599";
        let txid = Txid::from_str(hex).unwrap();

        let row1 = TxidRow::row(txid, 91812);
        let row2 = TxidRow::row(txid, 91842);

        assert_eq!(row1.to_db_row(), hex!("9985d82954e10f22a4660100"));
        assert_eq!(row2.to_db_row(), hex!("9985d82954e10f22c2660100"));
    }

    #[test]
    fn test_txid2_prefix() {
        // duplicate txids from BIP-30
        let hex = "e3bf3d07d4b0375638d5f1db5255fe07ba2c4cb067cd81b84ee974b6585fb468";
        let txid = Txid::from_str(hex).unwrap();

        let row1 = TxidRow::row(txid, 91722);
        let row2 = TxidRow::row(txid, 91880);

        // little-endian height encoding; for these nearby heights the rows
        // also happen to sort by block height
        assert_eq!(row1.to_db_row(), hex!("68b45f58b674e94e4a660100"));
        assert_eq!(row2.to_db_row(), hex!("68b45f58b674e94ee8660100"));
    }

    #[test]
    fn test_spending_prefix() {
        // txid bytes are 0x00..0x1f; the prefix is the big-endian u64 of the
        // first 8 bytes plus vout, so the carry propagates into higher bytes.
        let txid = "000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f"
            .parse()
            .unwrap();

        assert_eq!(
            spending_prefix(OutPoint { txid, vout: 0 }),
            [31, 30, 29, 28, 27, 26, 25, 24]
        );
        assert_eq!(
            spending_prefix(OutPoint { txid, vout: 10 }),
            [31, 30, 29, 28, 27, 26, 25, 34]
        );
        assert_eq!(
            spending_prefix(OutPoint { txid, vout: 255 }),
            [31, 30, 29, 28, 27, 26, 26, 23]
        );
        assert_eq!(
            spending_prefix(OutPoint { txid, vout: 256 }),
            [31, 30, 29, 28, 27, 26, 26, 24]
        );
    }
#!/bin/bash
# End-to-end regression test: starts bitcoind (regtest), electrs and an
# Electrum wallet daemon, then checks balances/histories before and after
# mining a block.
set -euo pipefail

rm -rf data/
mkdir -p data/{bitcoin,electrum,electrs}

# Kill and reap all background jobs (bitcoind, electrs, electrum) on exit.
cleanup() {
	trap - SIGTERM SIGINT
	set +eo pipefail
	jobs
	for j in `jobs -rp`
	do
		kill $j
		wait $j
	done
}
trap cleanup SIGINT SIGTERM EXIT

BTC="bitcoin-cli -regtest -datadir=data/bitcoin"
ELECTRUM="electrum --regtest"
EL="$ELECTRUM --wallet=data/electrum/wallet"

# Follow a log file from its beginning; never fail the pipeline.
tail_log() {
	tail -n +0 -F $1 || true
}

echo "Starting $(bitcoind -version | head -n1)..."
bitcoind -regtest -datadir=data/bitcoin -printtoconsole=0 &
BITCOIND_PID=$!

# Block until bitcoind's RPC interface is up.
$BTC -rpcwait getblockcount > /dev/null

echo "Creating Electrum `electrum version --offline` wallet..."
WALLET=`$EL --offline create --seed_type=segwit`
MINING_ADDR=`$EL --offline getunusedaddress`

# Mine 110 blocks to the wallet so some coinbase outputs mature
# (the getbalance checks below rely on this exact count).
$BTC generatetoaddress 110 $MINING_ADDR > /dev/null
echo `$BTC getblockchaininfo | jq -r '"Generated \(.blocks) regtest blocks (\(.size_on_disk/1e3) kB)"'` to $MINING_ADDR

TIP=`$BTC getbestblockhash`

export RUST_LOG=electrs=debug
electrs \
  --db-dir=data/electrs \
  --daemon-dir=data/bitcoin \
  --network=regtest \
  2> data/electrs/regtest-debug.log &
ELECTRS_PID=$!
# Wait until electrs serves its Electrum RPC, then snapshot Prometheus metrics.
tail_log data/electrs/regtest-debug.log | grep -m1 "serving Electrum RPC"
curl localhost:24224 -o metrics.txt

# Connect an Electrum daemon to the local electrs (plain TCP, single server).
$ELECTRUM daemon --server localhost:60401:t -1 -vDEBUG 2> data/electrum/regtest-debug.log &
ELECTRUM_PID=$!
tail_log data/electrum/regtest-debug.log | grep -m1 "connection established"
$EL getinfo | jq .

echo "Loading Electrum wallet..."
$EL load_wallet

echo "Running integration tests:"

echo " * getbalance"
test "`$EL getbalance | jq -c .`" == '{"confirmed":"550","unmatured":"4950"}'

echo " * getunusedaddress"
NEW_ADDR=`$EL getunusedaddress`

echo " * payto & broadcast"
TXID=$($EL broadcast $($EL payto $NEW_ADDR 123 --fee 0.001 --password=''))

echo " * get_tx_status"
test "`$EL get_tx_status $TXID | jq -c .`" == '{"confirmations":0}'

echo " * getaddresshistory"
test "`$EL getaddresshistory $NEW_ADDR | jq -c .`" == "[{\"fee\":100000,\"height\":0,\"tx_hash\":\"$TXID\"}]"

echo " * getbalance"
test "`$EL getbalance | jq -c .`" == '{"confirmed":"549.999","unmatured":"4950"}'

echo "Generating bitcoin block..."
$BTC generatetoaddress 1 $MINING_ADDR > /dev/null
$BTC getblockcount > /dev/null

echo " * wait for new block"
kill -USR1 $ELECTRS_PID # notify server to index new block
tail_log data/electrum/regtest-debug.log | grep -m1 "verified $TXID" > /dev/null

echo " * get_tx_status"
test "`$EL get_tx_status $TXID | jq -c .`" == '{"confirmations":1}'

echo " * getaddresshistory"
test "`$EL getaddresshistory $NEW_ADDR | jq -c .`" == "[{\"height\":111,\"tx_hash\":\"$TXID\"}]"

echo " * getbalance"
test "`$EL getbalance | jq -c .`" == '{"confirmed":"599.999","unmatured":"4950.001"}'

echo "Electrum `$EL stop`" # disconnect wallet
wait $ELECTRUM_PID

kill -INT $ELECTRS_PID # close server
tail_log data/electrs/regtest-debug.log | grep -m1 "electrs stopped"
wait $ELECTRS_PID

$BTC stop # stop bitcoind
wait $BITCOIND_PID

echo "=== PASSED ==="