├── chain ├── LICENSE ├── src │ ├── filter.rs │ ├── block │ │ ├── store.rs │ │ └── store │ │ │ └── memory.rs │ ├── block.rs │ ├── lib.rs │ └── filter │ │ ├── store.rs │ │ └── cache.rs └── Cargo.toml ├── node ├── LICENSE ├── Cargo.toml └── src │ ├── lib.rs │ ├── logger.rs │ └── main.rs ├── p2p ├── LICENSE ├── Cargo.toml └── src │ ├── lib.rs │ ├── fsm │ ├── tests │ │ └── simulations.rs │ ├── pingmgr.rs │ ├── fees.rs │ └── filter_cache.rs │ └── stream.rs ├── rust-toolchain ├── test ├── LICENSE ├── data │ └── headers.bin ├── Cargo.toml └── src │ └── lib.rs ├── client ├── LICENSE ├── src │ ├── lib.rs │ ├── error.rs │ ├── service.rs │ ├── peer.rs │ ├── handle.rs │ ├── tests │ │ └── mock.rs │ └── tests.rs └── Cargo.toml ├── wallet ├── LICENSE ├── src │ ├── wallet │ │ ├── schema.sql │ │ ├── ui │ │ │ └── table.rs │ │ ├── hw.rs │ │ └── db │ │ │ └── types.rs │ ├── error.rs │ ├── logger.rs │ ├── input.rs │ ├── main.rs │ ├── lib.rs │ └── wallet.rs └── Cargo.toml ├── net ├── poll │ ├── LICENSE │ ├── src │ │ ├── fallible.rs │ │ ├── socket.rs │ │ ├── lib.rs │ │ └── time.rs │ └── Cargo.toml ├── Cargo.toml └── src │ ├── simulator │ └── arbitrary.rs │ ├── error.rs │ ├── event.rs │ ├── time.rs │ └── lib.rs ├── .github ├── FUNDING.yml └── workflows │ └── actions.yml ├── .gitignore ├── .gitsigners ├── common ├── Cargo.toml └── src │ ├── lib.rs │ ├── p2p.rs │ ├── block │ ├── genesis.rs │ ├── checkpoints.rs │ ├── iter.rs │ ├── filter.rs │ ├── store.rs │ └── tree.rs │ ├── block.rs │ ├── collections.rs │ └── network.rs ├── deny.toml ├── LICENSE ├── Cargo.toml ├── CONTRIBUTING.md ├── src └── lib.rs ├── README.md └── docs └── nakamoto-talk.md /chain/LICENSE: -------------------------------------------------------------------------------- 1 | ../LICENSE -------------------------------------------------------------------------------- /node/LICENSE: -------------------------------------------------------------------------------- 1 | ../LICENSE 
-------------------------------------------------------------------------------- /p2p/LICENSE: -------------------------------------------------------------------------------- 1 | ../LICENSE -------------------------------------------------------------------------------- /rust-toolchain: -------------------------------------------------------------------------------- 1 | 1.73 2 | -------------------------------------------------------------------------------- /test/LICENSE: -------------------------------------------------------------------------------- 1 | ../LICENSE -------------------------------------------------------------------------------- /client/LICENSE: -------------------------------------------------------------------------------- 1 | ../LICENSE -------------------------------------------------------------------------------- /wallet/LICENSE: -------------------------------------------------------------------------------- 1 | ../LICENSE -------------------------------------------------------------------------------- /net/poll/LICENSE: -------------------------------------------------------------------------------- 1 | ../../LICENSE -------------------------------------------------------------------------------- /.github/FUNDING.yml: -------------------------------------------------------------------------------- 1 | github: cloudhead 2 | custom: ["https://cloudhead.io/donate"] 3 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | target/ 2 | notes/ 3 | .nakamoto/ 4 | errors.err 5 | headers.db 6 | TODO 7 | NOTES 8 | -------------------------------------------------------------------------------- /test/data/headers.bin: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cloudhead/nakamoto/HEAD/test/data/headers.bin 
-------------------------------------------------------------------------------- /chain/src/filter.rs: -------------------------------------------------------------------------------- 1 | //! Compact block filters (BIP 157/8). 2 | pub mod cache; 3 | pub mod store; 4 | 5 | pub use nakamoto_common::bitcoin::util::bip158::BlockFilter; 6 | -------------------------------------------------------------------------------- /chain/src/block/store.rs: -------------------------------------------------------------------------------- 1 | //! Block storage backends. 2 | 3 | pub use nakamoto_common::block::store::*; 4 | 5 | pub mod io; 6 | pub mod memory; 7 | 8 | pub use io::File; 9 | pub use memory::Memory; 10 | -------------------------------------------------------------------------------- /.gitsigners: -------------------------------------------------------------------------------- 1 | self@cloudhead.io ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPrvO/Q9/5eGYPWlj0ygyzismMbJ3P2ZbzN/HcVCrpPD 2 | self@cloudhead.io ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIL460KIEccS4881p7PPpiiQBsxF+H5tgC6De6crw9rbU 3 | -------------------------------------------------------------------------------- /chain/src/block.rs: -------------------------------------------------------------------------------- 1 | //! Block and blockchain related functionality. 2 | pub mod cache; 3 | pub mod store; 4 | 5 | pub use nakamoto_common::bitcoin::blockdata::block::{Block, BlockHeader}; 6 | pub use nakamoto_common::bitcoin::blockdata::transaction::Transaction; 7 | pub use nakamoto_common::bitcoin::hash_types::BlockHash; 8 | pub use nakamoto_common::block::tree::*; 9 | -------------------------------------------------------------------------------- /client/src/lib.rs: -------------------------------------------------------------------------------- 1 | //! Nakamoto's client library. 
2 | #![allow(clippy::inconsistent_struct_constructor)] 3 | #![allow(clippy::type_complexity)] 4 | #![deny(missing_docs, unsafe_code)] 5 | mod client; 6 | mod error; 7 | #[allow(hidden_glob_reexports)] 8 | mod event; 9 | mod peer; 10 | mod service; 11 | 12 | pub use client::*; 13 | pub mod handle; 14 | 15 | #[cfg(test)] 16 | mod tests; 17 | -------------------------------------------------------------------------------- /chain/src/lib.rs: -------------------------------------------------------------------------------- 1 | //! Functionality around proof-of-work chains. 2 | #[allow(clippy::len_without_is_empty)] 3 | #[allow(clippy::collapsible_if)] 4 | #[allow(clippy::type_complexity)] 5 | #[allow(clippy::inconsistent_struct_constructor)] 6 | #[deny( 7 | unsafe_code, 8 | missing_docs, 9 | missing_debug_implementations, 10 | missing_copy_implementations 11 | )] 12 | pub mod block; 13 | pub use block::*; 14 | 15 | #[allow(clippy::inconsistent_struct_constructor)] 16 | pub mod filter; 17 | 18 | #[cfg(test)] 19 | mod tests; 20 | -------------------------------------------------------------------------------- /net/poll/src/fallible.rs: -------------------------------------------------------------------------------- 1 | use std::sync::Mutex; 2 | 3 | pub(super) static FALLIBLE: Mutex> = Mutex::new(None); 4 | 5 | pub(crate) struct FailGuard {} 6 | 7 | impl Drop for FailGuard { 8 | fn drop(&mut self) { 9 | let mut fallible = self::FALLIBLE.lock().unwrap(); 10 | *fallible = None; 11 | } 12 | } 13 | 14 | #[allow(dead_code)] 15 | pub(crate) fn set_fallible(p: f64) -> FailGuard { 16 | let mut fallible = self::FALLIBLE.lock().unwrap(); 17 | *fallible = Some(p); 18 | 19 | FailGuard {} 20 | } 21 | -------------------------------------------------------------------------------- /net/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "nakamoto-net" 3 | description = "Lightweight peer-to-peer networking" 4 | homepage = 
"https://cloudhead.io/nakamoto/" 5 | documentation = "https://docs.rs/nakamoto-net" 6 | repository = "https://github.com/cloudhead/nakamoto" 7 | version = "0.4.0" 8 | authors = ["Alexis Sellier "] 9 | edition = "2021" 10 | license = "MIT" 11 | 12 | [features] 13 | default = [] 14 | 15 | [dependencies] 16 | log = "0.4" 17 | thiserror = "1.0" 18 | crossbeam-channel = { version = "0.5.6" } 19 | quickcheck = { version = "1", optional = true } 20 | fastrand = "1.3.5" 21 | -------------------------------------------------------------------------------- /chain/src/filter/store.rs: -------------------------------------------------------------------------------- 1 | //! Compact block filter store. 2 | 3 | use thiserror::Error; 4 | 5 | pub use nakamoto_common::block::filter::{BlockFilter, FilterHash, FilterHeader, Filters}; 6 | pub use nakamoto_common::block::store::Store; 7 | 8 | pub type File = crate::store::io::File; 9 | pub type Memory = crate::store::memory::Memory; 10 | 11 | /// A filter error occuring in the store. Can happen if the store is corrupted. 
12 | #[derive(Debug, Error)] 13 | pub enum Error { 14 | #[error("filter store is corrupted")] 15 | Integrity, 16 | #[error("the operation was interrupted")] 17 | Interrupted, 18 | } 19 | -------------------------------------------------------------------------------- /common/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "nakamoto-common" 3 | description = "Common types and functions shared by all nakamoto crates" 4 | homepage = "https://cloudhead.io/nakamoto/" 5 | documentation = "https://docs.rs/nakamoto-common" 6 | repository = "https://github.com/cloudhead/nakamoto" 7 | license = "MIT" 8 | version = "0.4.0" 9 | authors = ["Alexis Sellier "] 10 | edition = "2021" 11 | 12 | [dependencies] 13 | nakamoto-net = { version = "0.4.0", path = "../net" } 14 | bitcoin = "0.29.2" 15 | bitcoin_hashes = "0.11.0" 16 | thiserror = "1.0" 17 | fastrand = "1.3.5" 18 | nonempty = "0.7" 19 | microserde = "0.1" 20 | log = { version = "0.4", optional = true } 21 | -------------------------------------------------------------------------------- /common/src/lib.rs: -------------------------------------------------------------------------------- 1 | //! Library of common Bitcoin functionality shared by all crates. 2 | #![allow(clippy::type_complexity)] 3 | #![deny(missing_docs, unsafe_code)] 4 | pub mod block; 5 | pub mod collections; 6 | pub mod network; 7 | pub mod p2p; 8 | 9 | pub use bitcoin; 10 | pub use bitcoin_hashes; 11 | pub use nakamoto_net as net; 12 | pub use nonempty; 13 | 14 | /// Return the function path at the current source location. 15 | #[macro_export] 16 | macro_rules! 
source { 17 | () => {{ 18 | fn f() {} 19 | fn type_of(_: T) -> &'static str { 20 | std::any::type_name::() 21 | } 22 | let name = type_of(f); 23 | &name[..name.len() - 3] 24 | }}; 25 | } 26 | -------------------------------------------------------------------------------- /common/src/p2p.rs: -------------------------------------------------------------------------------- 1 | //! P2P-related types 2 | use std::net; 3 | 4 | pub mod peer; 5 | 6 | /// Communication domain of a network socket. 7 | #[derive(Debug, Copy, Clone, Eq, PartialEq)] 8 | pub enum Domain { 9 | /// IPv4. 10 | IPV4, 11 | /// IPv6. 12 | IPV6, 13 | } 14 | 15 | impl Domain { 16 | /// All domains. 17 | pub fn all() -> Vec { 18 | vec![Self::IPV4, Self::IPV6] 19 | } 20 | 21 | /// Returns the domain for `address`. 22 | pub const fn for_address(address: &net::SocketAddr) -> Domain { 23 | match address { 24 | net::SocketAddr::V4(_) => Domain::IPV4, 25 | net::SocketAddr::V6(_) => Domain::IPV6, 26 | } 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /test/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "nakamoto-test" 3 | description = "Testing utilities for nakamoto crates" 4 | homepage = "https://cloudhead.io/nakamoto/" 5 | documentation = "https://docs.rs/nakamoto-test" 6 | repository = "https://github.com/cloudhead/nakamoto" 7 | version = "0.4.0" 8 | authors = ["Alexis Sellier "] 9 | edition = "2021" 10 | license = "MIT" 11 | 12 | [dependencies] 13 | nakamoto-common = { version = "0.4.0", path = "../common" } 14 | log = { version = "0.4", features = ["std"] } 15 | chrono = { version = "0.4", features = ["std"], default-features = false } 16 | once_cell = "1.17.1" 17 | fastrand = "1.3.5" 18 | colored = "1.9" 19 | quickcheck = { version = "1", default_features = false } 20 | -------------------------------------------------------------------------------- /net/poll/Cargo.toml: 
-------------------------------------------------------------------------------- 1 | [package] 2 | name = "nakamoto-net-poll" 3 | description = "Poll-based networking for nakamoto" 4 | homepage = "https://cloudhead.io/nakamoto/" 5 | repository = "https://github.com/cloudhead/nakamoto" 6 | version = "0.4.0" 7 | authors = ["Alexis Sellier "] 8 | edition = "2021" 9 | license = "MIT" 10 | 11 | [dependencies] 12 | nakamoto-net = { version = "0.4.0", path = ".." } 13 | crossbeam-channel = { version = "0.5.6" } 14 | popol = { version = "2" } 15 | socket2 = { version = "0.4" } 16 | libc = { version = "0.2" } 17 | log = { version = "0.4" } 18 | 19 | [dev-dependencies] 20 | fastrand = "1.3.5" 21 | quickcheck = { version = "1", default-features = false } 22 | quickcheck_macros = "1" 23 | -------------------------------------------------------------------------------- /wallet/src/wallet/schema.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE IF NOT EXISTS "utxos" ( 2 | "id" integer PRIMARY KEY, 3 | "txid" text NOT NULL, 4 | "vout" integer NOT NULL, 5 | "address" text NOT NULL REFERENCES "address" ("id"), 6 | "value" integer NOT NULL, 7 | "date" integer NOT NULL, 8 | 9 | UNIQUE ("txid", "vout") 10 | ) STRICT; 11 | 12 | CREATE TABLE IF NOT EXISTS "addresses" ( 13 | "id" text PRIMARY KEY, 14 | "index" integer NOT NULL UNIQUE, 15 | "label" text DEFAULT NULL, 16 | "received" integer NOT NULL DEFAULT 0, 17 | "used" integer NOT NULL DEFAULT false 18 | ) STRICT; 19 | -------------------------------------------------------------------------------- /node/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "nakamoto-node" 3 | description = "Bitcoin light-client node using nakamoto crates" 4 | homepage = "https://cloudhead.io/nakamoto/" 5 | documentation = "https://docs.rs/nakamoto-node" 6 | repository = "https://github.com/cloudhead/nakamoto" 7 | version = "0.4.0" 8 | 
authors = ["Alexis Sellier "] 9 | edition = "2021" 10 | license = "MIT" 11 | 12 | [dependencies] 13 | nakamoto-client = { version = "0.4.0", path = "../client" } 14 | nakamoto-net-poll = { version = "0.4.0", path = "../net/poll" } 15 | argh = "0.1.3" 16 | colored = "1.9" 17 | atty = { version = "0.2" } 18 | thiserror = "1.0" 19 | log = { version = "0.4", features = ["std"] } 20 | chrono = { version = "0.4", features = ["std"], default-features = false } 21 | -------------------------------------------------------------------------------- /chain/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "nakamoto-chain" 3 | description = "Chain storage and fork selection used by the nakamoto crate." 4 | homepage = "https://cloudhead.io/nakamoto/" 5 | documentation = "https://docs.rs/nakamoto-chain" 6 | repository = "https://github.com/cloudhead/nakamoto" 7 | version = "0.4.0" 8 | authors = ["Alexis Sellier "] 9 | edition = "2021" 10 | license = "MIT" 11 | 12 | [dependencies] 13 | nakamoto-common = { version = "0.4.0", path = "../common", features = ["log"] } 14 | thiserror = "1.0" 15 | log = "0.4" 16 | 17 | [dev-dependencies] 18 | nakamoto-test = { version = "0.4.0", path = "../test" } 19 | quickcheck = { version = "1", default_features = false } 20 | quickcheck_macros = "1" 21 | tempfile = "3" 22 | fastrand = "1.3.5" 23 | -------------------------------------------------------------------------------- /wallet/src/error.rs: -------------------------------------------------------------------------------- 1 | use std::io; 2 | 3 | use nakamoto_client::handle; 4 | use thiserror::Error; 5 | 6 | use crate::{ 7 | input, 8 | wallet::{db, hw, ui}, 9 | }; 10 | 11 | /// An error occuring in the wallet. 
12 | #[derive(Error, Debug)] 13 | pub enum Error { 14 | #[error("client handle error: {0}")] 15 | Handle(#[from] handle::Error), 16 | #[error("client error: {0}")] 17 | Client(#[from] nakamoto_client::Error), 18 | #[error("i/o error: {0}")] 19 | Io(#[from] io::Error), 20 | #[error("input error: {0}")] 21 | Input(#[from] input::Error), 22 | #[error("ui error: {0}")] 23 | Ui(#[from] ui::Error), 24 | #[error("channel error: {0}")] 25 | Channel(#[from] crossbeam_channel::RecvError), 26 | #[error(transparent)] 27 | Db(#[from] db::Error), 28 | #[error(transparent)] 29 | Hw(#[from] hw::Error), 30 | } 31 | -------------------------------------------------------------------------------- /deny.toml: -------------------------------------------------------------------------------- 1 | [advisories] 2 | db-path = "~/.cargo/advisory-db" 3 | db-urls = ["https://github.com/rustsec/advisory-db"] 4 | vulnerability = "deny" 5 | unmaintained = "warn" 6 | yanked = "deny" 7 | notice = "warn" 8 | ignore = [] 9 | 10 | [licenses] 11 | unlicensed = "deny" 12 | allow = [ 13 | "MIT", 14 | "Apache-2.0", 15 | "CC0-1.0", 16 | "BSD-3-Clause", 17 | "MPL-2.0" 18 | ] 19 | deny = [] 20 | copyleft = "warn" 21 | allow-osi-fsf-free = "neither" 22 | default = "deny" 23 | confidence-threshold = 0.8 24 | exceptions = [] 25 | 26 | [licenses.private] 27 | ignore = false 28 | registries = [] 29 | 30 | [bans] 31 | multiple-versions = "deny" 32 | wildcards = "deny" 33 | highlight = "all" 34 | allow = [] 35 | deny = [] 36 | skip = [] 37 | skip-tree = [] 38 | 39 | [sources] 40 | unknown-registry = "warn" 41 | unknown-git = "warn" 42 | allow-registry = ["https://github.com/rust-lang/crates.io-index"] 43 | allow-git = [] 44 | -------------------------------------------------------------------------------- /net/src/simulator/arbitrary.rs: -------------------------------------------------------------------------------- 1 | use super::Options; 2 | 3 | impl quickcheck::Arbitrary for Options { 4 | fn arbitrary(g: &mut 
quickcheck::Gen) -> Self { 5 | let rng = fastrand::Rng::with_seed(u64::arbitrary(g)); 6 | let from = rng.u64(0..=1); 7 | let to = rng.u64(2..4); 8 | let failure_rate = rng.f64() / 4.; 9 | 10 | Self { 11 | latency: from..to, 12 | failure_rate, 13 | } 14 | } 15 | 16 | fn shrink(&self) -> Box> { 17 | let failure_rate = self.failure_rate - 0.01; 18 | let latency = self.latency.start.saturating_sub(1)..self.latency.end.saturating_sub(1); 19 | 20 | if failure_rate < 0. && latency.is_empty() { 21 | return Box::new(std::iter::empty()); 22 | } 23 | 24 | Box::new(std::iter::once(Self { 25 | latency, 26 | failure_rate, 27 | })) 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /p2p/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "nakamoto-p2p" 3 | description = "Peer-to-peer protocol state machine for the nakamoto crate" 4 | homepage = "https://cloudhead.io/nakamoto/" 5 | documentation = "https://docs.rs/nakamoto-p2p" 6 | repository = "https://github.com/cloudhead/nakamoto" 7 | version = "0.4.0" 8 | authors = ["Alexis Sellier "] 9 | edition = "2021" 10 | license = "MIT" 11 | 12 | [dependencies] 13 | nakamoto-common = { version = "0.4.0", path = "../common" } 14 | nakamoto-net = { version = "0.4.0", path = "../net" } 15 | log = "0.4" 16 | thiserror = "1.0" 17 | crossbeam-channel = { version = "0.5.6" } 18 | fastrand = "1.3.5" 19 | microserde = "0.1" 20 | 21 | [dev-dependencies] 22 | nakamoto-test = { version = "0.4.0", path = "../test" } 23 | nakamoto-chain = { version = "0.4.0", path = "../chain" } 24 | nakamoto-net = { version = "0.4.0", path = "../net", features = ["quickcheck"] } 25 | tempfile = "3" 26 | quickcheck = { version = "1", default_features = false } 27 | quickcheck_macros = "1" 28 | -------------------------------------------------------------------------------- /client/Cargo.toml: 
-------------------------------------------------------------------------------- 1 | [package] 2 | name = "nakamoto-client" 3 | description = "Bitcoin light-client library used by nakamoto crate" 4 | homepage = "https://cloudhead.io/nakamoto/" 5 | documentation = "https://docs.rs/nakamoto-client" 6 | repository = "https://github.com/cloudhead/nakamoto" 7 | version = "0.4.0" 8 | authors = ["Alexis Sellier "] 9 | edition = "2021" 10 | license = "MIT" 11 | 12 | [dependencies] 13 | nakamoto-p2p = { version = "0.4.0", path = "../p2p" } 14 | nakamoto-net = { version = "0.4.0", path = "../net" } 15 | nakamoto-chain = { version = "0.4.0", path = "../chain" } 16 | nakamoto-common = { version = "0.4.0", path = "../common" } 17 | crossbeam-channel = { version = "0.5.6" } 18 | thiserror = "1.0" 19 | log = "0.4" 20 | fastrand = "1.3.5" 21 | microserde = "0.1" 22 | 23 | [dev-dependencies] 24 | nakamoto-test = { version = "0.4.0", path = "../test" } 25 | nakamoto-net-poll = { version = "0.4.0", path = "../net/poll" } 26 | tempfile = "3" 27 | quickcheck = { version = "1", default-features = false } 28 | quickcheck_macros = "1" 29 | -------------------------------------------------------------------------------- /net/src/error.rs: -------------------------------------------------------------------------------- 1 | //! Peer-to-peer protocol errors. 2 | 3 | use std::fmt::Debug; 4 | use std::io; 5 | 6 | use crossbeam_channel as crossbeam; 7 | 8 | use thiserror::Error; 9 | 10 | /// An error occuring in peer-to-peer networking code. 11 | #[derive(Error, Debug)] 12 | pub enum Error { 13 | /// An I/O error. 14 | #[error("i/o error: {0}")] 15 | Io(#[from] io::Error), 16 | 17 | /// A channel send or receive error. 
18 | #[error("channel error: {0}")] 19 | Channel(Box), 20 | } 21 | 22 | impl From> for Error { 23 | fn from(err: crossbeam::SendError) -> Self { 24 | Self::Channel(Box::new(err)) 25 | } 26 | } 27 | 28 | impl From for Error { 29 | fn from(err: crossbeam::RecvError) -> Self { 30 | Self::Channel(Box::new(err)) 31 | } 32 | } 33 | 34 | impl From for Error { 35 | fn from(err: crossbeam::RecvTimeoutError) -> Self { 36 | Self::Channel(Box::new(err)) 37 | } 38 | } 39 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2020, 2021 Alexis Sellier 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy of 4 | this software and associated documentation files (the "Software"), to deal in 5 | the Software without restriction, including without limitation the rights to 6 | use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of 7 | the Software, and to permit persons to whom the Software is furnished to do so, 8 | subject to the following conditions: 9 | 10 | The above copyright notice and this permission notice shall be included in all 11 | copies or substantial portions of the Software. 12 | 13 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS 15 | FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR 16 | COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER 17 | IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 18 | CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 19 | -------------------------------------------------------------------------------- /wallet/src/logger.rs: -------------------------------------------------------------------------------- 1 | //! Logging module. 
2 | use std::{io, time::SystemTime}; 3 | 4 | use chrono::prelude::*; 5 | use log::{Level, Log, Metadata, Record, SetLoggerError}; 6 | 7 | struct Logger { 8 | level: Level, 9 | stream: io::Stderr, 10 | } 11 | 12 | impl Log for Logger { 13 | fn enabled(&self, metadata: &Metadata) -> bool { 14 | metadata.level() <= self.level 15 | } 16 | 17 | fn log(&self, record: &Record) { 18 | if self.enabled(record.metadata()) { 19 | write(record, &self.stream); 20 | 21 | fn write(record: &log::Record, mut stream: impl io::Write) { 22 | let now = 23 | DateTime::from(SystemTime::now()).to_rfc3339_opts(SecondsFormat::Millis, true); 24 | writeln!(stream, "{} {}", now, record.args()).expect("write shouldn't fail"); 25 | } 26 | } 27 | } 28 | 29 | fn flush(&self) {} 30 | } 31 | 32 | /// Initialize a new logger. 33 | pub fn init(level: Level) -> Result<(), SetLoggerError> { 34 | let logger = Logger { 35 | level, 36 | stream: io::stderr(), 37 | }; 38 | 39 | log::set_boxed_logger(Box::new(logger))?; 40 | log::set_max_level(level.to_level_filter()); 41 | 42 | Ok(()) 43 | } 44 | -------------------------------------------------------------------------------- /common/src/block/genesis.rs: -------------------------------------------------------------------------------- 1 | //! Bitcoin genesis hashes. 2 | 3 | #[rustfmt::skip] 4 | /// Bitcoin mainnet genesis hash. 5 | pub const MAINNET: &[u8; 32] = &[ 6 | 0x6f, 0xe2, 0x8c, 0x0a, 0xb6, 0xf1, 0xb3, 0x72, 7 | 0xc1, 0xa6, 0xa2, 0x46, 0xae, 0x63, 0xf7, 0x4f, 8 | 0x93, 0x1e, 0x83, 0x65, 0xe1, 0x5a, 0x08, 0x9c, 9 | 0x68, 0xd6, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, 10 | ]; 11 | 12 | #[rustfmt::skip] 13 | /// Bitcoin testnet genesis hash. 
14 | pub const TESTNET: &[u8; 32] = &[ 15 | 0x43, 0x49, 0x7f, 0xd7, 0xf8, 0x26, 0x95, 0x71, 16 | 0x08, 0xf4, 0xa3, 0x0f, 0xd9, 0xce, 0xc3, 0xae, 17 | 0xba, 0x79, 0x97, 0x20, 0x84, 0xe9, 0x0e, 0xad, 18 | 0x01, 0xea, 0x33, 0x09, 0x00, 0x00, 0x00, 0x00, 19 | ]; 20 | 21 | #[rustfmt::skip] 22 | /// Bitcoin regtest genesis hash. 23 | pub const REGTEST: &[u8; 32] = &[ 24 | 0x06, 0x22, 0x6e, 0x46, 0x11, 0x1a, 0x0b, 0x59, 25 | 0xca, 0xaf, 0x12, 0x60, 0x43, 0xeb, 0x5b, 0xbf, 26 | 0x28, 0xc3, 0x4f, 0x3a, 0x5e, 0x33, 0x2a, 0x1f, 27 | 0xc7, 0xb2, 0xb7, 0x3c, 0xf1, 0x88, 0x91, 0x0f, 28 | ]; 29 | 30 | #[rustfmt::skip] 31 | /// Bitcoin signet genesis hash. 32 | pub const SIGNET: &[u8; 32] = &[ 33 | 0xf6, 0x1e, 0xee, 0x3b, 0x63, 0xa3, 0x80, 0xa4, 34 | 0x77, 0xa0, 0x63, 0xaf, 0x32, 0xb2, 0xbb, 0xc9, 35 | 0x7c, 0x9f, 0xf9, 0xf0, 0x1f, 0x2c, 0x42, 0x25, 36 | 0xe9, 0x73, 0x98, 0x81, 0x08, 0x00, 0x00, 0x00 37 | ]; 38 | -------------------------------------------------------------------------------- /node/src/lib.rs: -------------------------------------------------------------------------------- 1 | //! Stand-alone light-client daemon. Runs the light-client as a background process. 2 | #![deny(missing_docs, unsafe_code)] 3 | 4 | use std::net; 5 | use std::path::PathBuf; 6 | 7 | pub use nakamoto_client::{Client, Config, Error, Network}; 8 | pub use nakamoto_client::{Domain, LoadingHandler}; 9 | 10 | pub mod logger; 11 | 12 | /// The network reactor we're going to use. 13 | type Reactor = nakamoto_net_poll::Reactor; 14 | 15 | /// Run the light-client. Takes an initial list of peers to connect to, a list of listen addresses, 16 | /// the client root and the Bitcoin network to connect to. 
17 | pub fn run( 18 | connect: &[net::SocketAddr], 19 | listen: &[net::SocketAddr], 20 | root: Option, 21 | domains: &[Domain], 22 | network: Network, 23 | ) -> Result<(), Error> { 24 | let mut cfg = Config { 25 | network, 26 | connect: connect.to_vec(), 27 | domains: domains.to_vec(), 28 | listen: if listen.is_empty() { 29 | vec![([0, 0, 0, 0], 0).into()] 30 | } else { 31 | listen.to_vec() 32 | }, 33 | ..Config::default() 34 | }; 35 | if let Some(path) = root { 36 | cfg.root = path; 37 | } 38 | if !connect.is_empty() { 39 | cfg.limits.max_outbound_peers = connect.len(); 40 | } 41 | 42 | Client::::new()?.run(cfg) 43 | } 44 | -------------------------------------------------------------------------------- /wallet/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "nakamoto-wallet" 3 | description = "Very basic command-line Bitcoin wallet using nakamoto" 4 | homepage = "https://cloudhead.io/nakamoto/" 5 | documentation = "https://docs.rs/nakamoto-wallet" 6 | repository = "https://github.com/cloudhead/nakamoto" 7 | version = "0.4.0" 8 | authors = ["Alexis Sellier "] 9 | edition = "2021" 10 | license = "MIT" 11 | 12 | [dependencies] 13 | nakamoto-client = { version = "0.4.0", path = "../client" } 14 | nakamoto-net-poll = { version = "0.4.0", path = "../net/poll" } 15 | nakamoto-p2p = { version = "0.4.0", path = "../p2p" } 16 | nakamoto-common = { version = "0.4.0", path = "../common" } 17 | log = { version = "0.4", features = ["std"] } 18 | argh = { version = "0.1.3" } 19 | crossbeam-channel = { version = "0.5.6" } 20 | chrono = { version = "0.4", features = ["std"], default-features = false } 21 | coldcard = { version = "0.5", default-features = false, features = ["linux-static-libusb"] } 22 | thiserror = { version = "1.0" } 23 | sqlite = { version = "0.28" } 24 | sqlite3-sys = { version = "0.14", default-features = false } 25 | sqlite3-src = { version = "0.4.0", features = ["bundled"] } 26 | termion = 
{ version = "2" } 27 | signal-hook = { version = "0.3.14", features = ["iterator"], default-features = false } 28 | 29 | [dev-dependencies] 30 | nakamoto-test = { version = "0.4.0", path = "../test" } 31 | -------------------------------------------------------------------------------- /p2p/src/lib.rs: -------------------------------------------------------------------------------- 1 | //! Nakamoto's peer-to-peer library. 2 | //! 3 | //! The `p2p` crate implements the core protocol state-machine. It can be found under the 4 | //! [fsm] module. 5 | //! 6 | //! Nakamoto's implementation of the peer-to-peer protocol(s) is *I/O-free*. The 7 | //! core logic is implemented as a state machine with *inputs* and *outputs* and a 8 | //! *step* function that does not perform any network I/O. 9 | //! 10 | //! The reason for this is to keep the protocol code easy to read and simple to 11 | //! test. Not having I/O minimizes the possible error states and error-handling 12 | //! code in the protocol, and allows for a fully *deterministic* protocol. This 13 | //! means failing tests can always be reproduced and 100% test coverage is within 14 | //! reach. 15 | //! 16 | //! To achieve this, handling of network I/O is cleanly separated into a network 17 | //! *reactor*. See the `nakamoto-net-poll` crate for an example of a reactor. 18 | //! 
19 | #![allow(clippy::type_complexity)] 20 | #![allow(clippy::new_without_default)] 21 | #![allow(clippy::collapsible_if)] 22 | #![allow(clippy::single_match)] 23 | #![allow(clippy::comparison_chain)] 24 | #![allow(clippy::inconsistent_struct_constructor)] 25 | #![allow(clippy::too_many_arguments)] 26 | #![deny(missing_docs, unsafe_code)] 27 | pub mod fsm; 28 | pub mod stream; 29 | 30 | pub use fsm::{Command, Config, DisconnectReason, Event, Io, Link, PeerId, StateMachine}; 31 | pub use nakamoto_net as net; 32 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "nakamoto" 3 | description = "Privacy-preserving Bitcoin light-client" 4 | documentation = "https://docs.rs/nakamoto" 5 | homepage = "https://cloudhead.io/nakamoto/" 6 | repository = "https://github.com/cloudhead/nakamoto" 7 | readme = "README.md" 8 | license = "MIT" 9 | version = "0.4.0" 10 | authors = ["Alexis Sellier "] 11 | edition = "2021" 12 | 13 | [workspace] 14 | members = [ 15 | "common", 16 | "p2p", 17 | "chain", 18 | "node", 19 | "test", 20 | "client", 21 | "wallet", 22 | "net", 23 | ] 24 | default-members = [ 25 | "common", 26 | "p2p", 27 | "chain", 28 | "test", 29 | "client", 30 | "net", 31 | ] 32 | 33 | [features] 34 | default = [ 35 | "nakamoto-client", 36 | "nakamoto-chain", 37 | "nakamoto-p2p", 38 | "nakamoto-common", 39 | "nakamoto-net", 40 | "nakamoto-net-poll" 41 | ] 42 | 43 | [dependencies] 44 | nakamoto-common = { version = "0.4.0", path = "./common", optional = true } 45 | nakamoto-node = { version = "0.4.0", path = "./node", optional = true } 46 | nakamoto-client = { version = "0.4.0", path = "./client", optional = true } 47 | nakamoto-chain = { version = "0.4.0", path = "./chain", optional = true } 48 | nakamoto-p2p = { version = "0.4.0", path = "./p2p", optional = true } 49 | nakamoto-test = { version = "0.4.0", path = "./test", 
optional = true } 50 | nakamoto-wallet = { version = "0.4.0", path = "./wallet", optional = true } 51 | nakamoto-net = { version = "0.4.0", path = "./net", optional = true } 52 | nakamoto-net-poll = { version = "0.4.0", path = "./net/poll", optional = true } 53 | -------------------------------------------------------------------------------- /wallet/src/input.rs: -------------------------------------------------------------------------------- 1 | use std::io; 2 | 3 | use crossbeam_channel as chan; 4 | use termion::event::{Event, Key}; 5 | use termion::input::TermRead; 6 | use thiserror::Error; 7 | 8 | /// An input error. 9 | #[derive(Error, Debug)] 10 | pub enum Error { 11 | #[error("i/o error: {0}")] 12 | Io(#[from] io::Error), 13 | #[error("channel error: {0}")] 14 | EventChannel(#[from] chan::SendError), 15 | #[error("channel error: {0}")] 16 | SignalChannel(#[from] chan::SendError), 17 | } 18 | 19 | #[derive(Copy, Clone, Debug)] 20 | pub enum Signal { 21 | WindowResized, 22 | Interrupted, 23 | } 24 | 25 | pub fn run(channel: chan::Sender, exit: chan::Receiver<()>) -> Result<(), Error> { 26 | let stdin = io::stdin().lock(); 27 | 28 | for event in stdin.events() { 29 | let event = event?; 30 | 31 | if exit.try_recv().is_ok() { 32 | return Ok(()); 33 | } 34 | if let Event::Key(Key::Char('q')) | Event::Key(Key::Esc) = event { 35 | return Ok(()); 36 | } 37 | channel.send(event)?; 38 | } 39 | Ok(()) 40 | } 41 | 42 | pub fn signals(channel: chan::Sender) -> Result<(), Error> { 43 | use signal_hook::consts::signal::*; 44 | 45 | let mut signals = signal_hook::iterator::Signals::new([SIGWINCH, SIGINT])?; 46 | for signal in signals.forever() { 47 | match signal { 48 | SIGWINCH => channel.send(Signal::WindowResized)?, 49 | SIGINT => channel.send(Signal::Interrupted)?, 50 | _ => {} 51 | } 52 | } 53 | Ok(()) 54 | } 55 | -------------------------------------------------------------------------------- /client/src/error.rs: 
-------------------------------------------------------------------------------- 1 | //! Node error module. 2 | use std::io; 3 | 4 | use crossbeam_channel as chan; 5 | use thiserror::Error; 6 | 7 | use nakamoto_chain as chain; 8 | use nakamoto_common as common; 9 | use nakamoto_p2p as p2p; 10 | 11 | use p2p::fsm::Command; 12 | 13 | /// A client error. 14 | #[derive(Error, Debug)] 15 | pub enum Error { 16 | /// An error occuring from a client handle. 17 | #[error(transparent)] 18 | Handle(#[from] crate::handle::Error), 19 | /// An error coming from the networking sub-system. 20 | #[error(transparent)] 21 | Net(#[from] nakamoto_net::error::Error), 22 | /// A chain-related error. 23 | #[error(transparent)] 24 | Chain(#[from] common::block::tree::Error), 25 | /// An I/O error. 26 | #[error(transparent)] 27 | Io(#[from] io::Error), 28 | /// An error coming from the block store. 29 | #[error(transparent)] 30 | BlockStore(#[from] common::block::store::Error), 31 | /// An error coming from the filter store. 32 | #[error(transparent)] 33 | FilterStore(#[from] chain::filter::store::Error), 34 | /// An error coming from the peer store. 35 | #[error("error loading peers: {0}")] 36 | PeerStore(io::Error), 37 | /// A communication channel error. 38 | #[error("command channel disconnected")] 39 | Channel, 40 | } 41 | 42 | impl From> for Error { 43 | fn from(_: chan::SendError) -> Self { 44 | Self::Channel 45 | } 46 | } 47 | 48 | impl From for Error { 49 | fn from(_: chan::RecvError) -> Self { 50 | Self::Channel 51 | } 52 | } 53 | -------------------------------------------------------------------------------- /common/src/block/checkpoints.rs: -------------------------------------------------------------------------------- 1 | //! Checkpoints used to validate blocks at certain heights. 2 | 3 | #[rustfmt::skip] 4 | /// Mainnet checkpoints. 
// Each entry is a (block height, block hash) pair. Hashes are displayed
// big-endian, zero-prefixed, as shown by block explorers and `bitcoind`.
pub const MAINNET: &[(u64, &str)] = &[
    (11111, "0000000069e244f73d78e8fd29ba2fd2ed618bd6fa2ee92559f542fdb26e7c1d"),
    (33333, "000000002dd5588a74784eaa7ab0507a18ad16a236e7b1ce69f00d7ddfb5d0a6"),
    (74000, "0000000000573993a3c9e41ce34471c079dcf5f52a0e824a81e7f953b8661a20"),
    (105000, "00000000000291ce28027faea320c8d2b054b2e0fe44a773f3eefb151d6bdc97"),
    (134444, "00000000000005b12ffd4cd315cd34ffd4a594f430ac814c91184a0d42d2b0fe"),
    (168000, "000000000000099e61ea72015e79632f216fe6cb33d7899acb35b75c8303b763"),
    (193000, "000000000000059f452a5f7340de6682a977387c17010ff6e6c3bd83ca8b1317"),
    (210000, "000000000000048b95347e83192f69cf0366076336c639f9b7228e9ba171342e"),
    (216116, "00000000000001b4f4b433e81ee46494af945cf96014816a4e2370f11b23df4e"),
    (225430, "00000000000001c108384350f74090433e7fcf79a606b8e797f065b130575932"),
    (250000, "000000000000003887df1f29024b06fc2200b55f8af8f35453d7be294df2d214"),
    (279000, "0000000000000001ae8c72a0b0c301f67e3afca10e819efa9041e458e9bd7e40"),
    (295000, "00000000000000004d9b4ef50f0f9d686fd69db2e03af35a100370c64632a983"),
];

/// Testnet checkpoints.
#[rustfmt::skip]
pub const TESTNET: &[(u64, &str)] = &[
    (546, "000000002a936ca763904c3c35fce2f3556c559c0214345d31b1bcebf76acb70"),
];

/// Regtest checkpoints.
// Regtest chains are local and ephemeral, so no checkpoints apply.
pub const REGTEST: &[(u64, &str)] = &[];

/// Signet checkpoints.
pub const SIGNET: &[(u64, &str)] = &[];
11 | #[derive(Debug)] 12 | pub struct Socket { 13 | pub address: net::SocketAddr, 14 | pub link: Link, 15 | 16 | buffer: Vec, 17 | raw: R, 18 | } 19 | 20 | impl Socket { 21 | /// Get socket local address. 22 | pub fn local_address(&self) -> io::Result { 23 | self.raw.local_addr() 24 | } 25 | 26 | /// Disconnect socket. 27 | pub fn disconnect(&self) -> io::Result<()> { 28 | self.raw.shutdown(net::Shutdown::Both) 29 | } 30 | } 31 | 32 | impl Socket { 33 | /// Create a new socket from a `io::Read` and an address pair. 34 | pub fn from(raw: R, address: net::SocketAddr, link: Link) -> Self { 35 | Self { 36 | raw, 37 | link, 38 | address, 39 | buffer: Vec::with_capacity(1024), 40 | } 41 | } 42 | 43 | pub fn read(&mut self, buf: &mut [u8]) -> Result { 44 | self.raw.read(buf) 45 | } 46 | 47 | pub fn push(&mut self, bytes: &[u8]) { 48 | self.buffer.extend_from_slice(bytes); 49 | } 50 | 51 | pub fn flush(&mut self) -> io::Result<()> { 52 | fallible! { io::Error::from(io::ErrorKind::Other) }; 53 | 54 | while !self.buffer.is_empty() { 55 | match self.raw.write(&self.buffer) { 56 | Err(e) => return Err(e), 57 | 58 | Ok(0) => return Err(io::Error::from(io::ErrorKind::WriteZero)), 59 | Ok(n) => { 60 | self.buffer.drain(..n); 61 | } 62 | } 63 | } 64 | self.raw.flush() 65 | } 66 | } 67 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # CONTRIBUTING 2 | 3 | Contributions are very welcome. When contributing code, please follow these 4 | simple guidelines. 5 | 6 | * Make sure you run `rustfmt` on your code. Also ensure all trailing whitespace 7 | is trimmed. 8 | * Run the tests with `cargo test --all`. 9 | * Don't add any new dependencies. 10 | * Write properly formatted git commits (see below). 
11 | 12 | Git commits 13 | ----------- 14 | A properly formed git commit subject line should always be able to complete the 15 | following sentence: 16 | 17 | If applied, this commit will _____ 18 | 19 | For example, the following message is well formed: 20 | 21 | Add support for .gif files 22 | 23 | In addition, it should be capitalized and *must not* include a period. 24 | 25 | When it comes to formatting, here's a model git commit message[1]: 26 | 27 | Capitalized, short (50 chars or less) summary 28 | 29 | More detailed explanatory text, if necessary. Wrap it to about 72 30 | characters or so. In some contexts, the first line is treated as the 31 | subject of an email and the rest of the text as the body. The blank 32 | line separating the summary from the body is critical (unless you omit 33 | the body entirely); tools like rebase can get confused if you run the 34 | two together. 35 | 36 | Write your commit message in the imperative: "Fix bug" and not "Fixed bug" 37 | or "Fixes bug." This convention matches up with commit messages generated 38 | by commands like git merge and git revert. 39 | 40 | Further paragraphs come after blank lines. 41 | 42 | - Bullet points are okay, too. 43 | 44 | - Typically a hyphen or asterisk is used for the bullet, followed by a 45 | single space, with blank lines in between, but conventions vary here. 46 | 47 | - Use a hanging indent. 
48 | 49 | --- 50 | 51 | [1]: http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html 52 | -------------------------------------------------------------------------------- /wallet/src/main.rs: -------------------------------------------------------------------------------- 1 | use std::net; 2 | use std::path::PathBuf; 3 | 4 | use argh::FromArgs; 5 | 6 | use nakamoto_common::bitcoin::util::bip32::DerivationPath; 7 | use nakamoto_common::bitcoin::Address; 8 | use nakamoto_common::block::Height; 9 | use nakamoto_common::network::Network; 10 | use nakamoto_wallet::logger; 11 | 12 | /// A Bitcoin wallet. 13 | #[derive(FromArgs)] 14 | pub struct Options { 15 | /// watch the following addresses 16 | #[argh(option)] 17 | pub addresses: Vec
impl Options {
    /// Parse options from the process environment (CLI arguments).
    pub fn from_env() -> Self {
        argh::from_env()
    }
}

fn main() {
    let opts = Options::from_env();

    // Default to error-only output; `--debug` enables debug logging.
    let level = if opts.debug {
        log::Level::Debug
    } else {
        log::Level::Error
    };
    logger::init(level).expect("initializing logger for the first time");

    // Run the wallet; on fatal error, log it and exit non-zero.
    // NOTE(review): `opts.addresses` is parsed but not passed here — confirm
    // whether the watch-addresses option is wired up elsewhere.
    if let Err(err) = nakamoto_wallet::run(
        &opts.wallet,
        opts.birth_height,
        opts.hd_path,
        opts.network,
        opts.connect,
        opts.offline,
    ) {
        log::error!("Fatal: {}", err);
        std::process::exit(1);
    }
}
render(self, width: usize, start: u16, term: &mut W) -> io::Result<()> { 29 | use std::fmt::Write; 30 | 31 | for (i, row) in self.rows.iter().enumerate() { 32 | let mut output = String::new(); 33 | let cells = row.len(); 34 | 35 | for (i, cell) in row.iter().enumerate() { 36 | if i == cells - 1 { 37 | write!(output, "{}", cell).ok(); 38 | } else { 39 | write!(output, "{:width$} ", cell, width = self.widths[i]).ok(); 40 | } 41 | } 42 | if output.chars().count() <= width { 43 | write!( 44 | term, 45 | "{}{}{}", 46 | cursor::Goto(1, i as u16 + start), 47 | clear::CurrentLine, 48 | output 49 | )?; 50 | } else { 51 | write!( 52 | term, 53 | "{}{}{:.width$}…", 54 | cursor::Goto(1, i as u16 + start), 55 | clear::CurrentLine, 56 | &output, 57 | width = width - 1 58 | )?; 59 | } 60 | } 61 | Ok(()) 62 | } 63 | } 64 | -------------------------------------------------------------------------------- /node/src/logger.rs: -------------------------------------------------------------------------------- 1 | //! Logging module. 
2 | use std::{io, time::SystemTime}; 3 | 4 | use chrono::prelude::*; 5 | use colored::*; 6 | use log::{Level, Log, Metadata, Record, SetLoggerError}; 7 | 8 | struct Logger { 9 | level: Level, 10 | } 11 | 12 | impl Log for Logger { 13 | fn enabled(&self, metadata: &Metadata) -> bool { 14 | metadata.level() <= self.level 15 | } 16 | 17 | fn log(&self, record: &Record) { 18 | if self.enabled(record.metadata()) { 19 | let target = record.target(); 20 | 21 | if record.level() == Level::Error { 22 | write(record, target, io::stderr()); 23 | } else { 24 | write(record, target, io::stdout()); 25 | } 26 | 27 | fn write(record: &log::Record, target: &str, mut stream: impl io::Write) { 28 | let message = format!("{} {} {}", record.level(), target.bold(), record.args()); 29 | let message = match record.level() { 30 | Level::Error => message.red(), 31 | Level::Warn => message.yellow(), 32 | Level::Info => message.normal(), 33 | Level::Debug => message.dimmed(), 34 | Level::Trace => message.white().dimmed(), 35 | }; 36 | 37 | writeln!( 38 | stream, 39 | "{} {}", 40 | DateTime::from(SystemTime::now()) 41 | .to_rfc3339_opts(SecondsFormat::Millis, true) 42 | .white(), 43 | message, 44 | ) 45 | .expect("write shouldn't fail"); 46 | } 47 | } 48 | } 49 | 50 | fn flush(&self) {} 51 | } 52 | 53 | /// Initialize a new logger. 
54 | pub fn init(level: Level) -> Result<(), SetLoggerError> { 55 | let logger = Logger { level }; 56 | 57 | log::set_boxed_logger(Box::new(logger))?; 58 | log::set_max_level(level.to_level_filter()); 59 | 60 | Ok(()) 61 | } 62 | -------------------------------------------------------------------------------- /.github/workflows/actions.yml: -------------------------------------------------------------------------------- 1 | name: Cargo 2 | 3 | on: 4 | push: 5 | branches: [master] 6 | pull_request: 7 | branches: [master] 8 | 9 | jobs: 10 | build: 11 | name: Build & Test 12 | strategy: 13 | matrix: 14 | os: ['ubuntu-latest', 'macos-latest'] 15 | runs-on: ${{ matrix.os }} 16 | steps: 17 | - name: Dependencies 18 | if: matrix.os == 'ubuntu-latest' 19 | run: sudo apt-get install -y libhidapi-dev libusb-1.0-0-dev 20 | - uses: actions/checkout@v2 21 | - name: Build 22 | run: cargo build --verbose 23 | env: 24 | RUSTFLAGS: -D warnings 25 | RUST_BACKTRACE: 1 26 | - name: Run tests 27 | run: cargo test --all --verbose 28 | 29 | docs: 30 | name: Docs 31 | runs-on: ubuntu-latest 32 | steps: 33 | - name: Dependencies 34 | run: sudo apt-get install -y libhidapi-dev libusb-1.0-0-dev 35 | - uses: actions/checkout@v2 36 | - name: Docs 37 | run: cargo doc --all 38 | env: 39 | RUSTDOCFLAGS: -D warnings 40 | 41 | lint: 42 | name: Lint 43 | runs-on: ubuntu-latest 44 | steps: 45 | - name: Dependencies 46 | run: sudo apt-get install -y libhidapi-dev libusb-1.0-0-dev 47 | - uses: actions/checkout@v2 48 | - uses: actions-rs/toolchain@v1 49 | with: 50 | toolchain: "1.73" 51 | profile: minimal 52 | components: clippy, rustfmt 53 | override: true 54 | - name: Cache cargo registry 55 | uses: actions/cache@v1 56 | with: 57 | path: ~/.cargo/registry 58 | key: ${{ runner.os }}-cargo-registry-${{ hashFiles('**/Cargo.lock') }} 59 | - name: Run clippy 60 | uses: actions-rs/cargo@v1 61 | with: 62 | command: clippy 63 | args: --all --tests 64 | env: 65 | RUSTFLAGS: -D warnings 66 | - name: Check 
/// Compact difficulty bits (target) of a block.
pub type Bits = u32;

/// Height of a block.
pub type Height = u64;

/// Block time (seconds since Epoch).
pub type BlockTime = u32;

/// Get the locator indexes starting from a given height, and going backwards, exponentially
/// backing off.
///
/// ```
/// use nakamoto_common::block;
///
/// assert_eq!(block::locators_indexes(0), vec![0]);
/// assert_eq!(block::locators_indexes(8), vec![8, 7, 6, 5, 4, 3, 2, 1, 0]);
/// assert_eq!(block::locators_indexes(99), vec![
///     99, 98, 97, 96, 95, 94, 93, 92, 91, 89, 85, 77, 61, 29, 0
/// ]);
/// ```
pub fn locators_indexes(mut from: Height) -> Vec<Height> {
    let mut heights = Vec::new();
    let mut step = 1;

    while from > 0 {
        // Dense for the first 8 heights, then exponentially sparser.
        if heights.len() >= 8 {
            step *= 2;
        }
        heights.push(from);
        from = from.saturating_sub(step);
    }
    // The genesis height is always included.
    heights.push(0);
    heights
}
59 | pub fn pow_limit_bits(network: &bitcoin::Network) -> Bits { 60 | match network { 61 | bitcoin::Network::Bitcoin => 0x1d00ffff, 62 | bitcoin::Network::Testnet => 0x1d00ffff, 63 | bitcoin::Network::Regtest => 0x207fffff, 64 | bitcoin::Network::Signet => 0x1e0377ae, 65 | } 66 | } 67 | -------------------------------------------------------------------------------- /net/poll/src/lib.rs: -------------------------------------------------------------------------------- 1 | //! I/O reactor that drives the protocol state machine. 2 | //! 3 | //! The reactor translates network events into protocol events. This has the 4 | //! added benefit that it's trivial to swap nakamoto's networking code with a 5 | //! different implementation, as the code is fully self-contained. 6 | //! 7 | //! To illustrate the above, lets trace the behavior of the system when a `ping` 8 | //! message is received via a peer connection to the client: 9 | //! 10 | //! 1. The `Reactor` reads from the socket and decodes a `NetworkMessage::Ping` 11 | //! message. 12 | //! 2. The `Reactor` wraps this message into a protocol input `Input::Received(addr, 13 | //! NetworkMessage::Ping)`, where `addr` is the remote address of the socket on 14 | //! which it received this message. 15 | //! 3. The `Reactor` calls `Protocol::step(input, time)`, where `input` is the above 16 | //! input, and `time` is the current local time. 17 | //! 4. The `Protocol` forwards this message to the `PingManager`, which constructs 18 | //! a new output `Out::Message(addr, NetworkMessage::Pong)`, and forwards it 19 | //! upstream, to the reactor. 20 | //! 5. The `Reactor` processes the output, encodes the raw message and writes it to 21 | //! the socket corresponding to the `addr` address, effectively sending a `pong` 22 | //! message back to the original sender. 23 | //! 24 | //! Though simplified, the above steps provide a good mental model of how the 25 | //! reactor and protocol interplay to handle network events. 26 | //! 
/// Makes a function randomly fail with the given error.
///
/// Expands to nothing outside test builds. In tests, the failure
/// probability is read from the global `fallible::FALLIBLE` knob; when
/// unset (`None`), the macro never fails.
#[macro_export]
macro_rules! fallible {
    ($err:expr) => {
        #[cfg(test)]
        {
            // Probability of failure, set by the test harness.
            let fallible = fallible::FALLIBLE.lock().unwrap();

            if let Some(p) = *fallible {
                let r = fastrand::f64();

                // Fail with probability `p`, returning early from the
                // *enclosing* function.
                if r <= p {
                    return Err($err.into());
                }
            }
        }
    };
}
16 | pub fn new(chain: &'a NonEmpty) -> Self { 17 | Self { 18 | chain, 19 | next: 0, 20 | next_back: chain.len(), 21 | } 22 | } 23 | } 24 | 25 | impl<'a, T> Iterator for Iter<'a, T> { 26 | type Item = (Height, &'a T); 27 | 28 | fn next(&mut self) -> Option { 29 | if self.next == self.next_back { 30 | return None; 31 | } 32 | let height = self.next; 33 | self.next += 1; 34 | 35 | self.chain.get(height).map(|item| (height as Height, item)) 36 | } 37 | } 38 | 39 | impl<'a, T> DoubleEndedIterator for Iter<'a, T> { 40 | ///``` 41 | /// use nonempty::NonEmpty; 42 | /// use nakamoto_common::block::iter::Iter; 43 | /// 44 | /// let chain = NonEmpty::from_vec(vec![1, 2, 3, 4, 5]).unwrap(); 45 | /// let mut iter = Iter::new(&chain); 46 | /// 47 | /// assert_eq!(Some((4, &5)), iter.next_back()); 48 | /// assert_eq!(Some((3, &4)), iter.next_back()); 49 | /// assert_eq!(Some((2, &3)), iter.next_back()); 50 | /// assert_eq!(Some((1, &2)), iter.next_back()); 51 | /// assert_eq!(Some((0, &1)), iter.next_back()); 52 | /// 53 | /// let mut iter = Iter::new(&chain); 54 | /// 55 | /// assert_eq!(Some((4, &5)), iter.next_back()); 56 | /// assert_eq!(Some((0, &1)), iter.next()); 57 | /// assert_eq!(Some((3, &4)), iter.next_back()); 58 | /// assert_eq!(Some((1, &2)), iter.next()); 59 | /// assert_eq!(Some((2, &3)), iter.next_back()); 60 | /// assert_eq!(None, iter.next()); 61 | /// assert_eq!(None, iter.next_back()); 62 | ///``` 63 | fn next_back(&mut self) -> Option { 64 | if self.next_back == self.next { 65 | return None; 66 | } 67 | 68 | self.next_back -= 1; 69 | let height = self.next_back; 70 | 71 | self.chain.get(height).map(|item| (height as Height, item)) 72 | } 73 | } 74 | -------------------------------------------------------------------------------- /p2p/src/fsm/tests/simulations.rs: -------------------------------------------------------------------------------- 1 | //! Simulations. 
2 | 3 | #![cfg(test)] 4 | use super::*; 5 | 6 | /// Test that we can find and connect to peers amidst network errors. 7 | pub fn connect_to_peers( 8 | options: Options, 9 | seed: u64, 10 | arbitrary::InRange(target): arbitrary::InRange<1, 6>, 11 | ) -> bool { 12 | logger::init(log::Level::Debug); 13 | 14 | assert!(target > 0); 15 | 16 | let target = target as usize; 17 | let rng = fastrand::Rng::with_seed(seed); 18 | let network = Network::Mainnet; 19 | let headers = BITCOIN_HEADERS.tail.to_vec(); 20 | let time = LocalTime::from_block_time(headers.last().unwrap().time); 21 | 22 | // Alice will try to connect to enough outbound peers. 23 | let mut peers = peer::network(network, target * 2, rng.clone()); 24 | let addrs = peers 25 | .iter() 26 | .map(|p| (p.addr, Source::Dns, p.cfg.services)) 27 | .collect::>(); 28 | let mut alice = Peer::genesis("alice", [48, 48, 48, 48], network, addrs, rng.clone()); 29 | alice.protocol.peermgr.config.target_outbound_peers = target; 30 | alice.init(); 31 | 32 | let mut simulator = Simulation::new(time, rng, options).initialize(&mut peers); 33 | let (mut prev_negotiated, mut prev_connecting, mut prev_connected) = (0, 0, 0); 34 | 35 | while simulator.step(iter::once(&mut alice).chain(&mut peers)) { 36 | let negotiated = alice.protocol.peermgr.negotiated(Link::Outbound).count(); 37 | let connecting = alice.protocol.peermgr.connecting().count(); 38 | let connected = alice.protocol.peermgr.connected().count(); 39 | 40 | if (prev_negotiated, prev_connecting, prev_connected) != (negotiated, connecting, connected) 41 | { 42 | prev_negotiated = negotiated; 43 | prev_connected = connected; 44 | prev_connecting = connecting; 45 | 46 | info!( 47 | target: "test", 48 | "--- negotiated: {}, connecting: {}, connected: {} ---", 49 | negotiated, connecting, connected 50 | ); 51 | } 52 | 53 | if negotiated >= target { 54 | break; 55 | } 56 | if simulator.elapsed() > LocalDuration::from_mins(4) { 57 | return false; 58 | } 59 | } 60 | true 61 | } 62 | 
-------------------------------------------------------------------------------- /node/src/main.rs: -------------------------------------------------------------------------------- 1 | use std::net; 2 | use std::path::PathBuf; 3 | 4 | use argh::FromArgs; 5 | 6 | use nakamoto_client::Network; 7 | use nakamoto_node::{logger, Domain}; 8 | 9 | #[derive(FromArgs)] 10 | /// A Bitcoin light client. 11 | pub struct Options { 12 | /// connect to the specified peers only 13 | #[argh(option)] 14 | pub connect: Vec, 15 | 16 | /// listen on one of these addresses for peer connections. 17 | #[argh(option)] 18 | pub listen: Vec, 19 | 20 | /// use the bitcoin test network (default: false) 21 | #[argh(switch)] 22 | pub testnet: bool, 23 | 24 | /// use the bitcoin signet network (default: false) 25 | #[argh(switch)] 26 | pub signet: bool, 27 | 28 | /// use the bitcoin regtest network (default: false) 29 | #[argh(switch)] 30 | pub regtest: bool, 31 | 32 | /// only connect to IPv4 addresses (default: false) 33 | #[argh(switch, short = '4')] 34 | pub ipv4: bool, 35 | 36 | /// only connect to IPv6 addresses (default: false) 37 | #[argh(switch, short = '6')] 38 | pub ipv6: bool, 39 | 40 | /// log level (default: info) 41 | #[argh(option, default = "log::Level::Info")] 42 | pub log: log::Level, 43 | 44 | /// root directory for nakamoto files (default: ~) 45 | #[argh(option)] 46 | pub root: Option, 47 | } 48 | 49 | impl Options { 50 | pub fn from_env() -> Self { 51 | argh::from_env() 52 | } 53 | } 54 | 55 | fn main() { 56 | let opts = Options::from_env(); 57 | 58 | logger::init(opts.log).expect("initializing logger for the first time"); 59 | 60 | let network = if opts.testnet { 61 | Network::Testnet 62 | } else if opts.signet { 63 | Network::Signet 64 | } else if opts.regtest { 65 | Network::Regtest 66 | } else { 67 | Network::Mainnet 68 | }; 69 | 70 | let domains = if opts.ipv4 && opts.ipv6 { 71 | vec![Domain::IPV4, Domain::IPV6] 72 | } else if opts.ipv4 { 73 | vec![Domain::IPV4] 74 | } 
//! Nakamoto is a high-assurance Bitcoin light-client library.
//!
//! The project is broken down into the following crates:
//!
//! * [`client`]: the core light-client library
//! * [`p2p`]: the protocol implementation
//! * [`chain`]: the block store and fork selection logic
//! * [`common`]: common functionality used by all crates
//! * [`net`]: networking backend
//!
//! The [`client`] crate is intended to be the entry point for most users of the
//! library, and is a good place to start, to see how everything fits together.
//!
//! ```no_run
//! use std::{net, thread};
//!
//! use nakamoto::client::{Client, Config, Error};
//! use nakamoto::client::network::{Network, Services};
//! use nakamoto::client::traits::Handle as _;
//!
//! /// The network reactor we're going to use.
//! type Reactor = nakamoto::net::poll::Reactor<net::TcpStream>;
//!
//! /// Run the light-client.
//! fn main() -> Result<(), Error> {
//!     let cfg = Config::new(Network::Testnet);
//!
//!     // Create a client using the above network reactor.
//!     let client = Client::<Reactor>::new()?;
//!     let handle = client.handle();
//!
//!     // Run the client on a different thread, to not block the main thread.
//!     thread::spawn(|| client.run(cfg).unwrap());
//!
//!     // Wait for the client to be connected to a peer.
//!     handle.wait_for_peers(1, Services::default())?;
//!
//!     // Ask the client to terminate.
//!     handle.shutdown()?;
//!
//!     Ok(())
//! }
//! ```

#[cfg(feature = "nakamoto-chain")]
pub use nakamoto_chain as chain;
#[cfg(feature = "nakamoto-client")]
pub use nakamoto_client as client;
#[cfg(feature = "nakamoto-common")]
pub use nakamoto_common as common;
#[cfg(feature = "nakamoto-node")]
pub use nakamoto_node as node;
#[cfg(feature = "nakamoto-p2p")]
pub use nakamoto_p2p as p2p;
#[cfg(feature = "nakamoto-wallet")]
pub use nakamoto_wallet as wallet;

#[cfg(test)]
#[cfg(feature = "nakamoto-test")]
pub use nakamoto_test as test;

/// Networking backends.
pub mod net {
    #[cfg(feature = "nakamoto-net")]
    pub use nakamoto_net::*;
    #[cfg(feature = "nakamoto-net-poll")]
    pub use nakamoto_net_poll as poll;
}
28 | hd_path: DerivationPath, 29 | } 30 | 31 | impl Hw { 32 | pub fn new(hd_path: DerivationPath) -> Self { 33 | Self { 34 | hd_path, 35 | device: None, 36 | } 37 | } 38 | 39 | pub fn connect(&mut self) -> Result<&mut coldcard::Coldcard, Error> { 40 | if let Some(ref mut device) = self.device { 41 | return Ok(device); 42 | } 43 | self.reconnect() 44 | } 45 | 46 | pub fn reconnect(&mut self) -> Result<&mut coldcard::Coldcard, Error> { 47 | // Detect all connected Coldcards. 48 | let serials = coldcard::detect()?; 49 | let (coldcard, _) = serials.first().ok_or(Error::NoDevice)?.open(None)?; 50 | let coldcard = self.device.get_or_insert(coldcard); 51 | 52 | Ok(coldcard) 53 | } 54 | 55 | pub fn request_addresses( 56 | &mut self, 57 | range: impl Into>, 58 | format: AddressFormat, 59 | ) -> Result, Error> { 60 | let hd_path = self.hd_path.clone(); 61 | let device = self.connect()?; 62 | 63 | let range: Range = range.into(); 64 | let mut addrs = Vec::new(); 65 | 66 | for (ix, child) in hd_path 67 | .normal_children() 68 | .enumerate() 69 | .skip(range.start) 70 | .take(range.len()) 71 | { 72 | let child = coldcard::protocol::DerivationPath::new(child.to_string().as_str()) 73 | .map_err(Error::DerivationPath)?; 74 | // TODO: This should be made to return `Address` type. 75 | let addr = device.address(child, format)?; 76 | let addr = Address::from_str(addr.as_str())?; 77 | 78 | log::debug!("Loaded address {addr} from device"); 79 | 80 | addrs.push((ix, addr)); 81 | } 82 | Ok(addrs) 83 | } 84 | } 85 | -------------------------------------------------------------------------------- /wallet/src/wallet/db/types.rs: -------------------------------------------------------------------------------- 1 | use nakamoto_common::bitcoin::Address; 2 | use sqlite as sql; 3 | 4 | use super::Error; 5 | 6 | /// Wraps a type, enabling it to be converted to SQL types. 
7 | pub struct Record(pub T); 8 | 9 | impl TryFrom for Record<(A, B)> 10 | where 11 | A: sql::ValueInto, 12 | B: sql::ValueInto, 13 | { 14 | type Error = Error; 15 | 16 | fn try_from(row: sql::Row) -> Result { 17 | let a = row.get(0); 18 | let b = row.get(1); 19 | 20 | Ok(Record((a, b))) 21 | } 22 | } 23 | 24 | impl TryFrom for Record<(A, B, C)> 25 | where 26 | A: sql::ValueInto, 27 | B: sql::ValueInto, 28 | C: sql::ValueInto, 29 | { 30 | type Error = Error; 31 | 32 | fn try_from(row: sql::Row) -> Result { 33 | let a = row.get(0); 34 | let b = row.get(1); 35 | let c = row.get(2); 36 | 37 | Ok(Record((a, b, c))) 38 | } 39 | } 40 | 41 | impl TryFrom for Record<(A, B, C, D)> 42 | where 43 | A: sql::ValueInto, 44 | B: sql::ValueInto, 45 | C: sql::ValueInto, 46 | D: sql::ValueInto, 47 | { 48 | type Error = Error; 49 | 50 | fn try_from(row: sql::Row) -> Result { 51 | let a = row.get(0); 52 | let b = row.get(1); 53 | let c = row.get(2); 54 | let d = row.get(3); 55 | 56 | Ok(Record((a, b, c, d))) 57 | } 58 | } 59 | 60 | /// An address table row. 61 | pub struct AddressRecord { 62 | pub address: Address, 63 | pub index: usize, 64 | pub label: Option, 65 | pub received: u64, 66 | pub used: bool, 67 | } 68 | 69 | impl<'a> TryFrom<&'a sql::Row> for AddressRecord { 70 | type Error = Error; 71 | 72 | fn try_from(row: &'a sql::Row) -> Result { 73 | Ok(Self { 74 | address: row 75 | .get::(0) 76 | .as_str() 77 | .parse() 78 | .map_err(|_| Error::Decoding("address"))?, 79 | index: row.get::(1) as usize, 80 | label: row.get(2), 81 | received: row.get::(3) as u64, 82 | used: row.get::(4) > 0, 83 | }) 84 | } 85 | } 86 | 87 | /// A balance in satoshis. 
88 | pub struct Balance(u64); 89 | 90 | impl std::ops::Deref for Balance { 91 | type Target = u64; 92 | 93 | fn deref(&self) -> &Self::Target { 94 | &self.0 95 | } 96 | } 97 | 98 | impl sql::ValueInto for Balance { 99 | fn into(value: &sql::Value) -> Option { 100 | match value { 101 | sql::Value::Integer(i) => Some(Balance(*i as u64)), 102 | _ => None, 103 | } 104 | } 105 | } 106 | -------------------------------------------------------------------------------- /common/src/block/filter.rs: -------------------------------------------------------------------------------- 1 | //! Compact block filter core types and traits. 2 | #![warn(missing_docs)] 3 | 4 | use std::ops::RangeInclusive; 5 | 6 | use bitcoin_hashes::Hash; 7 | use thiserror::Error; 8 | 9 | pub use bitcoin::hash_types::{FilterHash, FilterHeader}; 10 | pub use bitcoin::util::bip158::BlockFilter; 11 | 12 | use super::Height; 13 | use crate::block::store::{self, Genesis}; 14 | use crate::network::Network; 15 | 16 | impl Genesis for FilterHeader { 17 | /// Filter header for the genesis block. 18 | /// 19 | /// ``` 20 | /// use nakamoto_common::block::filter::{FilterHash, FilterHeader}; 21 | /// use nakamoto_common::block::store::Genesis as _; 22 | /// use nakamoto_common::network::Network; 23 | /// use bitcoin_hashes::{hex::FromHex, sha256d}; 24 | /// 25 | /// let genesis = FilterHeader::genesis(Network::Testnet); 26 | /// 27 | /// assert_eq!( 28 | /// genesis.as_hash(), 29 | /// sha256d::Hash::from_hex( 30 | /// "21584579b7eb08997773e5aeff3a7f932700042d0ed2a6129012b7d7ae81b750" 31 | /// ).unwrap() 32 | /// ); 33 | /// ``` 34 | fn genesis(network: Network) -> Self { 35 | let filter = BlockFilter::genesis(network); 36 | filter.filter_header(&FilterHeader::all_zeros()) 37 | } 38 | } 39 | 40 | /// An error related to the filters access. 41 | #[derive(Debug, Error)] 42 | pub enum Error { 43 | /// Filter or header at given height not found. 
44 | #[error("filter at height {0} not found")] 45 | NotFound(Height), 46 | /// A storage error occured. 47 | #[error("storage error: {0}")] 48 | Store(#[from] store::Error), 49 | } 50 | 51 | /// A trait for types that provide read/write access to compact block filters, and filter headers. 52 | pub trait Filters { 53 | /// Get filter headers given a block height range. 54 | fn get_headers(&self, range: RangeInclusive) -> Vec<(FilterHash, FilterHeader)>; 55 | /// Get the filter header at the given height. Includes the hash of the filter itself. 56 | fn get_header(&self, height: Height) -> Option<(FilterHash, FilterHeader)>; 57 | /// Import filter headers. 58 | fn import_headers(&mut self, headers: Vec<(FilterHash, FilterHeader)>) 59 | -> Result; 60 | /// Get the tip of the filter header chain. 61 | fn tip(&self) -> (&FilterHash, &FilterHeader); 62 | /// Get the height of the filter header chain. 63 | fn height(&self) -> Height; 64 | /// Get the filter header previous to the given height. 65 | fn get_prev_header(&self, height: Height) -> Option { 66 | if height == 0 { 67 | // If the start height is `0` (genesis), we return the zero hash as the parent. 68 | Some(FilterHeader::all_zeros()) 69 | } else { 70 | self.get_header(height - 1).map(|(_, h)| h) 71 | } 72 | } 73 | /// Rollback filter chain to the given height. 74 | fn rollback(&mut self, height: Height) -> Result<(), Error>; 75 | /// Truncate the filter header chain to zero. 76 | fn clear(&mut self) -> Result<(), Error>; 77 | } 78 | -------------------------------------------------------------------------------- /chain/src/block/store/memory.rs: -------------------------------------------------------------------------------- 1 | //! Ephemeral storage backend for blocks. 2 | 3 | use nakamoto_common::block::store::{Error, Genesis, Store}; 4 | use nakamoto_common::block::Height; 5 | use nakamoto_common::network::Network; 6 | use nakamoto_common::nonempty::NonEmpty; 7 | 8 | /// In-memory block store. 
9 | #[derive(Debug, Clone)] 10 | pub struct Memory(NonEmpty); 11 | 12 | impl Memory { 13 | /// Create a new in-memory block store. 14 | pub fn new(chain: NonEmpty) -> Self { 15 | Self(chain) 16 | } 17 | } 18 | 19 | impl Default for Memory { 20 | fn default() -> Self { 21 | Self(NonEmpty::new(H::default())) 22 | } 23 | } 24 | 25 | impl Memory { 26 | /// Create a memory store with only the genesis. 27 | pub fn genesis(network: Network) -> Self { 28 | Self(NonEmpty::new(H::genesis(network))) 29 | } 30 | } 31 | 32 | impl Store for Memory { 33 | type Header = H; 34 | 35 | /// Get the genesis block. 36 | fn genesis(&self) -> H { 37 | *self.0.first() 38 | } 39 | 40 | /// Append a batch of consecutive block headers to the end of the chain. 41 | fn put>(&mut self, headers: I) -> Result { 42 | self.0.tail.extend(headers); 43 | Ok(self.0.len() as Height - 1) 44 | } 45 | 46 | /// Get the block at the given height. 47 | fn get(&self, height: Height) -> Result { 48 | match self.0.get(height as usize) { 49 | Some(header) => Ok(*header), 50 | None => Err(Error::Io(std::io::Error::new( 51 | std::io::ErrorKind::UnexpectedEof, 52 | "unexpected end of file", 53 | ))), 54 | } 55 | } 56 | 57 | /// Rollback the chain to the given height. 58 | fn rollback(&mut self, height: Height) -> Result<(), Error> { 59 | match height { 60 | 0 => self.0.tail.clear(), 61 | h => self.0.tail.truncate(h as usize), 62 | } 63 | Ok(()) 64 | } 65 | 66 | /// Synchronize the changes to disk. 67 | fn sync(&mut self) -> Result<(), Error> { 68 | Ok(()) 69 | } 70 | 71 | /// Iterate over all headers in the store. 72 | fn iter(&self) -> Box>> { 73 | Box::new( 74 | self.0 75 | .clone() 76 | .into_iter() 77 | .enumerate() 78 | .map(|(i, h)| Ok((i as Height, h))), 79 | ) 80 | } 81 | 82 | /// Return the number of headers in the store. 83 | fn len(&self) -> Result { 84 | Ok(self.0.len()) 85 | } 86 | 87 | /// Return the height of the store. 
88 | fn height(&self) -> Result { 89 | Ok(self.0.len() as Height - 1) 90 | } 91 | 92 | /// Check data integrity. 93 | fn check(&self) -> Result<(), Error> { 94 | Ok(()) 95 | } 96 | 97 | /// Heal data corruption. 98 | fn heal(&self) -> Result<(), Error> { 99 | Ok(()) 100 | } 101 | } 102 | -------------------------------------------------------------------------------- /p2p/src/stream.rs: -------------------------------------------------------------------------------- 1 | //! Message stream utilities. 2 | use std::io; 3 | 4 | use nakamoto_common::bitcoin::consensus::{encode, Decodable}; 5 | 6 | /// Message stream decoder. 7 | /// 8 | /// Used to for example turn a byte stream into network messages. 9 | #[derive(Debug)] 10 | pub struct Decoder { 11 | unparsed: Vec, 12 | } 13 | 14 | impl Decoder { 15 | /// Create a new stream decoder. 16 | pub fn new(capacity: usize) -> Self { 17 | Self { 18 | unparsed: Vec::with_capacity(capacity), 19 | } 20 | } 21 | 22 | /// Input bytes into the decoder. 23 | pub fn input(&mut self, bytes: &[u8]) { 24 | self.unparsed.extend_from_slice(bytes); 25 | } 26 | 27 | /// Decode and return the next message. Returns [`None`] if nothing was decoded. 28 | pub fn decode_next(&mut self) -> Result, encode::Error> { 29 | match encode::deserialize_partial::(&self.unparsed) { 30 | Ok((msg, index)) => { 31 | // Drain deserialized bytes only. 
32 | self.unparsed.drain(..index); 33 | Ok(Some(msg)) 34 | } 35 | 36 | Err(encode::Error::Io(ref err)) if err.kind() == io::ErrorKind::UnexpectedEof => { 37 | Ok(None) 38 | } 39 | Err(err) => Err(err), 40 | } 41 | } 42 | } 43 | 44 | #[cfg(test)] 45 | mod test { 46 | use super::*; 47 | use nakamoto_common::bitcoin::network::message::{NetworkMessage, RawNetworkMessage}; 48 | use quickcheck_macros::quickcheck; 49 | 50 | const MSG_VERACK: [u8; 24] = [ 51 | 0xf9, 0xbe, 0xb4, 0xd9, 0x76, 0x65, 0x72, 0x61, 0x63, 0x6b, 0x00, 0x00, 0x00, 0x00, 0x00, 52 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x5d, 0xf6, 0xe0, 0xe2, 53 | ]; 54 | 55 | const MSG_PING: [u8; 32] = [ 56 | 0xf9, 0xbe, 0xb4, 0xd9, 0x70, 0x69, 0x6e, 0x67, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 57 | 0x00, 0x08, 0x00, 0x00, 0x00, 0x24, 0x67, 0xf1, 0x1d, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 58 | 0x00, 0x00, 59 | ]; 60 | 61 | #[quickcheck] 62 | fn prop_decode_next(chunk_size: usize) { 63 | let mut bytes = vec![]; 64 | let mut msgs = vec![]; 65 | let mut decoder = Decoder::new(1024); 66 | 67 | let chunk_size = 1 + chunk_size % decoder.unparsed.capacity(); 68 | 69 | bytes.extend_from_slice(&MSG_VERACK); 70 | bytes.extend_from_slice(&MSG_PING); 71 | 72 | for chunk in bytes.as_slice().chunks(chunk_size) { 73 | decoder.input(chunk); 74 | 75 | while let Some(msg) = decoder.decode_next::().unwrap() { 76 | msgs.push(msg); 77 | } 78 | } 79 | 80 | assert_eq!(decoder.unparsed.len(), 0); 81 | assert_eq!(msgs.len(), 2); 82 | assert_eq!( 83 | msgs[0], 84 | RawNetworkMessage { 85 | magic: 3652501241, 86 | payload: NetworkMessage::Verack 87 | } 88 | ); 89 | assert_eq!( 90 | msgs[1], 91 | RawNetworkMessage { 92 | magic: 3652501241, 93 | payload: NetworkMessage::Ping(100), 94 | } 95 | ); 96 | } 97 | } 98 | -------------------------------------------------------------------------------- /common/src/block/store.rs: -------------------------------------------------------------------------------- 1 | //! Block header storage. 
2 | #![allow(clippy::len_without_is_empty)] 3 | use crate::block::Height; 4 | 5 | use bitcoin::blockdata::block::BlockHeader; 6 | use bitcoin::consensus::encode; 7 | use bitcoin::hash_types::FilterHash; 8 | use bitcoin::util::bip158::BlockFilter; 9 | use thiserror::Error; 10 | 11 | use crate::network::Network; 12 | use crate::source; 13 | 14 | /// A block storage error. 15 | #[derive(Debug, Error)] 16 | pub enum Error { 17 | /// An I/O error. 18 | #[error("i/o error: {0}")] 19 | Io(#[from] std::io::Error), 20 | /// An error decoding block data. 21 | #[error("error decoding header: {0}")] 22 | Decoding(#[from] encode::Error), 23 | /// A data-corruption error. 24 | #[error("error: the store data is corrupt")] 25 | Corruption, 26 | /// Operation was interrupted. 27 | #[error("the operation was interrupted")] 28 | Interrupted, 29 | } 30 | 31 | /// Represents an object (such as a header), that has a genesis. 32 | pub trait Genesis { 33 | /// Create a genesis header. 34 | fn genesis(network: Network) -> Self; 35 | } 36 | 37 | /// Genesis implementation for `bitcoin`'s header. 38 | impl Genesis for BlockHeader { 39 | fn genesis(network: Network) -> Self { 40 | network.genesis() 41 | } 42 | } 43 | 44 | /// Genesis implementation for `bitcoin`'s `FilterHash`. 45 | impl Genesis for FilterHash { 46 | fn genesis(network: Network) -> Self { 47 | use bitcoin::hashes::Hash; 48 | 49 | let genesis = network.genesis_block(); 50 | let filter = BlockFilter::new_script_filter(&genesis, |_| { 51 | panic!("{}: genesis block should have no inputs", source!()) 52 | }) 53 | .unwrap(); 54 | 55 | FilterHash::hash(&filter.content) 56 | } 57 | } 58 | 59 | /// Genesis implementation for `bitcoin`'s `BlockFilter`. 
60 | impl Genesis for BlockFilter { 61 | fn genesis(network: Network) -> Self { 62 | let genesis = network.genesis_block(); 63 | 64 | BlockFilter::new_script_filter(&genesis, |_| { 65 | panic!("{}: genesis block should have no inputs", source!()) 66 | }) 67 | .unwrap() 68 | } 69 | } 70 | 71 | /// Represents objects that can store block headers. 72 | pub trait Store { 73 | /// The type of header used in the store. 74 | type Header: Sized; 75 | 76 | /// Get the genesis block. 77 | fn genesis(&self) -> Self::Header; 78 | /// Append a batch of consecutive block headers to the end of the chain. 79 | fn put>(&mut self, headers: I) -> Result; 80 | /// Get the block at the given height. 81 | fn get(&self, height: Height) -> Result; 82 | /// Rollback the chain to the given height. 83 | fn rollback(&mut self, height: Height) -> Result<(), Error>; 84 | /// Synchronize the changes to disk. 85 | fn sync(&mut self) -> Result<(), Error>; 86 | /// Iterate over all headers in the store. 87 | fn iter(&self) -> Box>>; 88 | /// Return the number of headers in the store. 89 | fn len(&self) -> Result; 90 | /// Return the store block height. 91 | fn height(&self) -> Result; 92 | /// Check the store integrity. 93 | fn check(&self) -> Result<(), Error>; 94 | /// Heal data corruption. 
95 | fn heal(&self) -> Result<(), Error>; 96 | } 97 | -------------------------------------------------------------------------------- /test/src/lib.rs: -------------------------------------------------------------------------------- 1 | pub mod assert; 2 | pub mod block; 3 | 4 | use std::fs::File; 5 | use std::io::Read; 6 | use std::path::{Path, PathBuf}; 7 | 8 | use once_cell::sync::Lazy; 9 | 10 | use nakamoto_common::bitcoin; 11 | use nakamoto_common::bitcoin::blockdata::constants; 12 | use nakamoto_common::bitcoin::consensus::encode::Decodable; 13 | 14 | use nakamoto_common::block::BlockHeader; 15 | use nakamoto_common::nonempty::NonEmpty; 16 | 17 | pub use fastrand; 18 | 19 | pub static BITCOIN_HEADERS: Lazy> = Lazy::new(|| { 20 | let genesis = constants::genesis_block(bitcoin::Network::Bitcoin).header; 21 | let mut f = File::open(&*self::headers::PATH).unwrap(); 22 | let mut buf = [0; 80]; 23 | let mut headers = NonEmpty::new(genesis); 24 | 25 | while f.read_exact(&mut buf).is_ok() { 26 | let header = BlockHeader::consensus_decode(&mut buf.as_slice()).unwrap(); 27 | headers.push(header); 28 | } 29 | headers 30 | }); 31 | 32 | pub mod headers { 33 | use super::*; 34 | 35 | pub static PATH: Lazy = 36 | Lazy::new(|| Path::new(env!("CARGO_MANIFEST_DIR")).join("data/headers.bin")); 37 | } 38 | 39 | pub mod logger { 40 | use log::*; 41 | 42 | struct Logger { 43 | level: Level, 44 | } 45 | 46 | impl Log for Logger { 47 | fn enabled(&self, metadata: &Metadata) -> bool { 48 | metadata.level() <= self.level 49 | } 50 | 51 | fn log(&self, record: &Record) { 52 | use colored::Colorize; 53 | 54 | match record.target() { 55 | "test" => { 56 | println!( 57 | "{} {}", 58 | "test:".yellow(), 59 | record.args().to_string().yellow() 60 | ) 61 | } 62 | "sim" => { 63 | println!("{} {}", "sim:".bold(), record.args().to_string().bold()) 64 | } 65 | target => { 66 | if self.enabled(record.metadata()) { 67 | let s = format!("{:<5} {}", format!("{}:", target), record.args()); 68 | 
println!("{}", s.dimmed()); 69 | } 70 | } 71 | } 72 | } 73 | 74 | fn flush(&self) {} 75 | } 76 | 77 | pub fn init(level: Level) { 78 | let logger = Logger { level }; 79 | 80 | log::set_boxed_logger(Box::new(logger)).ok(); 81 | log::set_max_level(level.to_level_filter()); 82 | } 83 | } 84 | 85 | pub mod arbitrary { 86 | /// Generator for numbers in a statically defined inclusive range. 87 | #[derive(Debug, Clone)] 88 | pub struct InRange(pub u64); 89 | 90 | impl quickcheck::Arbitrary for InRange { 91 | fn arbitrary(g: &mut quickcheck::Gen) -> Self { 92 | let rng = fastrand::Rng::with_seed(u64::arbitrary(g)); 93 | 94 | Self(rng.u64(N..=M)) 95 | } 96 | 97 | fn shrink(&self) -> Box> { 98 | let InRange(n) = self; 99 | 100 | if *n > N { 101 | Box::new((N..*n).map(InRange)) 102 | } else { 103 | Box::new(std::iter::empty()) 104 | } 105 | } 106 | } 107 | } 108 | -------------------------------------------------------------------------------- /wallet/src/lib.rs: -------------------------------------------------------------------------------- 1 | //! A TUI Bitcoin wallet. 2 | #![allow(clippy::too_many_arguments)] 3 | pub mod error; 4 | pub mod input; 5 | pub mod logger; 6 | pub mod wallet; 7 | 8 | use std::path::Path; 9 | use std::{io, net, thread}; 10 | 11 | use termion::raw::IntoRawMode; 12 | use termion::screen::IntoAlternateScreen; 13 | 14 | use nakamoto_client::chan; 15 | use nakamoto_client::handle::Handle; 16 | use nakamoto_client::Network; 17 | use nakamoto_client::{Client, Config}; 18 | use nakamoto_common::bitcoin::util::bip32::DerivationPath; 19 | use nakamoto_common::block::Height; 20 | 21 | use crate::error::Error; 22 | use crate::wallet::Db; 23 | use crate::wallet::Hw; 24 | use crate::wallet::Wallet; 25 | 26 | /// The network reactor we're going to use. 27 | type Reactor = nakamoto_net_poll::Reactor; 28 | 29 | /// Entry point for running the wallet. 
30 | pub fn run( 31 | wallet: &Path, 32 | birth: Height, 33 | hd_path: DerivationPath, 34 | network: Network, 35 | connect: Vec, 36 | offline: bool, 37 | ) -> Result<(), Error> { 38 | let cfg = Config { 39 | network, 40 | connect, 41 | listen: vec![], // Don't listen for incoming connections. 42 | ..Config::default() 43 | }; 44 | 45 | // Create a new client using `Reactor` for networking. 46 | let client = Client::::new()?; 47 | let handle = client.handle(); 48 | let client_recv = handle.events(); 49 | let (loading_send, loading_recv) = chan::unbounded(); 50 | 51 | log::info!("Opening wallet file `{}`..", wallet.display()); 52 | 53 | let db = Db::open(wallet)?; 54 | let hw = Hw::new(hd_path); 55 | 56 | let (inputs_tx, inputs_rx) = crossbeam_channel::unbounded(); 57 | let (exit_tx, exit_rx) = crossbeam_channel::bounded(1); 58 | let (signals_tx, signals_rx) = crossbeam_channel::unbounded(); 59 | 60 | log::info!("Spawning client threads.."); 61 | 62 | // Start the UI loop in the background. 63 | let t1 = thread::spawn(|| input::run(inputs_tx, exit_rx)); 64 | // Start the signal handler thread. 65 | let t2 = thread::spawn(|| input::signals(signals_tx)); 66 | // Start the network client in the background. 67 | let t3 = thread::spawn(move || { 68 | if offline { 69 | Ok(()) 70 | } else { 71 | client.load(cfg, loading_send)?.run() 72 | } 73 | }); 74 | 75 | log::info!("Switching to alternative screen.."); 76 | 77 | let stdout = io::stdout().into_raw_mode()?; 78 | let term = termion::cursor::HideCursor::from(termion::input::MouseTerminal::from(stdout)) 79 | .into_alternate_screen()?; 80 | 81 | // Run the main wallet loop. This will block until the wallet exits. 82 | log::info!("Running main wallet loop.."); 83 | Wallet::new(handle.clone(), network, db, hw).run( 84 | birth, 85 | inputs_rx, 86 | signals_rx, 87 | loading_recv, 88 | client_recv, 89 | offline, 90 | term, 91 | )?; 92 | 93 | // Tell other threads that they should exit. 
94 | log::info!("Exiting.."); 95 | exit_tx.send(()).unwrap(); 96 | 97 | // Shutdown the client, since the main loop exited. 98 | log::info!("Shutting down client.."); 99 | handle.shutdown()?; 100 | 101 | t1.join().unwrap()?; 102 | t2.join().unwrap()?; 103 | t3.join().unwrap()?; 104 | 105 | Ok(()) 106 | } 107 | -------------------------------------------------------------------------------- /common/src/collections.rs: -------------------------------------------------------------------------------- 1 | //! Collections used in `nakamoto`. 2 | use bitcoin_hashes::siphash24::Hash; 3 | use std::ops::{Deref, DerefMut}; 4 | 5 | use crate::nonempty::NonEmpty; 6 | 7 | /// A `HashMap` which uses `fastrand::Rng` for its random state. 8 | pub type HashMap = std::collections::HashMap; 9 | 10 | /// A `HashSet` which uses `fastrand::Rng` for its random state. 11 | pub type HashSet = std::collections::HashSet; 12 | 13 | /// Hasher using `siphash24`. 14 | #[derive(Default)] 15 | pub struct Hasher { 16 | data: Vec, 17 | key1: u64, 18 | key2: u64, 19 | } 20 | 21 | impl Hasher { 22 | fn new(key1: u64, key2: u64) -> Self { 23 | Self { 24 | data: vec![], 25 | key1, 26 | key2, 27 | } 28 | } 29 | } 30 | 31 | impl std::hash::Hasher for Hasher { 32 | fn write(&mut self, bytes: &[u8]) { 33 | self.data.extend_from_slice(bytes) 34 | } 35 | 36 | fn finish(&self) -> u64 { 37 | Hash::hash_with_keys(self.key1, self.key2, &self.data).as_u64() 38 | } 39 | } 40 | 41 | /// Random hasher state. 
42 | #[derive(Default, Clone)] 43 | pub struct RandomState { 44 | key1: u64, 45 | key2: u64, 46 | } 47 | 48 | impl RandomState { 49 | fn new(rng: fastrand::Rng) -> Self { 50 | Self { 51 | key1: rng.u64(..), 52 | key2: rng.u64(..), 53 | } 54 | } 55 | } 56 | 57 | impl std::hash::BuildHasher for RandomState { 58 | type Hasher = Hasher; 59 | 60 | fn build_hasher(&self) -> Self::Hasher { 61 | Hasher::new(self.key1, self.key2) 62 | } 63 | } 64 | 65 | impl From for RandomState { 66 | fn from(rng: fastrand::Rng) -> Self { 67 | Self::new(rng) 68 | } 69 | } 70 | 71 | /// A map with the ability to randomly select values. 72 | #[derive(Debug)] 73 | pub struct AddressBook { 74 | inner: HashMap, 75 | rng: fastrand::Rng, 76 | } 77 | 78 | impl AddressBook { 79 | /// Create a new address book. 80 | pub fn new(rng: fastrand::Rng) -> Self { 81 | Self { 82 | inner: HashMap::with_hasher(rng.clone().into()), 83 | rng, 84 | } 85 | } 86 | 87 | /// Pick a random value in the book. 88 | pub fn sample(&self) -> Option<(&K, &V)> { 89 | self.sample_with(|_, _| true) 90 | } 91 | 92 | /// Pick a random value in the book matching a predicate. 93 | pub fn sample_with(&self, mut predicate: impl FnMut(&K, &V) -> bool) -> Option<(&K, &V)> { 94 | if let Some(pairs) = NonEmpty::from_vec( 95 | self.inner 96 | .iter() 97 | .filter(|(k, v)| predicate(*k, *v)) 98 | .collect(), 99 | ) { 100 | let ix = self.rng.usize(..pairs.len()); 101 | let pair = pairs[ix]; // Can't fail. 102 | 103 | Some(pair) 104 | } else { 105 | None 106 | } 107 | } 108 | 109 | /// Cycle through the keys at random. The random cycle repeats ad-infintum. 110 | pub fn cycle(&self) -> impl Iterator { 111 | self.shuffled().map(|(k, _)| k).cycle() 112 | } 113 | 114 | /// Return a shuffled iterator over the keys. 
115 | pub fn shuffled(&self) -> std::vec::IntoIter<(&K, &V)> { 116 | let mut keys = self.inner.iter().collect::>(); 117 | self.rng.shuffle(&mut keys); 118 | 119 | keys.into_iter() 120 | } 121 | } 122 | 123 | impl Deref for AddressBook { 124 | type Target = HashMap; 125 | 126 | fn deref(&self) -> &Self::Target { 127 | &self.inner 128 | } 129 | } 130 | 131 | impl DerefMut for AddressBook { 132 | fn deref_mut(&mut self) -> &mut Self::Target { 133 | &mut self.inner 134 | } 135 | } 136 | -------------------------------------------------------------------------------- /net/src/event.rs: -------------------------------------------------------------------------------- 1 | //! Events generated by the peer-to-peer system. 2 | use std::sync::{Arc, Mutex}; 3 | use std::time; 4 | 5 | use crossbeam_channel as chan; 6 | 7 | pub use chan::RecvTimeoutError; 8 | 9 | /// An event publish/subscribe channel. 10 | /// 11 | /// Takes events of type `E`, and maps them to events of type `T` which 12 | /// are forwarded to all subscribers. 13 | pub struct Broadcast { 14 | /// Emits events of type `T` to subscribers. 15 | emitter: Emitter, 16 | /// Takes an event of type `E` and emits zero or more events of type `T` to subscribers. 17 | forward: Box) + Send + Sync>, 18 | } 19 | 20 | impl Broadcast { 21 | /// Broadcast an event to all subscribers. 22 | pub fn broadcast(&mut self, event: E) { 23 | (self.forward)(event, &self.emitter.clone()); 24 | } 25 | } 26 | 27 | /// Publishes events to subscribers. 28 | #[derive(Clone)] 29 | pub struct Emitter { 30 | subscribers: Arc>>>, 31 | } 32 | 33 | impl Default for Emitter { 34 | fn default() -> Self { 35 | Self { 36 | subscribers: Default::default(), 37 | } 38 | } 39 | } 40 | 41 | impl Emitter { 42 | /// Emit an event to all subscribers and drop subscribers who can't receive it. 
43 | pub fn emit(&self, event: T) { 44 | self.subscribers 45 | .lock() 46 | .unwrap() 47 | .retain(|s| s.try_send(event.clone()).is_ok()); 48 | } 49 | 50 | /// Drop all subscribers. 51 | pub fn close(self) { 52 | self.subscribers.lock().unwrap().clear(); 53 | } 54 | 55 | /// Create a subscriber from this emitter. 56 | pub fn subscriber(&self) -> Subscriber { 57 | Subscriber { 58 | subscribers: self.subscribers.clone(), 59 | } 60 | } 61 | } 62 | 63 | /// Subscribes to events. 64 | #[derive(Clone)] 65 | pub struct Subscriber { 66 | subscribers: Arc>>>, 67 | } 68 | 69 | impl Subscriber { 70 | /// Add a subscription to receive broadcast events. 71 | pub fn subscribe(&self) -> chan::Receiver { 72 | let (sender, receiver) = chan::unbounded(); 73 | let mut subs = self.subscribers.lock().unwrap(); 74 | subs.push(sender); 75 | 76 | receiver 77 | } 78 | } 79 | 80 | /// Create a new broadcast channel. 81 | pub fn broadcast( 82 | forward: impl FnMut(E, &Emitter) + Send + Sync + 'static, 83 | ) -> (Broadcast, Subscriber) { 84 | let emitter = Emitter::default(); 85 | let subscriber = emitter.subscriber(); 86 | ( 87 | Broadcast { 88 | emitter, 89 | forward: Box::new(forward), 90 | }, 91 | subscriber, 92 | ) 93 | } 94 | 95 | /// Listen to an event feed, and wait for the given function to return something, 96 | /// or timeout if the specified amount of time has elapsed. 97 | pub fn wait( 98 | events: &chan::Receiver, 99 | mut f: F, 100 | timeout: time::Duration, 101 | ) -> Result 102 | where 103 | F: FnMut(E) -> Option, 104 | { 105 | let start = time::Instant::now(); 106 | 107 | loop { 108 | if let Some(timeout) = timeout.checked_sub(start.elapsed()) { 109 | match events.recv_timeout(timeout) { 110 | Ok(event) => { 111 | if let Some(t) = f(event) { 112 | return Ok(t); 113 | } 114 | } 115 | Err(err @ chan::RecvTimeoutError::Disconnected) => { 116 | return Err(err); 117 | } 118 | Err(chan::RecvTimeoutError::Timeout) => { 119 | // Keep trying until our timeout reaches zero. 
120 | continue; 121 | } 122 | } 123 | } else { 124 | return Err(chan::RecvTimeoutError::Timeout); 125 | } 126 | } 127 | } 128 | 129 | /// Any type that is able to publish events. 130 | pub trait Publisher: Send + Sync { 131 | /// Publish an event. 132 | fn publish(&mut self, event: E); 133 | } 134 | 135 | impl Publisher for Broadcast { 136 | /// Publish a message to all subscribers. 137 | fn publish(&mut self, event: E) { 138 | self.broadcast(event) 139 | } 140 | } 141 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | nakamoto 2 | ======== 3 | 4 | Nakamoto is a privacy-preserving Bitcoin light-client implementation in Rust, 5 | with a focus on low resource utilization, modularity and security. 6 | 7 | The vision for the project is to build a set of libraries targeting light 8 | client functionality, that are easy to embed in any program and on any 9 | platform, be it mobile or desktop. The project's small cpu, memory and code 10 | footprint is made possible by its efficient runtime and minimal set of 11 | dependencies. The implementation language, Rust, opens up the possibility for 12 | programs written in other languages (eg. Swift, Python, Java), to bind directly 13 | to it via a foreign function interface (FFI). 14 | 15 | --- 16 | 17 | [dependencies] 18 | nakamoto = "0.4.0" 19 | 20 | ## Design 21 | 22 | Nakamoto is split into several crates, each handling a different aspect of the 23 | light-client protocol. Although these crates are designed to be used in tandem, 24 | swapping implementations is trivial, due to the explicit boundaries between 25 | them, and the use of traits. 
From a high-level, we have: 26 | 27 | * `nakamoto-client`: the core light-client library 28 | * `nakamoto-p2p`: the protocol state-machine implementation 29 | * `nakamoto-chain`: the block store and fork selection logic 30 | * `nakamoto-net`: networking primitives used by the reactor implementations 31 | * `nakamoto-net-poll`: the default *poll*-based networking backend 32 | * `nakamoto-common`: common functionality used by all crates 33 | * `nakamoto-node`: a standalone light-client daemon 34 | * `nakamoto-wallet`: a very basic watch-only wallet built on the above crates 35 | 36 | For an overview of the above, see the [architecture diagram](docs/architecture.svg) 37 | in the `docs` folder. 38 | 39 | ## Status 40 | 41 | While the project is still in its infancy, the base functionality has been 42 | implemented. Nakamoto is able to discover peers, download and verify the 43 | longest chain and handle forks, while implementing the full header verification 44 | protocol. 45 | 46 | Client side block filtering (BIP 157/158) is implemented and working. See 47 | `nakamoto-wallet` for an example of how to use it. 48 | 49 | Peer-to-peer layer encryption (BIP 324), available in Bitcoin Core since v26.0, 50 | will also be implemented in Nakamoto soon. 51 | 52 | Finally, a C FFI will be implemented, to make it easy to embed the client 53 | in mobile applications. 54 | 55 | Though wallet functionality will slowly be added, it isn't the primary focus 56 | of this project, which sits one level below wallets. 57 | 58 | ## Project goals 59 | 60 | * __High assurance__: the library should be thoroughly tested using modern 61 | techniques such as *property* and *model-based testing* as well as *discrete 62 | event simulation* and *fuzzing*. These approaches benefit from a clean 63 | separation between I/O and protocol logic and have been shown to catch more 64 | bugs than unit testing.
65 | 66 | * __Security__: as a library that may find its way into wallet implementations, 67 | a primary goal is security and auditability. For this reason, we try to 68 | minimize the total dependency footprint, keep the code easy to read and 69 | forbid any unsafe code. 70 | 71 | * __Efficiency__: blockchain synchronization should be done as efficiently as 72 | possible, with low memory, disk and bandwidth overhead. We target 73 | resource-constrained environments, such as mobile. 74 | 75 | * __Privacy__: when possible, privacy-preserving techniques should be employed. 76 | *Client Side Block Filtering* (BIP 157/158) should be used over bloom 77 | filters (BIP 37) to ensure user privacy and provide censorship resistance. 78 | 79 | ## Running the tests 80 | 81 | cargo test --all 82 | 83 | ## Running the daemon 84 | 85 | cargo run --release -p nakamoto-node -- --testnet 86 | 87 | ## Contributing 88 | 89 | If you'd like to contribute to the development of Nakamoto, please get in touch! 90 | Otherwise, do read the contribution [guidelines](CONTRIBUTING.md). 91 | 92 | ## Donations 93 | 94 | To help fund the project and ensure its ongoing development and maintenance, your 95 | support in Bitcoin is appreciated at the following address: 96 | 97 | bc1qa47tl4vmz8j82wdsdkmxex30r23c9ljs84fxah 98 | 99 | ## Motivation 100 | 101 | Looking at ecosystems that aren't light-client friendly—Ethereum for example—we 102 | see that the vast majority of users are forced into trusting third parties when 103 | transacting on the network. This is completely counter to the idea and *raison 104 | d'être* of blockchains, and poses a giant security and privacy risk. 105 | Unfortunately, this is due to the lackluster support for light-clients, both at 106 | the protocol level, and in terms of the available implementations. Light-clients 107 | are necessary for the average user to be able to securely interface with a 108 | network like Ethereum or Bitcoin. 
109 | 110 | For this purpose, Nakamoto was conceived as a client that can efficiently run 111 | on any mobile device, with the highest standards of privacy and security 112 | achievable given the constraints. 113 | 114 | ## License 115 | 116 | Licensed under the MIT license. 117 | © 2020 Alexis Sellier () 118 | -------------------------------------------------------------------------------- /client/src/service.rs: -------------------------------------------------------------------------------- 1 | //! TODO 2 | use std::borrow::{Borrow, Cow}; 3 | use std::collections::HashMap; 4 | use std::net; 5 | use std::sync::Arc; 6 | 7 | use nakamoto_chain::BlockTree; 8 | use nakamoto_common::bitcoin::consensus::Encodable; 9 | use nakamoto_common::block::time::{AdjustedClock, LocalTime}; 10 | use nakamoto_net::{Disconnect, Io, Link, StateMachine}; 11 | use nakamoto_p2p as p2p; 12 | 13 | use crate::client::Config; 14 | use crate::peer; 15 | use nakamoto_common::block::filter; 16 | use nakamoto_common::block::filter::Filters; 17 | 18 | /// Client service. Wraps a state machine and handles decoding and encoding of network messages. 19 | pub struct Service { 20 | inboxes: HashMap, 21 | machine: p2p::StateMachine, 22 | } 23 | 24 | impl> 25 | Service 26 | { 27 | /// Create a new client service. 
28 | pub fn new( 29 | tree: T, 30 | filters: F, 31 | peers: P, 32 | clock: C, 33 | rng: fastrand::Rng, 34 | config: Config, 35 | ) -> Self { 36 | Self { 37 | inboxes: HashMap::new(), 38 | machine: p2p::StateMachine::new( 39 | tree, 40 | filters, 41 | peers, 42 | clock, 43 | rng, 44 | p2p::Config { 45 | network: config.network, 46 | domains: config.domains, 47 | connect: config.connect, 48 | user_agent: config.user_agent, 49 | hooks: config.hooks, 50 | limits: config.limits, 51 | services: config.services, 52 | 53 | ..p2p::Config::default() 54 | }, 55 | ), 56 | } 57 | } 58 | } 59 | 60 | impl nakamoto_net::Service for Service 61 | where 62 | T: BlockTree, 63 | F: filter::Filters, 64 | P: peer::Store, 65 | C: AdjustedClock, 66 | { 67 | type Command = p2p::Command; 68 | 69 | fn command_received(&mut self, cmd: Self::Command) { 70 | // TODO: Commands shouldn't be handled by the inner state machine. 71 | self.machine.command(cmd) 72 | } 73 | } 74 | 75 | impl StateMachine for Service 76 | where 77 | T: BlockTree, 78 | F: filter::Filters, 79 | P: peer::Store, 80 | C: AdjustedClock, 81 | { 82 | type Message = [u8]; 83 | type Event = p2p::Event; 84 | type DisconnectReason = p2p::DisconnectReason; 85 | 86 | fn initialize(&mut self, time: LocalTime) { 87 | self.machine.initialize(time); 88 | } 89 | 90 | fn tick(&mut self, local_time: LocalTime) { 91 | self.machine.tick(local_time); 92 | } 93 | 94 | fn timer_expired(&mut self) { 95 | self.machine.timer_expired(); 96 | } 97 | 98 | fn message_received(&mut self, addr: &net::SocketAddr, bytes: Cow<[u8]>) { 99 | if let Some(inbox) = self.inboxes.get_mut(addr) { 100 | inbox.input(bytes.borrow()); 101 | 102 | loop { 103 | match inbox.decode_next() { 104 | Ok(Some(msg)) => self.machine.message_received(addr, Cow::Owned(msg)), 105 | Ok(None) => break, 106 | 107 | Err(err) => { 108 | log::error!("Invalid message received from {}: {}", addr, err); 109 | 110 | self.machine 111 | .disconnect(*addr, 
p2p::DisconnectReason::DecodeError(Arc::new(err))); 112 | 113 | return; 114 | } 115 | } 116 | } 117 | } else { 118 | log::debug!("Received message from unknown peer {}", addr); 119 | } 120 | } 121 | 122 | fn attempted(&mut self, addr: &net::SocketAddr) { 123 | self.machine.attempted(addr) 124 | } 125 | 126 | fn connected(&mut self, addr: net::SocketAddr, local_addr: &net::SocketAddr, link: Link) { 127 | self.inboxes.insert(addr, p2p::stream::Decoder::new(1024)); 128 | self.machine.connected(addr, local_addr, link) 129 | } 130 | 131 | fn disconnected(&mut self, addr: &net::SocketAddr, reason: Disconnect) { 132 | self.inboxes.remove(addr); 133 | self.machine.disconnected(addr, reason) 134 | } 135 | } 136 | 137 | impl> Iterator 138 | for Service 139 | { 140 | type Item = Io, p2p::Event, p2p::DisconnectReason>; 141 | 142 | fn next(&mut self) -> Option { 143 | match self.machine.next() { 144 | Some(Io::Write(addr, msg)) => { 145 | log::trace!(target: "client", "Write {:?} to {}", &msg, addr.ip()); 146 | let mut buf = Vec::new(); 147 | 148 | msg.consensus_encode(&mut buf) 149 | .expect("writing to an in-memory buffer doesn't fail"); 150 | 151 | Some(Io::Write(addr, buf)) 152 | } 153 | Some(Io::Event(e)) => Some(Io::Event(e)), 154 | Some(Io::Connect(a)) => Some(Io::Connect(a)), 155 | Some(Io::Disconnect(a, r)) => Some(Io::Disconnect(a, r)), 156 | Some(Io::SetTimer(d)) => Some(Io::SetTimer(d)), 157 | 158 | None => None, 159 | } 160 | } 161 | } 162 | -------------------------------------------------------------------------------- /client/src/peer.rs: -------------------------------------------------------------------------------- 1 | //! Client-related peer functionality. 2 | use std::collections::HashMap; 3 | use std::path::Path; 4 | use std::{fs, io, net}; 5 | 6 | pub use nakamoto_common::p2p::peer::*; 7 | 8 | /// A file-backed implementation of [`Store`]. 
9 | #[derive(Debug)] 10 | pub struct Cache { 11 | addrs: HashMap, 12 | file: fs::File, 13 | } 14 | 15 | impl Cache { 16 | /// Open an existing cache. 17 | pub fn open>(path: P) -> io::Result { 18 | fs::OpenOptions::new() 19 | .read(true) 20 | .write(true) 21 | .open(path) 22 | .and_then(Self::from) 23 | } 24 | 25 | /// Create a new cache. 26 | pub fn create>(path: P) -> io::Result { 27 | let file = fs::OpenOptions::new() 28 | .create_new(true) 29 | .write(true) 30 | .open(path)?; 31 | 32 | Ok(Self { 33 | file, 34 | addrs: HashMap::new(), 35 | }) 36 | } 37 | 38 | /// Create a new cache from a file. 39 | pub fn from(mut file: fs::File) -> io::Result { 40 | use io::Read; 41 | use microserde::json::Value; 42 | use std::str::FromStr; 43 | 44 | let mut s = String::new(); 45 | let mut addrs = HashMap::new(); 46 | 47 | file.read_to_string(&mut s)?; 48 | 49 | if !s.is_empty() { 50 | let val = microserde::json::from_str(&s) 51 | .map_err(|_| io::Error::from(io::ErrorKind::InvalidData))?; 52 | 53 | match val { 54 | Value::Object(ary) => { 55 | for (k, v) in ary.into_iter() { 56 | let ka = KnownAddress::from_json(v) 57 | .map_err(|_| io::Error::from(io::ErrorKind::InvalidData))?; 58 | let ip = net::IpAddr::from_str(k.as_str()) 59 | .map_err(|_| io::Error::from(io::ErrorKind::InvalidData))?; 60 | 61 | addrs.insert(ip, ka); 62 | } 63 | } 64 | _ => return Err(io::ErrorKind::InvalidData.into()), 65 | } 66 | } 67 | 68 | Ok(Self { file, addrs }) 69 | } 70 | } 71 | 72 | impl Store for Cache { 73 | fn get_mut(&mut self, ip: &net::IpAddr) -> Option<&mut KnownAddress> { 74 | self.addrs.get_mut(ip) 75 | } 76 | 77 | fn get(&self, ip: &net::IpAddr) -> Option<&KnownAddress> { 78 | self.addrs.get(ip) 79 | } 80 | 81 | fn remove(&mut self, ip: &net::IpAddr) -> Option { 82 | self.addrs.remove(ip) 83 | } 84 | 85 | fn insert(&mut self, ip: net::IpAddr, ka: KnownAddress) -> bool { 86 | as Store>::insert(&mut self.addrs, ip, ka) 87 | } 88 | 89 | fn iter<'a>(&'a self) -> Box + 'a> { 90 | 
Box::new(self.addrs.iter()) 91 | } 92 | 93 | fn clear(&mut self) { 94 | self.addrs.clear() 95 | } 96 | 97 | fn len(&self) -> usize { 98 | self.addrs.len() 99 | } 100 | 101 | fn flush<'a>(&mut self) -> io::Result<()> { 102 | use io::{Seek, Write}; 103 | use microserde::json::Value; 104 | 105 | let peers: microserde::json::Object = self 106 | .addrs 107 | .iter() 108 | .map(|(ip, ka)| (ip.to_string(), ka.to_json())) 109 | .collect(); 110 | let s = microserde::json::to_string(&Value::Object(peers)); 111 | 112 | self.file.set_len(0)?; 113 | self.file.seek(io::SeekFrom::Start(0))?; 114 | self.file.write_all(s.as_bytes())?; 115 | self.file.write_all(&[b'\n'])?; 116 | self.file.sync_data()?; 117 | 118 | Ok(()) 119 | } 120 | } 121 | 122 | #[cfg(test)] 123 | mod test { 124 | use super::*; 125 | use nakamoto_common::bitcoin::network::address::Address; 126 | use nakamoto_common::bitcoin::network::constants::ServiceFlags; 127 | use nakamoto_common::block::time::LocalTime; 128 | 129 | #[test] 130 | fn test_empty() { 131 | let tmp = tempfile::tempdir().unwrap(); 132 | let path = tmp.path().join("cache"); 133 | 134 | Cache::create(&path).unwrap(); 135 | let cache = Cache::open(&path).unwrap(); 136 | 137 | assert!(cache.is_empty()); 138 | } 139 | 140 | #[test] 141 | fn test_save_and_load() { 142 | let tmp = tempfile::tempdir().unwrap(); 143 | let path = tmp.path().join("cache"); 144 | let mut expected = Vec::new(); 145 | 146 | { 147 | let mut cache = Cache::create(&path).unwrap(); 148 | 149 | for i in 32..48 { 150 | let ip = net::IpAddr::from([127, 0, 0, i]); 151 | let sockaddr = net::SocketAddr::from((ip, 8333)); 152 | let services = ServiceFlags::NETWORK; 153 | let ka = KnownAddress { 154 | addr: Address::new(&sockaddr, services), 155 | source: Source::Dns, 156 | last_success: Some(LocalTime::from_secs(i as u64)), 157 | last_sampled: Some(LocalTime::from_secs((i + 1) as u64)), 158 | last_attempt: None, 159 | last_active: None, 160 | }; 161 | cache.insert(ip, ka); 162 | } 163 | 
cache.flush().unwrap(); 164 | 165 | for (ip, ka) in cache.iter() { 166 | expected.push((*ip, ka.clone())); 167 | } 168 | } 169 | 170 | { 171 | let cache = Cache::open(&path).unwrap(); 172 | let mut actual = cache 173 | .iter() 174 | .map(|(i, ka)| (*i, ka.clone())) 175 | .collect::>(); 176 | 177 | actual.sort_by_key(|(i, _)| *i); 178 | expected.sort_by_key(|(i, _)| *i); 179 | 180 | assert_eq!(actual, expected); 181 | } 182 | } 183 | } 184 | -------------------------------------------------------------------------------- /chain/src/filter/cache.rs: -------------------------------------------------------------------------------- 1 | #![allow(dead_code)] 2 | //! Compact block filter cache. 3 | 4 | use std::io; 5 | use std::ops::ControlFlow; 6 | use std::ops::RangeInclusive; 7 | 8 | use nakamoto_common::bitcoin::consensus::{encode, Decodable, Encodable}; 9 | 10 | use nakamoto_common::bitcoin_hashes::Hash; 11 | pub use nakamoto_common::block::filter::{ 12 | self, BlockFilter, Error, FilterHash, FilterHeader, Filters, 13 | }; 14 | pub use nakamoto_common::block::store::Store; 15 | 16 | use nakamoto_common::block::store::Genesis; 17 | use nakamoto_common::block::Height; 18 | use nakamoto_common::network::Network; 19 | use nakamoto_common::nonempty::NonEmpty; 20 | 21 | use crate::filter::store; 22 | 23 | #[derive(Debug, Clone, Copy)] 24 | pub struct StoredHeader { 25 | pub hash: FilterHash, 26 | pub header: FilterHeader, 27 | } 28 | 29 | impl Default for StoredHeader { 30 | fn default() -> Self { 31 | Self { 32 | hash: FilterHash::all_zeros(), 33 | header: FilterHeader::all_zeros(), 34 | } 35 | } 36 | } 37 | 38 | impl Encodable for StoredHeader { 39 | fn consensus_encode(&self, e: &mut W) -> Result { 40 | let mut len = 0; 41 | 42 | len += self.hash.consensus_encode(e)?; 43 | len += self.header.consensus_encode(e)?; 44 | 45 | Ok(len) 46 | } 47 | } 48 | 49 | impl Decodable for StoredHeader { 50 | fn consensus_decode(d: &mut D) -> Result { 51 | let hash = 
FilterHash::consensus_decode(d)?; 52 | let header = FilterHeader::consensus_decode(d)?; 53 | 54 | Ok(StoredHeader { hash, header }) 55 | } 56 | } 57 | 58 | impl Genesis for StoredHeader { 59 | fn genesis(network: Network) -> Self { 60 | Self { 61 | hash: FilterHash::genesis(network), 62 | header: FilterHeader::genesis(network), 63 | } 64 | } 65 | } 66 | 67 | pub struct FilterCache { 68 | headers: NonEmpty, 69 | header_store: S, 70 | } 71 | 72 | impl> FilterCache { 73 | pub fn load(header_store: S) -> Result { 74 | Self::load_with(header_store, |_| ControlFlow::Continue(())) 75 | } 76 | 77 | pub fn load_with( 78 | header_store: S, 79 | progress: impl Fn(Height) -> ControlFlow<()>, 80 | ) -> Result { 81 | let mut headers = NonEmpty::new(header_store.genesis()); 82 | 83 | for (height, result) in header_store.iter().enumerate().skip(1) { 84 | let (_, header) = result?; 85 | headers.push(header); 86 | 87 | if progress(height as Height).is_break() { 88 | return Err(nakamoto_common::block::store::Error::Interrupted); 89 | } 90 | } 91 | 92 | Ok(Self { 93 | header_store, 94 | headers, 95 | }) 96 | } 97 | } 98 | 99 | impl FilterCache { 100 | /// Verify the filter header chain. Returns `true` if the chain is valid. 
101 | pub fn verify(&self, network: Network) -> Result<(), store::Error> { 102 | self.verify_with(network, |_| ControlFlow::Continue(())) 103 | } 104 | 105 | pub fn verify_with( 106 | &self, 107 | network: Network, 108 | progress: impl Fn(Height) -> ControlFlow<()>, 109 | ) -> Result<(), store::Error> { 110 | let mut prev_header = FilterHeader::all_zeros(); 111 | 112 | if self.headers.first().header != FilterHeader::genesis(network) { 113 | return Err(store::Error::Integrity); 114 | } 115 | 116 | for (height, stored_header) in self.headers.iter().enumerate() { 117 | let expected = stored_header.hash.filter_header(&prev_header); 118 | let actual = stored_header.header; 119 | 120 | if actual != expected { 121 | return Err(store::Error::Integrity); 122 | } 123 | prev_header = actual; 124 | 125 | if progress(height as Height).is_break() { 126 | return Err(store::Error::Interrupted); 127 | } 128 | } 129 | Ok(()) 130 | } 131 | } 132 | 133 | #[allow(unused_variables)] 134 | impl> Filters for FilterCache { 135 | fn get_header(&self, height: Height) -> Option<(FilterHash, FilterHeader)> { 136 | self.headers 137 | .get(height as usize) 138 | .map(|s| (s.hash, s.header)) 139 | } 140 | 141 | fn get_headers(&self, range: RangeInclusive) -> Vec<(FilterHash, FilterHeader)> { 142 | let (start, end) = (*range.start(), *range.end()); 143 | 144 | self.headers 145 | .iter() 146 | .skip(start as usize) 147 | .take(end as usize - start as usize + 1) 148 | .map(|h| (h.hash, h.header)) 149 | .collect() 150 | } 151 | 152 | fn import_headers( 153 | &mut self, 154 | headers: Vec<(FilterHash, FilterHeader)>, 155 | ) -> Result { 156 | let iter = headers 157 | .into_iter() 158 | .map(|(hash, header)| StoredHeader { hash, header }); 159 | 160 | self.headers.tail.extend(iter.clone()); 161 | self.header_store.put(iter).map_err(Error::from) 162 | } 163 | 164 | fn tip(&self) -> (&FilterHash, &FilterHeader) { 165 | let StoredHeader { hash, header } = self.headers.last(); 166 | (hash, header) 167 | } 
168 | 169 | fn height(&self) -> Height { 170 | self.headers.tail.len() as Height 171 | } 172 | 173 | fn rollback(&mut self, height: Height) -> Result<(), Error> { 174 | self.header_store.rollback(height)?; 175 | self.headers.tail.truncate(height as usize); 176 | 177 | Ok(()) 178 | } 179 | 180 | fn clear(&mut self) -> Result<(), Error> { 181 | self.header_store.rollback(0)?; 182 | self.headers.tail.clear(); 183 | 184 | Ok(()) 185 | } 186 | } 187 | -------------------------------------------------------------------------------- /docs/nakamoto-talk.md: -------------------------------------------------------------------------------- 1 | --- 2 | author: "@cloudhead" 3 | paging: Slide %d / %d 4 | --- 5 | 6 | # Nakamoto: a new bitcoin light-client 7 | 8 | Nakamoto is a Bitcoin light-client library written in Rust, with a focus on low 9 | resource utilization and privacy. 10 | 11 | It's built on BIP 157/158 and targeted at mobile and other resource constrained 12 | environments. 13 | 14 | --- 15 | 16 | # whoami 17 | 18 | *@cloudhead* 19 | 20 | Got into Bitcoin in 2017. Interested in light clients and user sovereignty. 21 | 22 | Most of my time goes into Radicle, a peer-to-peer code collaboration stack. 23 | 24 | --- 25 | 26 | # Why? 27 | 28 | * Because an increasing amount of users are on mobile. 29 | * Because decentralization is important. 30 | * Because running a full node is not for everyone. 
31 | 32 | --- 33 | 34 | # Mapping the space 35 | 36 | ``` 37 | Electrum <--------------------------------------------------> Full node 38 | BIP 37 BIP 157 Utreexo 39 | 👍 efficiency 👎 efficiency 40 | 👎 reliability 👍 reliability 41 | 👎 privacy 👍 privacy 42 | ``` 43 | 44 | --- 45 | 46 | # Mapping the space 47 | 48 | ## BIP 37: Bloom filters 49 | 50 | * User computes own per-peer filter and sends to peers 51 | * Peers send filtered blocks with some false positives, plus proofs 52 | * User checks transaction inclusion in filtered blocks 53 | 54 | ## BIP 157: Client-side filtering 55 | 56 | * Full nodes compute generalized per-block filters 57 | * User downloads all filters starting from wallet birth height 58 | * User checks transaction inclusion locally 59 | * If there's a match, user asks random peer for full block and updates UTXOs 60 | 61 | --- 62 | 63 | # Mapping the space 64 | 65 | ## BIP 37: Bloom filters 66 | 67 | A critical design flaw means that if two or more filters are collected by 68 | an adversary, the intersection can be computed. 69 | 70 | The flaws of BIP 37 and the lack of alternative for a long time have led 71 | to a migration to trusted third parties, ie. vendor-supplied Electrum 72 | endpoints. A very small minority of users run their own nodes. 73 | 74 | --- 75 | 76 | ## Electrum 77 | 78 | * Uses SPV proof of inclusion. 79 | * Leaks transactions and associated IP address. 80 | * Leaks public keys and/or addresses. 81 | * Can lie by omission. 82 | 83 | This is the default in most bitcoin & lightning wallets today. Until 84 | recently, the only alternative was to run your own node or deal with 85 | the broken privacy of BIP 37. 86 | 87 | --- 88 | 89 | ## Neutrino 90 | 91 | A few years ago, lightning labs developed a new type of client called 92 | *neutrino* and specified its protocol under BIP 157 and 158. 93 | 94 | In 2021, support was added to bitcoin core (`0.21`). 95 | 96 | Allows lightning nodes to operate without a full node.
97 | 98 | --- 99 | 100 | # Defaults 101 | 102 | *First time users need good defaults.* 103 | 104 | This means an acceptable trade-off between sync speed, resource usage, 105 | trust minimization and privacy. There's no one-size-fits-all. 106 | 107 | *Experienced users need optionality.* 108 | 109 | This means more privacy-conscious users will opt for different 110 | solutions to performance-conscious users. 111 | 112 | --- 113 | 114 | ## BIP 157/8 115 | 116 | * Has a "one honest peer" assumption. 117 | * Very good privacy (only leak the blocks you're interested in). 118 | * No header commitment to the compact filter, so a good address book is important. 119 | 120 | --- 121 | 122 | ## BIP 157/8 123 | 124 | * `~20MB` compact filter header chain. 125 | * `~20KB/block` median compact filter size 126 | 127 | Compact filters are usually not stored for a long time by light clients. 128 | 129 | *Trades bandwidth for privacy.* 130 | 131 | --- 132 | 133 | ## Nakamoto: Example 134 | 135 | ```rust 136 | // The network reactor we're going to use. 137 | type Reactor = nakamoto::net::poll::Reactor; 138 | 139 | // Create a client using the above network reactor. 140 | let client = Client::::new()?; 141 | let handle = client.handle(); 142 | 143 | // Run the client on a different thread, to not block the main thread. 144 | thread::spawn(|| client.run(Config::new(Network::Testnet)).unwrap()); 145 | 146 | // Wait for the client to be connected to a peer.
147 | handle.wait_for_peers(1, Services::default())?; 148 | 149 | // … Wallet code goes here … 150 | ``` 151 | --- 152 | 153 | ## Nakamoto: Architecture 154 | 155 | * Client doesn't spawn any threads, very small footprint 156 | * API is event-based with a command channel 157 | * Networking I/O is cleanly separated from protocol code 158 | * Allows the networking backend to be swapped out 159 | * Protocol code is fully deterministic 160 | * Allows for *discrete event simulation* 161 | * Ships with a simple *poll*-based network reactor 162 | * Minimal dependencies 163 | 164 | --- 165 | 166 | ## Nakamoto: API 167 | 168 | ```rust 169 | trait Handle { 170 | // Wait for the node to be ready and in sync with the blockchain. 171 | fn wait_for_ready(&self) -> Result<(), Error>; 172 | // Rescan the blockchain for matching scripts. 173 | fn rescan( 174 | &self, 175 | range: impl RangeBounds, 176 | watch: impl Iterator 177 | ) -> Result<(), Error>; 178 | // Submit a transaction to the network. 179 | fn submit_transaction(&self, tx: Transaction) -> Result<(), Error>; 180 | // Listen on events. 181 | fn events(&self) -> chan::Receiver; 182 | } 183 | ``` 184 | --- 185 | 186 | ## Nakamoto: API Events 187 | 188 | ```rust 189 | enum Event { 190 | // Ready to process peer events and start receiving commands. 191 | Ready { … }, 192 | 193 | // Peer events. 194 | PeerConnected { … }, 195 | PeerDisconnected { … }, 196 | PeerNegotiated { … }, 197 | PeerHeightUpdated { … }, 198 | 199 | // Block events. 200 | BlockConnected { … }, 201 | BlockDisconnected { … }, 202 | BlockMatched { transactions: Vec, … }, 203 | 204 | // The status of a transaction has changed. 205 | TxStatusChanged { … } 206 | // Compact filters have been synced up to this point. 207 | Synced { … }, 208 | } 209 | ``` 210 | --- 211 | 212 | # What's next? 
213 | 214 | ## Already stable 215 | 216 | * Peer-to-peer networking, handshake, connection management 217 | * Peer stochastic address selection 218 | * Block header sync and verification 219 | * Transaction management 220 | * Compact filter fetching, caching and matching (BIP 157) 221 | 222 | ## Work in progress 223 | 224 | * Filter verification protocol to prevent lies by omission 225 | * Address derivation support in filter scan 226 | 227 | --- 228 | 229 | # Thank you. 230 | 231 | 232 | 233 | *@cloudhead* on twitter, github etc. 234 | 235 | -------------------------------------------------------------------------------- /common/src/network.rs: -------------------------------------------------------------------------------- 1 | //! Bitcoin peer network. Eg. *Mainnet*. 2 | use std::str::FromStr; 3 | 4 | use bitcoin::blockdata::block::{Block, BlockHeader}; 5 | use bitcoin::consensus::params::Params; 6 | use bitcoin::hash_types::BlockHash; 7 | use bitcoin::hashes::hex::FromHex; 8 | use bitcoin::network::constants::ServiceFlags; 9 | 10 | use bitcoin_hashes::sha256d; 11 | 12 | use crate::block::Height; 13 | 14 | /// Peer services supported by nakamoto. 15 | #[derive(Debug, Copy, Clone, Default)] 16 | pub enum Services { 17 | /// Peers with compact filter support. 18 | #[default] 19 | All, 20 | /// Peers with only block support. 21 | Chain, 22 | } 23 | 24 | impl From for ServiceFlags { 25 | fn from(value: Services) -> Self { 26 | match value { 27 | Services::All => Self::COMPACT_FILTERS | Self::NETWORK, 28 | Services::Chain => Self::NETWORK, 29 | } 30 | } 31 | } 32 | 33 | /// Bitcoin peer network. 34 | #[derive(Debug, Copy, Clone)] 35 | pub enum Network { 36 | /// Bitcoin Mainnet. 37 | Mainnet, 38 | /// Bitcoin Testnet. 39 | Testnet, 40 | /// Bitcoin regression test net. 41 | Regtest, 42 | /// Bitcoin signet. 
43 | Signet, 44 | } 45 | 46 | impl Default for Network { 47 | fn default() -> Self { 48 | Self::Mainnet 49 | } 50 | } 51 | 52 | impl FromStr for Network { 53 | type Err = String; 54 | 55 | fn from_str(s: &str) -> Result { 56 | match s { 57 | "mainnet" | "bitcoin" => Ok(Self::Mainnet), 58 | "testnet" => Ok(Self::Testnet), 59 | "regtest" => Ok(Self::Regtest), 60 | "signet" => Ok(Self::Signet), 61 | _ => Err(format!("invalid network specified {:?}", s)), 62 | } 63 | } 64 | } 65 | 66 | impl From for bitcoin::Network { 67 | fn from(value: Network) -> Self { 68 | match value { 69 | Network::Mainnet => Self::Bitcoin, 70 | Network::Testnet => Self::Testnet, 71 | Network::Regtest => Self::Regtest, 72 | Network::Signet => Self::Signet, 73 | } 74 | } 75 | } 76 | 77 | impl From for Network { 78 | fn from(value: bitcoin::Network) -> Self { 79 | match value { 80 | bitcoin::Network::Bitcoin => Self::Mainnet, 81 | bitcoin::Network::Testnet => Self::Testnet, 82 | bitcoin::Network::Signet => Self::Signet, 83 | bitcoin::Network::Regtest => Self::Regtest, 84 | } 85 | } 86 | } 87 | 88 | impl Network { 89 | /// Return the default listen port for the network. 90 | pub fn port(&self) -> u16 { 91 | match self { 92 | Network::Mainnet => 8333, 93 | Network::Testnet => 18333, 94 | Network::Regtest => 18334, 95 | Network::Signet => 38333, 96 | } 97 | } 98 | 99 | /// Blockchain checkpoints. 100 | pub fn checkpoints(&self) -> Box> { 101 | use crate::block::checkpoints; 102 | 103 | let iter = match self { 104 | Network::Mainnet => checkpoints::MAINNET, 105 | Network::Testnet => checkpoints::TESTNET, 106 | Network::Regtest => checkpoints::REGTEST, 107 | Network::Signet => checkpoints::SIGNET, 108 | } 109 | .iter() 110 | .cloned() 111 | .map(|(height, hash)| { 112 | let hash = BlockHash::from_hex(hash).unwrap(); 113 | (height, hash) 114 | }); 115 | 116 | Box::new(iter) 117 | } 118 | 119 | /// Return the short string representation of this network. 
120 | pub fn as_str(&self) -> &'static str { 121 | match self { 122 | Network::Mainnet => "mainnet", 123 | Network::Testnet => "testnet", 124 | Network::Regtest => "regtest", 125 | Network::Signet => "signet", 126 | } 127 | } 128 | 129 | /// DNS seeds. Used to bootstrap the client's address book. 130 | pub fn seeds(&self) -> &[&str] { 131 | match self { 132 | Network::Mainnet => &[ 133 | "seed.bitcoin.sipa.be", // Pieter Wuille 134 | "dnsseed.bluematt.me", // Matt Corallo 135 | "dnsseed.bitcoin.dashjr.org", // Luke Dashjr 136 | "seed.bitcoinstats.com", // Christian Decker 137 | "seed.bitcoin.jonasschnelli.ch", // Jonas Schnelli 138 | "seed.btc.petertodd.org", // Peter Todd 139 | "seed.bitcoin.sprovoost.nl", // Sjors Provoost 140 | "dnsseed.emzy.de", // Stephan Oeste 141 | "seed.bitcoin.wiz.biz", // Jason Maurice 142 | "seed.cloudhead.io", // Alexis Sellier 143 | ], 144 | Network::Testnet => &[ 145 | "testnet-seed.bitcoin.jonasschnelli.ch", 146 | "seed.tbtc.petertodd.org", 147 | "seed.testnet.bitcoin.sprovoost.nl", 148 | "testnet-seed.bluematt.me", 149 | ], 150 | Network::Regtest => &[], // No seeds 151 | Network::Signet => &["seed.signet.bitcoin.sprovoost.nl"], 152 | } 153 | } 154 | } 155 | 156 | impl Network { 157 | /// Get the genesis block header. 158 | /// 159 | /// ``` 160 | /// use nakamoto_common::network::Network; 161 | /// 162 | /// let network = Network::Mainnet; 163 | /// let genesis = network.genesis(); 164 | /// 165 | /// assert_eq!(network.genesis_hash(), genesis.block_hash()); 166 | /// ``` 167 | pub fn genesis(&self) -> BlockHeader { 168 | self.genesis_block().header 169 | } 170 | 171 | /// Get the genesis block. 172 | pub fn genesis_block(&self) -> Block { 173 | use bitcoin::blockdata::constants; 174 | 175 | constants::genesis_block((*self).into()) 176 | } 177 | 178 | /// Get the hash of the genesis block of this network. 
179 | pub fn genesis_hash(&self) -> BlockHash { 180 | use crate::block::genesis; 181 | use bitcoin_hashes::Hash; 182 | 183 | let hash = match self { 184 | Self::Mainnet => genesis::MAINNET, 185 | Self::Testnet => genesis::TESTNET, 186 | Self::Regtest => genesis::REGTEST, 187 | Self::Signet => genesis::SIGNET, 188 | }; 189 | BlockHash::from_hash( 190 | sha256d::Hash::from_slice(hash) 191 | .expect("the genesis hash has the right number of bytes"), 192 | ) 193 | } 194 | 195 | /// Get the consensus parameters for this network. 196 | pub fn params(&self) -> Params { 197 | Params::new((*self).into()) 198 | } 199 | 200 | /// Get the network magic number for this network. 201 | pub fn magic(&self) -> u32 { 202 | bitcoin::Network::from(*self).magic() 203 | } 204 | } 205 | -------------------------------------------------------------------------------- /p2p/src/fsm/pingmgr.rs: -------------------------------------------------------------------------------- 1 | //! Ping manager. 2 | //! 3 | //! Detects dead peer connections and responds to peer `ping` messages. 4 | //! 5 | //! *Implementation of BIP 0031.* 6 | //! 7 | use std::collections::VecDeque; 8 | use std::net; 9 | 10 | use nakamoto_common::bitcoin::network::message::NetworkMessage; 11 | use nakamoto_common::block::time::{Clock, LocalDuration, LocalTime}; 12 | use nakamoto_common::collections::HashMap; 13 | 14 | use crate::fsm::PeerId; 15 | 16 | use super::{ 17 | output::{Io, Outbox}, 18 | DisconnectReason, Event, 19 | }; 20 | 21 | /// Time interval to wait between sent pings. 22 | pub const PING_INTERVAL: LocalDuration = LocalDuration::from_mins(2); 23 | /// Time to wait to receive a pong when sending a ping. 24 | pub const PING_TIMEOUT: LocalDuration = LocalDuration::from_secs(30); 25 | 26 | /// Maximum number of latencies recorded per peer. 
const MAX_RECORDED_LATENCIES: usize = 64;

/// Ping state of a peer.
#[derive(Debug)]
enum State {
    /// A `ping` was sent with this nonce; we expect a matching `pong`.
    AwaitingPong { nonce: u64, since: LocalTime },
    /// Not waiting for a `pong`; `since` is when we last became idle.
    Idle { since: LocalTime },
}

/// Per-peer ping bookkeeping.
///
/// NOTE(review): generic type arguments in this block were stripped by the
/// archive export and have been reconstructed from usage — verify upstream.
#[derive(Debug)]
struct Peer {
    /// Network address of the peer.
    address: net::SocketAddr,
    /// Current ping/pong state.
    state: State,
    /// Observed round-trip latencies for this peer, most recent first.
    latencies: VecDeque<LocalDuration>,
}

impl Peer {
    /// Calculate the average latency of this peer.
    ///
    /// Returns a zero duration when no samples have been recorded yet;
    /// previously this divided by zero (and panicked) on an empty set.
    #[allow(dead_code)]
    fn latency(&self) -> LocalDuration {
        if self.latencies.is_empty() {
            // `LocalDuration / 0` panics; an empty sample set means "no data".
            return LocalDuration::from_secs(0);
        }
        let sum: LocalDuration = self.latencies.iter().sum();

        sum / self.latencies.len() as u32
    }

    /// Record a round-trip latency sample, keeping at most
    /// `MAX_RECORDED_LATENCIES` of the most recent samples.
    fn record_latency(&mut self, sample: LocalDuration) {
        self.latencies.push_front(sample);
        self.latencies.truncate(MAX_RECORDED_LATENCIES);
    }
}

/// Detects dead peer connections.
#[derive(Debug)]
pub struct PingManager<C> {
    /// Tracked peers, keyed by address.
    peers: HashMap<PeerId, Peer>,
    /// How long to wait for a `pong` before disconnecting a peer.
    ping_timeout: LocalDuration,
    /// Random number generator, used to generate ping nonces.
    rng: fastrand::Rng,
    /// Queued I/O, drained through the [`Iterator`] impl.
    outbox: Outbox,
    /// Clock used to timestamp pings.
    clock: C,
}

impl<C> Iterator for PingManager<C> {
    type Item = Io;

    fn next(&mut self) -> Option<Io> {
        self.outbox.next()
    }
}

impl<C: Clock> PingManager<C> {
    /// Create a new ping manager.
    pub fn new(ping_timeout: LocalDuration, rng: fastrand::Rng, clock: C) -> Self {
        let peers = HashMap::with_hasher(rng.clone().into());
        let outbox = Outbox::default();

        Self {
            peers,
            ping_timeout,
            rng,
            outbox,
            clock,
        }
    }

    /// Event received.
    pub fn received_event<T>(&mut self, event: Event, _tree: &T) {
        match event {
            Event::PeerNegotiated { addr, .. } => {
                self.peer_negotiated(addr);
            }
            Event::PeerDisconnected { addr, ..
} => { 99 | self.peers.remove(&addr); 100 | } 101 | Event::MessageReceived { from, message } => match message.as_ref() { 102 | NetworkMessage::Ping(nonce) => { 103 | self.received_ping(from, *nonce); 104 | } 105 | NetworkMessage::Pong(nonce) => { 106 | self.received_pong(from, *nonce); 107 | } 108 | _ => {} 109 | }, 110 | _ => {} 111 | } 112 | } 113 | 114 | /// Called when a peer is negotiated. 115 | fn peer_negotiated(&mut self, address: PeerId) { 116 | let nonce = self.rng.u64(..); 117 | let now = self.clock.local_time(); 118 | 119 | self.outbox.ping(address, nonce); 120 | self.peers.insert( 121 | address, 122 | Peer { 123 | address, 124 | state: State::AwaitingPong { nonce, since: now }, 125 | latencies: VecDeque::new(), 126 | }, 127 | ); 128 | } 129 | 130 | /// Called when a tick is received. 131 | pub fn timer_expired(&mut self) { 132 | let now = self.clock.local_time(); 133 | 134 | for peer in self.peers.values_mut() { 135 | match peer.state { 136 | State::AwaitingPong { since, .. } => { 137 | // TODO: By using nonces we should be able to overlap ping messages. 138 | // This would allow us to only disconnect a peer after N ping messages 139 | // are sent in a row with no reply. 140 | // 141 | // A ping was sent and we're waiting for a `pong`. If too much 142 | // time has passed, we consider this peer dead, and disconnect 143 | // from them. 144 | if now - since >= self.ping_timeout { 145 | self.outbox 146 | .disconnect(peer.address, DisconnectReason::PeerTimeout("ping")); 147 | } 148 | } 149 | State::Idle { since } => { 150 | // We aren't waiting for any `pong`. Check whether enough time has passed since we 151 | // received the last `pong`, and if so, send a new `ping`. 
152 | if now - since >= PING_INTERVAL { 153 | let nonce = self.rng.u64(..); 154 | 155 | self.outbox 156 | .ping(peer.address, nonce) 157 | .set_timer(self.ping_timeout) 158 | .set_timer(PING_INTERVAL); 159 | 160 | peer.state = State::AwaitingPong { nonce, since: now }; 161 | } 162 | } 163 | } 164 | } 165 | } 166 | 167 | /// Called when a `ping` is received. 168 | fn received_ping(&mut self, addr: PeerId, nonce: u64) -> bool { 169 | if self.peers.contains_key(&addr) { 170 | self.outbox.pong(addr, nonce); 171 | 172 | return true; 173 | } 174 | false 175 | } 176 | 177 | /// Called when a `pong` is received. 178 | fn received_pong(&mut self, addr: PeerId, nonce: u64) -> bool { 179 | if let Some(peer) = self.peers.get_mut(&addr) { 180 | let now = self.clock.local_time(); 181 | 182 | match peer.state { 183 | State::AwaitingPong { 184 | nonce: last_nonce, 185 | since, 186 | } => { 187 | if nonce == last_nonce { 188 | peer.record_latency(now - since); 189 | peer.state = State::Idle { since: now }; 190 | 191 | return true; 192 | } 193 | } 194 | // Unsolicited or redundant `pong`. Ignore. 195 | State::Idle { .. } => {} 196 | } 197 | } 198 | false 199 | } 200 | } 201 | -------------------------------------------------------------------------------- /net/poll/src/time.rs: -------------------------------------------------------------------------------- 1 | //! Time-related functionality useful for reactors. 2 | pub use nakamoto_net::time::{LocalDuration, LocalTime}; 3 | 4 | /// Manages timers and triggers timeouts. 5 | pub struct TimeoutManager { 6 | timeouts: Vec<(K, LocalTime)>, 7 | threshold: LocalDuration, 8 | } 9 | 10 | impl TimeoutManager { 11 | /// Create a new timeout manager. 12 | /// 13 | /// Takes a threshold below which two timeouts cannot overlap. 14 | pub fn new(threshold: LocalDuration) -> Self { 15 | Self { 16 | timeouts: vec![], 17 | threshold, 18 | } 19 | } 20 | 21 | /// Return the number of timeouts being tracked. 
22 | pub fn len(&self) -> usize { 23 | self.timeouts.len() 24 | } 25 | 26 | /// Check whether there are timeouts being tracked. 27 | pub fn is_empty(&self) -> bool { 28 | self.timeouts.is_empty() 29 | } 30 | 31 | /// Register a new timeout with an associated key and wake-up time. 32 | /// 33 | /// ``` 34 | /// use nakamoto_net_poll::time::{LocalTime, LocalDuration, TimeoutManager}; 35 | /// 36 | /// let mut tm = TimeoutManager::new(LocalDuration::from_secs(1)); 37 | /// let now = LocalTime::now(); 38 | /// 39 | /// let registered = tm.register(0xA, now + LocalDuration::from_secs(8)); 40 | /// assert!(registered); 41 | /// 42 | /// let registered = tm.register(0xB, now + LocalDuration::from_secs(9)); 43 | /// assert!(registered); 44 | /// assert_eq!(tm.len(), 2); 45 | /// 46 | /// let registered = tm.register(0xC, now + LocalDuration::from_millis(9541)); 47 | /// assert!(!registered); 48 | /// 49 | /// let registered = tm.register(0xC, now + LocalDuration::from_millis(9999)); 50 | /// assert!(!registered); 51 | /// assert_eq!(tm.len(), 2); 52 | /// ``` 53 | pub fn register(&mut self, key: K, time: LocalTime) -> bool { 54 | // If this timeout is too close to a pre-existing timeout, 55 | // don't register it. 56 | if self 57 | .timeouts 58 | .iter() 59 | .any(|(_, t)| t.diff(time) < self.threshold) 60 | { 61 | return false; 62 | } 63 | 64 | self.timeouts.push((key, time)); 65 | self.timeouts.sort_unstable_by(|(_, a), (_, b)| b.cmp(a)); 66 | 67 | true 68 | } 69 | 70 | /// Get the minimum time duration we should wait for at least one timeout 71 | /// to be reached. Returns `None` if there are no timeouts. 
72 | /// 73 | /// ``` 74 | /// use nakamoto_net_poll::time::{LocalTime, LocalDuration, TimeoutManager}; 75 | /// 76 | /// let mut tm = TimeoutManager::new(LocalDuration::from_secs(0)); 77 | /// let mut now = LocalTime::now(); 78 | /// 79 | /// tm.register(0xA, now + LocalDuration::from_millis(16)); 80 | /// tm.register(0xB, now + LocalDuration::from_millis(8)); 81 | /// tm.register(0xC, now + LocalDuration::from_millis(64)); 82 | /// 83 | /// // We need to wait 8 millis to trigger the next timeout (1). 84 | /// assert!(tm.next(now) <= Some(LocalDuration::from_millis(8))); 85 | /// 86 | /// // ... sleep for a millisecond ... 87 | /// now.elapse(LocalDuration::from_millis(1)); 88 | /// 89 | /// // Now we don't need to wait as long! 90 | /// assert!(tm.next(now).unwrap() <= LocalDuration::from_millis(7)); 91 | /// ``` 92 | pub fn next(&self, now: impl Into) -> Option { 93 | let now = now.into(); 94 | 95 | self.timeouts.last().map(|(_, t)| { 96 | if *t >= now { 97 | *t - now 98 | } else { 99 | LocalDuration::from_secs(0) 100 | } 101 | }) 102 | } 103 | 104 | /// Given the current time, populate the input vector with the keys that 105 | /// have timed out. Returns the number of keys that timed out. 
106 | /// 107 | /// ``` 108 | /// use nakamoto_net_poll::time::{LocalTime, LocalDuration, TimeoutManager}; 109 | /// 110 | /// let mut tm = TimeoutManager::new(LocalDuration::from_secs(0)); 111 | /// let now = LocalTime::now(); 112 | /// 113 | /// tm.register(0xA, now + LocalDuration::from_millis(8)); 114 | /// tm.register(0xB, now + LocalDuration::from_millis(16)); 115 | /// tm.register(0xC, now + LocalDuration::from_millis(64)); 116 | /// tm.register(0xD, now + LocalDuration::from_millis(72)); 117 | /// 118 | /// let mut timeouts = Vec::new(); 119 | /// 120 | /// assert_eq!(tm.wake(now + LocalDuration::from_millis(21), &mut timeouts), 2); 121 | /// assert_eq!(timeouts, vec![0xA, 0xB]); 122 | /// assert_eq!(tm.len(), 2); 123 | /// ``` 124 | pub fn wake(&mut self, now: LocalTime, woken: &mut Vec) -> usize { 125 | let before = woken.len(); 126 | 127 | while let Some((k, t)) = self.timeouts.pop() { 128 | if now >= t { 129 | woken.push(k); 130 | } else { 131 | self.timeouts.push((k, t)); 132 | break; 133 | } 134 | } 135 | woken.len() - before 136 | } 137 | } 138 | 139 | #[cfg(test)] 140 | mod tests { 141 | use super::*; 142 | use quickcheck_macros::quickcheck; 143 | 144 | #[quickcheck] 145 | fn properties(timeouts: Vec, threshold: u64) -> bool { 146 | let threshold = LocalDuration::from_secs(threshold); 147 | let mut tm = TimeoutManager::new(threshold); 148 | let mut now = LocalTime::now(); 149 | 150 | for t in timeouts { 151 | tm.register(t, now + LocalDuration::from_secs(t)); 152 | } 153 | 154 | let mut woken = Vec::new(); 155 | while let Some(delta) = tm.next(now) { 156 | now.elapse(delta); 157 | assert!(tm.wake(now, &mut woken) > 0); 158 | } 159 | 160 | let sorted = woken.windows(2).all(|w| w[0] <= w[1]); 161 | let granular = woken.windows(2).all(|w| w[1] - w[0] >= threshold.as_secs()); 162 | 163 | sorted && granular 164 | } 165 | 166 | #[test] 167 | fn test_wake() { 168 | let mut tm = TimeoutManager::new(LocalDuration::from_secs(0)); 169 | let now = 
LocalTime::now(); 170 | 171 | tm.register(0xA, now + LocalDuration::from_millis(8)); 172 | tm.register(0xB, now + LocalDuration::from_millis(16)); 173 | tm.register(0xC, now + LocalDuration::from_millis(64)); 174 | tm.register(0xD, now + LocalDuration::from_millis(72)); 175 | 176 | let mut timeouts = Vec::new(); 177 | 178 | assert_eq!(tm.wake(now, &mut timeouts), 0); 179 | assert_eq!(timeouts, vec![]); 180 | assert_eq!(tm.len(), 4); 181 | assert_eq!( 182 | tm.wake(now + LocalDuration::from_millis(9), &mut timeouts), 183 | 1 184 | ); 185 | assert_eq!(timeouts, vec![0xA]); 186 | assert_eq!(tm.len(), 3, "one timeout has expired"); 187 | 188 | timeouts.clear(); 189 | 190 | assert_eq!( 191 | tm.wake(now + LocalDuration::from_millis(66), &mut timeouts), 192 | 2 193 | ); 194 | assert_eq!(timeouts, vec![0xB, 0xC]); 195 | assert_eq!(tm.len(), 1, "another two timeouts have expired"); 196 | 197 | timeouts.clear(); 198 | 199 | assert_eq!( 200 | tm.wake(now + LocalDuration::from_millis(96), &mut timeouts), 201 | 1 202 | ); 203 | assert_eq!(timeouts, vec![0xD]); 204 | assert!(tm.is_empty(), "all timeouts have expired"); 205 | } 206 | } 207 | -------------------------------------------------------------------------------- /net/src/time.rs: -------------------------------------------------------------------------------- 1 | use std::sync::atomic; 2 | use std::time::{SystemTime, UNIX_EPOCH}; 3 | 4 | /// Local time. 5 | /// 6 | /// This clock is monotonic. 7 | #[derive(Debug, PartialEq, Eq, Clone, Copy, Ord, PartialOrd, Default)] 8 | pub struct LocalTime { 9 | /// Milliseconds since Epoch. 10 | millis: u128, 11 | } 12 | 13 | impl std::fmt::Display for LocalTime { 14 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 15 | write!(f, "{}", self.as_secs()) 16 | } 17 | } 18 | 19 | impl LocalTime { 20 | /// Construct a local time from the current system time. 
21 | pub fn now() -> Self { 22 | static LAST: atomic::AtomicU64 = atomic::AtomicU64::new(0); 23 | 24 | let now = Self::from(SystemTime::now()).as_secs(); 25 | let last = LAST.load(atomic::Ordering::SeqCst); 26 | 27 | // If the current time is in the past, return the last recorded time instead. 28 | if now < last { 29 | Self::from_secs(last) 30 | } else { 31 | LAST.store(now, atomic::Ordering::SeqCst); 32 | LocalTime::from_secs(now) 33 | } 34 | } 35 | 36 | /// Construct a local time from whole seconds since Epoch. 37 | pub const fn from_secs(secs: u64) -> Self { 38 | Self { 39 | millis: secs as u128 * 1000, 40 | } 41 | } 42 | 43 | /// Construct a local time to whole seconds since Epoch. 44 | pub fn as_secs(&self) -> u64 { 45 | (self.millis / 1000).try_into().unwrap() 46 | } 47 | 48 | /// Get the duration since the given time. 49 | /// 50 | /// # Panics 51 | /// 52 | /// This function will panic if `earlier` is later than `self`. 53 | pub fn duration_since(&self, earlier: LocalTime) -> LocalDuration { 54 | LocalDuration::from_millis( 55 | self.millis 56 | .checked_sub(earlier.millis) 57 | .expect("supplied time is later than self"), 58 | ) 59 | } 60 | 61 | /// Get the difference between two times. 62 | pub fn diff(&self, other: LocalTime) -> LocalDuration { 63 | if self > &other { 64 | self.duration_since(other) 65 | } else { 66 | other.duration_since(*self) 67 | } 68 | } 69 | 70 | /// Elapse time. 71 | /// 72 | /// Adds the given duration to the time. 73 | pub fn elapse(&mut self, duration: LocalDuration) { 74 | self.millis += duration.as_millis() 75 | } 76 | } 77 | 78 | /// Convert a `SystemTime` into a local time. 79 | impl From for LocalTime { 80 | fn from(system: SystemTime) -> Self { 81 | let millis = system.duration_since(UNIX_EPOCH).unwrap().as_millis(); 82 | 83 | Self { millis } 84 | } 85 | } 86 | 87 | /// Substract two local times. Yields a duration. 
88 | impl std::ops::Sub for LocalTime { 89 | type Output = LocalDuration; 90 | 91 | fn sub(self, other: LocalTime) -> LocalDuration { 92 | LocalDuration(self.millis.saturating_sub(other.millis)) 93 | } 94 | } 95 | 96 | /// Substract a duration from a local time. Yields a local time. 97 | impl std::ops::Sub for LocalTime { 98 | type Output = LocalTime; 99 | 100 | fn sub(self, other: LocalDuration) -> LocalTime { 101 | LocalTime { 102 | millis: self.millis - other.0, 103 | } 104 | } 105 | } 106 | 107 | /// Add a duration to a local time. Yields a local time. 108 | impl std::ops::Add for LocalTime { 109 | type Output = LocalTime; 110 | 111 | fn add(self, other: LocalDuration) -> LocalTime { 112 | LocalTime { 113 | millis: self.millis + other.0, 114 | } 115 | } 116 | } 117 | 118 | /// Time duration as measured locally. 119 | #[derive(Debug, Copy, Clone, PartialOrd, Ord, PartialEq, Eq)] 120 | pub struct LocalDuration(u128); 121 | 122 | impl LocalDuration { 123 | /// The time interval between blocks. The "block time". 124 | pub const BLOCK_INTERVAL: LocalDuration = Self::from_mins(10); 125 | 126 | /// Maximum duration. 127 | pub const MAX: LocalDuration = LocalDuration(u128::MAX); 128 | 129 | /// Create a new duration from whole seconds. 130 | pub const fn from_secs(secs: u64) -> Self { 131 | Self(secs as u128 * 1000) 132 | } 133 | 134 | /// Create a new duration from whole minutes. 135 | pub const fn from_mins(mins: u64) -> Self { 136 | Self::from_secs(mins * 60) 137 | } 138 | 139 | /// Construct a new duration from milliseconds. 140 | pub const fn from_millis(millis: u128) -> Self { 141 | Self(millis) 142 | } 143 | 144 | /// Return the number of minutes in this duration. 145 | pub const fn as_mins(&self) -> u64 { 146 | self.as_secs() / 60 147 | } 148 | 149 | /// Return the number of seconds in this duration. 150 | pub const fn as_secs(&self) -> u64 { 151 | (self.0 / 1000) as u64 152 | } 153 | 154 | /// Return the number of milliseconds in this duration. 
155 | pub const fn as_millis(&self) -> u128 { 156 | self.0 157 | } 158 | } 159 | 160 | impl std::fmt::Display for LocalDuration { 161 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 162 | if self.as_millis() < 1000 { 163 | write!(f, "{} millisecond(s)", self.as_millis()) 164 | } else if self.as_secs() < 60 { 165 | let fraction = self.as_millis() % 1000; 166 | if fraction > 0 { 167 | write!(f, "{}.{} second(s)", self.as_secs(), fraction) 168 | } else { 169 | write!(f, "{} second(s)", self.as_secs()) 170 | } 171 | } else if self.as_mins() < 60 { 172 | let fraction = self.as_secs() % 60; 173 | if fraction > 0 { 174 | write!( 175 | f, 176 | "{:.2} minutes(s)", 177 | self.as_mins() as f64 + (fraction as f64 / 60.) 178 | ) 179 | } else { 180 | write!(f, "{} minutes(s)", self.as_mins()) 181 | } 182 | } else { 183 | let fraction = self.as_mins() % 60; 184 | if fraction > 0 { 185 | write!(f, "{:.2} hour(s)", self.as_mins() as f64 / 60.) 186 | } else { 187 | write!(f, "{} hour(s)", self.as_mins() / 60) 188 | } 189 | } 190 | } 191 | } 192 | 193 | impl<'a> std::iter::Sum<&'a LocalDuration> for LocalDuration { 194 | fn sum>(iter: I) -> LocalDuration { 195 | let mut total: u128 = 0; 196 | 197 | for entry in iter { 198 | total = total 199 | .checked_add(entry.0) 200 | .expect("iter::sum should not overflow"); 201 | } 202 | Self(total) 203 | } 204 | } 205 | 206 | impl std::ops::Add for LocalDuration { 207 | type Output = LocalDuration; 208 | 209 | fn add(self, other: LocalDuration) -> LocalDuration { 210 | LocalDuration(self.0 + other.0) 211 | } 212 | } 213 | 214 | impl std::ops::Div for LocalDuration { 215 | type Output = LocalDuration; 216 | 217 | fn div(self, other: u32) -> LocalDuration { 218 | LocalDuration(self.0 / other as u128) 219 | } 220 | } 221 | 222 | impl std::ops::Mul for LocalDuration { 223 | type Output = LocalDuration; 224 | 225 | fn mul(self, other: u64) -> LocalDuration { 226 | LocalDuration(self.0 * other as u128) 227 | } 228 | } 229 | 
230 | impl From for std::time::Duration { 231 | fn from(other: LocalDuration) -> Self { 232 | std::time::Duration::from_millis(other.0 as u64) 233 | } 234 | } 235 | -------------------------------------------------------------------------------- /client/src/handle.rs: -------------------------------------------------------------------------------- 1 | //! Node handles are created from nodes by users of the library, to communicate with the underlying 2 | //! protocol instance. 3 | use std::net; 4 | use std::ops::{RangeBounds, RangeInclusive}; 5 | 6 | use crossbeam_channel as chan; 7 | use thiserror::Error; 8 | 9 | use nakamoto_common::bitcoin::network::constants::ServiceFlags; 10 | use nakamoto_common::bitcoin::network::Address; 11 | use nakamoto_common::bitcoin::util::uint::Uint256; 12 | use nakamoto_common::bitcoin::{Script, Txid}; 13 | 14 | use nakamoto_common::bitcoin::network::message::NetworkMessage; 15 | use nakamoto_common::block::filter::BlockFilter; 16 | use nakamoto_common::block::tree::{BlockReader, ImportResult}; 17 | use nakamoto_common::block::{self, Block, BlockHash, BlockHeader, Height, Transaction}; 18 | use nakamoto_common::nonempty::NonEmpty; 19 | use nakamoto_p2p::fsm::Link; 20 | use nakamoto_p2p::fsm::{self, Command, CommandError, Event, GetFiltersError, Peer}; 21 | 22 | /// An error resulting from a handle method. 23 | #[derive(Error, Debug)] 24 | pub enum Error { 25 | /// The command channel disconnected. 26 | #[error("command channel disconnected")] 27 | Disconnected, 28 | /// The command returned an error. 29 | #[error("command failed: {0}")] 30 | Command(#[from] CommandError), 31 | /// Failed to fetch filters. 32 | #[error("failed to get filters: {0}")] 33 | GetFilters(#[from] GetFiltersError), 34 | /// The operation timed out. 35 | #[error("the operation timed out")] 36 | Timeout, 37 | /// An I/O error occured. 
38 | #[error(transparent)] 39 | Io(#[from] std::io::Error), 40 | } 41 | 42 | impl From for Error { 43 | fn from(_: chan::RecvError) -> Self { 44 | Self::Disconnected 45 | } 46 | } 47 | 48 | impl From for Error { 49 | fn from(err: chan::RecvTimeoutError) -> Self { 50 | match err { 51 | chan::RecvTimeoutError::Timeout => Self::Timeout, 52 | chan::RecvTimeoutError::Disconnected => Self::Disconnected, 53 | } 54 | } 55 | } 56 | 57 | impl From> for Error { 58 | fn from(_: chan::SendError) -> Self { 59 | Self::Disconnected 60 | } 61 | } 62 | 63 | /// A handle for communicating with a node process. 64 | pub trait Handle: Sized + Send + Sync + Clone { 65 | /// Get the tip of the active chain. Returns the height of the chain, the header, 66 | /// and the total accumulated work. 67 | fn get_tip(&self) -> Result<(Height, BlockHeader, Uint256), Error>; 68 | /// Get a block header from the block header cache. 69 | fn get_block(&self, hash: &BlockHash) -> Result, Error>; 70 | /// Get a block header by height, from the block header cache. 71 | fn get_block_by_height(&self, height: Height) -> Result, Error>; 72 | /// Query the local block tree using the given function. To return results from 73 | /// the query function, a [channel](`crate::chan`) may be used. 74 | fn query_tree( 75 | &self, 76 | query: impl Fn(&dyn BlockReader) + Send + Sync + 'static, 77 | ) -> Result<(), Error>; 78 | /// Find a branch from the active chain to the given (stale) block. 79 | /// 80 | /// See [BlockReader::find_branch](`nakamoto_common::block::tree::BlockReader::find_branch`). 81 | fn find_branch(&self, to: &BlockHash) 82 | -> Result)>, Error>; 83 | 84 | /// Request a full block from the network. The block will be sent over the channel created 85 | /// by [`Handle::blocks`] once received. 86 | fn request_block(&self, hash: &BlockHash) -> Result<(), Error>; 87 | /// Request compact filters from the network. 
The filters will be sent over the channel created 88 | /// by [`Handle::filters`] as they are received. 89 | fn request_filters(&self, range: RangeInclusive) -> Result<(), Error>; 90 | 91 | /// Subscribe to blocks received. 92 | fn blocks(&self) -> chan::Receiver<(Block, Height)>; 93 | /// Subscribe to compact filters received. 94 | fn filters(&self) -> chan::Receiver<(BlockFilter, BlockHash, Height)>; 95 | /// Subscribe to client events. 96 | fn events(&self) -> chan::Receiver; 97 | 98 | /// Send a command to the client. 99 | fn command(&self, cmd: Command) -> Result<(), Error>; 100 | /// Rescan the blockchain for matching scripts. 101 | /// 102 | /// If a "reorg" takes place, filters up to the start of the provided range 103 | /// will be re-fetched and scanned. 104 | fn rescan( 105 | &self, 106 | range: impl RangeBounds, 107 | watch: impl Iterator, 108 | ) -> Result<(), Error> { 109 | // TODO: Handle invalid/empty ranges. 110 | 111 | let from = range.start_bound().cloned(); 112 | let to = range.end_bound().cloned(); 113 | 114 | self.command(Command::Rescan { 115 | from, 116 | to, 117 | watch: watch.collect(), 118 | })?; 119 | 120 | Ok(()) 121 | } 122 | /// Update the watchlist with the provided scripts. 123 | /// 124 | /// Note that this won't trigger a rescan of any existing blocks. To avoid 125 | /// missing matching blocks, always watch scripts before sharing their 126 | /// corresponding address. 127 | fn watch(&self, watch: impl Iterator) -> Result<(), Error> { 128 | self.command(Command::Watch { 129 | watch: watch.collect(), 130 | })?; 131 | 132 | Ok(()) 133 | } 134 | /// Broadcast a message to peers matching the predicate. 135 | /// To only broadcast to outbound peers, use [`Peer::is_outbound`]. 136 | fn broadcast( 137 | &self, 138 | msg: NetworkMessage, 139 | predicate: fn(Peer) -> bool, 140 | ) -> Result, Error>; 141 | /// Connect to the designated peer address. 
142 | fn connect(&self, addr: net::SocketAddr) -> Result; 143 | /// Disconnect from the designated peer address. 144 | fn disconnect(&self, addr: net::SocketAddr) -> Result<(), Error>; 145 | /// Submit a transaction to the network. 146 | /// 147 | /// Returns the peer(s) the transaction was announced to, or an error if no peers were found. 148 | fn submit_transaction(&self, tx: Transaction) -> Result, Error>; 149 | /// Return a transaction that was propagated by the client. 150 | fn get_submitted_transaction(&self, txid: &Txid) -> Result, Error>; 151 | /// Import block headers into the node. 152 | /// This may cause the node to broadcast header or inventory messages to its peers. 153 | fn import_headers( 154 | &self, 155 | headers: Vec, 156 | ) -> Result, Error>; 157 | /// Import peer addresses into the node's address book. 158 | fn import_addresses(&self, addrs: Vec
) -> Result<(), Error>; 159 | /// Wait for the given predicate to be fulfilled. 160 | fn wait Option, T>(&self, f: F) -> Result; 161 | /// Wait for a given number of peers to be connected with the given services. 162 | fn wait_for_peers( 163 | &self, 164 | count: usize, 165 | required_services: impl Into, 166 | ) -> Result, Error>; 167 | /// Wait for the node's active chain to reach a certain height. The hash at that height 168 | /// is returned. 169 | fn wait_for_height(&self, h: Height) -> Result; 170 | /// Shutdown the node process. 171 | fn shutdown(self) -> Result<(), Error>; 172 | } 173 | -------------------------------------------------------------------------------- /p2p/src/fsm/fees.rs: -------------------------------------------------------------------------------- 1 | //! Types and utilities related to transaction fees and fee rates. 2 | use std::collections::VecDeque; 3 | 4 | use nakamoto_common::bitcoin::blockdata::constants::WITNESS_SCALE_FACTOR; 5 | use nakamoto_common::bitcoin::{Block, OutPoint, Transaction, TxOut}; 6 | 7 | use nakamoto_common::collections::HashMap; 8 | use nakamoto_common::nonempty::NonEmpty; 9 | 10 | use super::Height; 11 | 12 | // TODO: Prune UTXO set so that it doesn't grow indefinitely. 13 | 14 | /// Maximum depth of a re-org that we are able to handle. 15 | pub const MAX_UTXO_SNAPSHOTS: usize = 12; 16 | 17 | /// Transaction fee rate in satoshis/vByte. 18 | pub type FeeRate = u64; 19 | 20 | /// Fee rate estimate for a single block. 21 | /// Measured in satoshis/vByte. 22 | #[derive(Debug, Clone, PartialEq, Eq)] 23 | pub struct FeeEstimate { 24 | /// The lowest fee rate included in the block. 25 | pub low: FeeRate, 26 | /// The median fee rate of the block. 27 | pub median: FeeRate, 28 | /// The highest fee rate included in the block. 29 | pub high: FeeRate, 30 | } 31 | 32 | impl FeeEstimate { 33 | /// Calculate a fee estimate from a list of fees. 34 | /// Returns [`None`] if the list is empty. 
35 | /// 36 | /// ``` 37 | /// use nakamoto_p2p::fsm::fees::FeeEstimate; 38 | /// 39 | /// assert_eq!( 40 | /// FeeEstimate::from(vec![3, 9, 2]), 41 | /// Some(FeeEstimate { low: 2, median: 3, high: 9 }), 42 | /// ); 43 | /// 44 | /// assert_eq!( 45 | /// FeeEstimate::from(vec![4, 6]), 46 | /// Some(FeeEstimate { low: 4, median: 5, high: 6 }), 47 | /// ); 48 | /// 49 | /// assert_eq!( 50 | /// FeeEstimate::from(vec![9, 2, 1, 7]), 51 | /// Some(FeeEstimate { low: 1, median: 5, high: 9 }), 52 | /// ); 53 | /// 54 | /// assert_eq!( 55 | /// FeeEstimate::from(vec![3]), 56 | /// Some(FeeEstimate { low: 3, median: 3, high: 3 }), 57 | /// ); 58 | /// 59 | /// assert_eq!(FeeEstimate::from(vec![]), None); 60 | /// ``` 61 | pub fn from(mut fees: Vec) -> Option { 62 | fees.sort_unstable(); 63 | 64 | NonEmpty::from_vec(fees).map(|fees| { 65 | let count = fees.len(); 66 | let median = if count % 2 == 1 { 67 | fees[count / 2] 68 | } else { 69 | let left = fees[count / 2 - 1] as f64; 70 | let right = fees[count / 2] as f64; 71 | 72 | ((left + right) / 2.).round() as FeeRate 73 | }; 74 | 75 | Self { 76 | low: *fees.first(), 77 | median, 78 | high: *fees.last(), 79 | } 80 | }) 81 | } 82 | } 83 | 84 | /// Set of unspent transaction outputs (UTXO). 85 | type UtxoSet = HashMap; 86 | 87 | /// Transaction fee rate estimator. 88 | #[derive(Debug, Default)] 89 | pub struct FeeEstimator { 90 | /// UTXO set. 91 | utxos: UtxoSet, 92 | /// Current (best) height. 93 | height: Height, 94 | /// UTXO set snapshots. 95 | /// These are used to return to a previous state in the case of a re-org. 96 | snapshots: VecDeque<(Height, UtxoSet)>, 97 | } 98 | 99 | impl FeeEstimator { 100 | /// Process a block and get a fee estimate. Returns [`None`] if none of the transactions 101 | /// could be processed due to missing UTXOs, or the block height isn't greater than the 102 | /// current block height of the fee estimator. 
103 | pub fn process(&mut self, block: Block, height: Height) -> Option { 104 | let mut fees = Vec::new(); 105 | let snapshot = self.utxos.clone(); 106 | 107 | if height <= self.height { 108 | return None; 109 | } 110 | 111 | for tx in &block.txdata { 112 | if let Some(rate) = self.apply(tx) { 113 | fees.push(rate); 114 | } 115 | } 116 | 117 | self.snapshots.push_back((self.height, snapshot)); 118 | if self.snapshots.len() > MAX_UTXO_SNAPSHOTS { 119 | self.snapshots.pop_front(); 120 | } 121 | self.height = height; 122 | 123 | FeeEstimate::from(fees) 124 | } 125 | 126 | /// Rollback to a certain height. 127 | pub fn rollback(&mut self, height: Height) { 128 | self.snapshots.retain(|(h, _)| h <= &height); 129 | 130 | if let Some((h, snapshot)) = self.snapshots.pop_back() { 131 | assert!(h <= height); 132 | 133 | self.utxos = snapshot; 134 | self.height = h; 135 | } 136 | } 137 | 138 | /// Apply the transaction to the UTXO set and calculate the fee rate. 139 | fn apply(&mut self, tx: &Transaction) -> Option { 140 | let txid = tx.txid(); 141 | let mut received = 0; 142 | let mut sent = 0; 143 | 144 | // Look for outputs. 145 | for (vout, output) in tx.output.iter().enumerate() { 146 | let outpoint = OutPoint { 147 | txid, 148 | vout: vout as u32, 149 | }; 150 | self.utxos.insert(outpoint, output.clone()); 151 | sent += output.value; 152 | } 153 | // Since coinbase transactions have no inputs, we only process the outputs. 154 | if tx.is_coin_base() { 155 | return None; 156 | } 157 | 158 | // Look for inputs. 159 | // 160 | // Only if we have all inputs (ie. previous outputs) in our UTXO set can we calculate 161 | // the transaction fee. If one is missing, we have to bail. 
162 | for input in tx.input.iter() { 163 | if let Some(out) = self.utxos.remove(&input.previous_output) { 164 | received += out.value; 165 | } else { 166 | return None; 167 | } 168 | } 169 | assert!(received >= sent, "you can't spend what you don't have",); 170 | 171 | let fee = received - sent; 172 | let weight = tx.weight(); 173 | let rate = fee as f64 / (weight as f64 / WITNESS_SCALE_FACTOR as f64); 174 | 175 | Some(rate.round() as FeeRate) 176 | } 177 | } 178 | 179 | #[cfg(test)] 180 | mod tests { 181 | use super::*; 182 | use nakamoto_test::assert_matches; 183 | use nakamoto_test::block::gen; 184 | 185 | #[test] 186 | fn test_rollback() { 187 | let mut fe = FeeEstimator::default(); 188 | let mut rng = fastrand::Rng::new(); 189 | let genesis = gen::genesis(&mut rng); 190 | let blocks = gen::blockchain(genesis, 21, &mut rng); 191 | 192 | let mut estimates = HashMap::with_hasher(rng.into()); 193 | 194 | for (height, block) in blocks.iter().cloned().enumerate().skip(1) { 195 | let estimate = fe.process(block, height as Height); 196 | estimates.insert(height, estimate); 197 | } 198 | assert_eq!(fe.snapshots.len(), { MAX_UTXO_SNAPSHOTS }); 199 | assert_eq!(fe.height, 21); 200 | assert_matches!(fe.snapshots.back(), Some((20, _))); 201 | 202 | fe.rollback(18); 203 | assert_eq!(fe.snapshots.len(), 9); 204 | assert_eq!(fe.height, 18); 205 | assert_matches!(fe.snapshots.back(), Some((17, _))); 206 | 207 | assert_eq!( 208 | fe.process(blocks[19].clone(), 19).as_ref().unwrap(), 209 | estimates[&19].as_ref().unwrap() 210 | ); 211 | assert_eq!(fe.snapshots.len(), 10); 212 | assert_eq!(fe.height, 19); 213 | assert_matches!(fe.snapshots.back(), Some((18, _))); 214 | } 215 | 216 | #[test] 217 | fn test_rollback_missing_height() { 218 | let mut fe = FeeEstimator::default(); 219 | let mut rng = fastrand::Rng::new(); 220 | let genesis = gen::genesis(&mut rng); 221 | let blocks = gen::blockchain(genesis, 14, &mut rng); 222 | 223 | fe.process(blocks[8].clone(), 8); 224 | 
// Tail of `test_rollback_missing_height` (started in the previous chunk):
// processes heights 9, 13, 14 — heights 10–12 are deliberately skipped.
fe.process(blocks[9].clone(), 9);
225 |
226 |         fe.process(blocks[13].clone(), 13);
227 |         fe.process(blocks[14].clone(), 14);
228 |
229 |         assert_eq!(fe.snapshots.len(), 4);
230 |
// Rolling back to a missing height restores the nearest lower state (9).
231 |         fe.rollback(10); // Missing height
232 |
233 |         assert_eq!(fe.snapshots.len(), 2);
234 |         assert_eq!(fe.height, 9);
235 |         assert_matches!(fe.snapshots.back(), Some((8, _)));
236 |
237 |         fe.rollback(8);
238 |
239 |         assert_eq!(fe.snapshots.len(), 1);
240 |         assert_eq!(fe.height, 8);
241 |         assert_matches!(fe.snapshots.back(), Some((0, _)));
242 |
// Rolling back below every snapshot leaves the estimator at genesis state.
243 |         fe.rollback(4);
244 |
245 |         assert_eq!(fe.snapshots.len(), 0);
246 |         assert_eq!(fe.height, 0);
247 |     }
248 | }
249 |
-------------------------------------------------------------------------------- /net/src/lib.rs: --------------------------------------------------------------------------------
1 | //! Peer-to-peer networking core types.
2 | #![allow(clippy::type_complexity)]
3 | use std::borrow::Cow;
4 | use std::hash::Hash;
5 | use std::sync::Arc;
6 | use std::{fmt, io, net};
7 |
8 | use crossbeam_channel as chan;
9 |
10 | pub mod error;
11 | pub mod event;
12 | pub mod simulator;
13 | pub mod time;
14 |
15 | pub use event::Publisher;
16 | pub use time::{LocalDuration, LocalTime};
17 |
18 | /// Link direction of the peer connection.
19 | #[derive(Clone, Copy, Debug, PartialEq, Eq)]
20 | pub enum Link {
21 |     /// Inbound connection.
22 |     Inbound,
23 |     /// Outbound connection.
24 |     Outbound,
25 | }
26 |
27 | impl Link {
28 |     /// Check whether the link is outbound.
29 |     pub fn is_outbound(&self) -> bool {
30 |         *self == Link::Outbound
31 |     }
32 |
33 |     /// Check whether the link is inbound.
34 |     pub fn is_inbound(&self) -> bool {
35 |         *self == Link::Inbound
36 |     }
37 | }
38 |
39 | /// Output of a state transition of the state machine.
// NOTE(review): the generic parameter list of `Io` (message, event,
// disconnect-reason and peer-id types) was stripped by the extraction —
// confirm the variant payloads against the upstream source.
40 | #[derive(Clone, Debug)]
41 | pub enum Io {
42 |     /// There are some bytes ready to be sent to a peer.
43 |     Write(Id, M),
44 |     /// Connect to a peer.
45 |     Connect(Id),
46 |     /// Disconnect from a peer.
// Continuation of `enum Io`: remaining variants and supporting types.
47 |     Disconnect(Id, D),
48 |     /// Ask for a wakeup in a specified amount of time.
49 |     SetTimer(LocalDuration),
50 |     /// Emit an event.
51 |     Event(E),
52 | }
53 |
54 | /// Disconnection event which includes the reason.
55 | #[derive(Debug, Clone)]
56 | pub enum Disconnect {
57 |     /// Error while dialing the remote. This error occurs before a connection is
58 |     /// even established. Errors of this kind are usually not transient.
// NOTE(review): generics stripped — presumably `Arc<std::io::Error>` given
// the `use std::sync::Arc` and `io` imports above; confirm upstream.
59 |     DialError(Arc),
60 |     /// Error with an underlying established connection. Sometimes, reconnecting
61 |     /// after such an error is possible.
62 |     ConnectionError(Arc),
63 |     /// Peer was disconnected for another reason.
64 |     StateMachine(T),
65 | }
66 |
67 | impl Disconnect {
68 |     pub fn is_dial_err(&self) -> bool {
69 |         matches!(self, Self::DialError(_))
70 |     }
71 |
72 |     pub fn is_connection_err(&self) -> bool {
73 |         matches!(self, Self::ConnectionError(_))
74 |     }
75 | }
76 |
// Display delegates to the wrapped error or state-machine reason.
77 | impl fmt::Display for Disconnect {
78 |     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
79 |         match self {
80 |             Self::DialError(err) => write!(f, "{}", err),
81 |             Self::ConnectionError(err) => write!(f, "{}", err),
82 |             Self::StateMachine(reason) => write!(f, "{}", reason),
83 |         }
84 |     }
85 | }
86 |
87 | /// Remote peer id, which must be convertible into a [`net::SocketAddr`]
88 | pub trait PeerId: Eq + Ord + Clone + Hash + fmt::Debug + From {
89 |     fn to_socket_addr(&self) -> net::SocketAddr;
90 | }
91 |
// Blanket impl: any id type inter-convertible with `net::SocketAddr`
// (bounds partially stripped by the extraction) is a `PeerId`.
92 | impl PeerId for T
93 | where
94 |     T: Eq + Ord + Clone + Hash + fmt::Debug,
95 |     T: Into,
96 |     T: From,
97 | {
98 |     fn to_socket_addr(&self) -> net::SocketAddr {
// Requires a clone because `Into` consumes the value.
99 |         self.clone().into()
100 |     }
101 | }
102 |
103 | /// A network service.
104 | ///
105 | /// Network protocols must implement this trait to be drivable by the reactor.
106 | pub trait Service: StateMachine {
107 |     /// Commands handled by the service. These commands should originate from an
108 |     /// external "user" thread.
They are passed through the reactor via a channel 109 | /// given to [`Reactor::run`]. The reactor calls [`Service::command_received`] 110 | /// on the service for each command received. 111 | type Command; 112 | 113 | /// An external command has been received. 114 | fn command_received(&mut self, cmd: Self::Command); 115 | } 116 | 117 | /// A service state-machine to implement a network protocol's logic. 118 | /// 119 | /// This trait defines an API for connecting specific protocol domain logic to a 120 | /// [`Reactor`]. It is parametrized by a peer id, which is shared between the reactor 121 | /// and state machine. 122 | /// 123 | /// The state machine emits [`Io`] instructions to the reactor via its [`Iterator`] trait. 124 | pub trait StateMachine: 125 | Iterator::Owned, Self::Event, Self::DisconnectReason, Id>> 126 | { 127 | /// Message type sent between peers. 128 | type Message: fmt::Debug + ToOwned + ?Sized; 129 | /// Events emitted by the state machine. 130 | /// These are forwarded by the reactor to the user thread. 131 | type Event: fmt::Debug; 132 | /// Reason a peer was disconnected, in case the peer was disconnected by the internal 133 | /// state-machine logic. 134 | type DisconnectReason: fmt::Debug + fmt::Display + Into>; 135 | 136 | /// Initialize the state machine. Called once before any event is sent to the state machine. 137 | fn initialize(&mut self, _time: LocalTime) { 138 | // "He was alone. He was unheeded, happy and near to the wild heart of life. He was alone 139 | // and young and wilful and wildhearted, alone amid a waste of wild air and brackish waters 140 | // and the sea-harvest of shells and tangle and veiled grey sunlight and gayclad lightclad 141 | // figures of children and girls and voices childish and girlish in the air." -JJ 142 | } 143 | /// Called by the reactor upon receiving a message from a remote peer. 144 | fn message_received(&mut self, addr: &Id, message: Cow); 145 | /// Connection attempt underway. 
146 | /// 147 | /// This is only encountered when an outgoing connection attempt is made, 148 | /// and is always called before [`StateMachine::connected`]. 149 | /// 150 | /// For incoming connections, [`StateMachine::connected`] is called directly. 151 | fn attempted(&mut self, addr: &Id); 152 | /// New connection with a peer. 153 | fn connected(&mut self, addr: Id, local_addr: &net::SocketAddr, link: Link); 154 | /// Called whenever a remote peer was disconnected, either because of a 155 | /// network-related event or due to a local instruction from this state machine, 156 | /// using [`Io::Disconnect`]. 157 | fn disconnected(&mut self, addr: &Id, reason: Disconnect); 158 | /// Called by the reactor every time the event loop gets data from the network, or times out. 159 | /// Used to update the state machine's internal clock. 160 | /// 161 | /// "a regular short, sharp sound, especially that made by a clock or watch, typically 162 | /// every second." 163 | fn tick(&mut self, local_time: LocalTime); 164 | /// A timer set with [`Io::SetTimer`] has expired. 165 | fn timer_expired(&mut self); 166 | } 167 | 168 | /// Used by certain types of reactors to wake the event loop, for example when a 169 | /// [`Service::Command`] is ready to be processed by the service. 170 | pub trait Waker: Send + Sync + Clone { 171 | /// Wake up! Call this after sending a command to make sure the command is processed 172 | /// in a timely fashion. 173 | fn wake(&self) -> io::Result<()>; 174 | } 175 | 176 | /// Any network reactor that can drive the light-client service. 177 | pub trait Reactor { 178 | /// The type of waker this reactor uses. 179 | type Waker: Waker; 180 | 181 | /// Create a new reactor, initializing it with a publisher for service events, 182 | /// a channel to receive commands, and a channel to shut it down. 
183 | fn new( 184 | shutdown: chan::Receiver<()>, 185 | listening: chan::Sender, 186 | ) -> Result 187 | where 188 | Self: Sized; 189 | 190 | /// Run the given service with the reactor. 191 | /// 192 | /// Takes: 193 | /// 194 | /// * The addresses to listen for connections on. 195 | /// * The [`Service`] to run. 196 | /// * The [`StateMachine::Event`] publisher to use when the service emits events. 197 | /// * The [`Service::Command`] channel on which commands will be received. 198 | fn run( 199 | &mut self, 200 | listen_addrs: &[net::SocketAddr], 201 | service: S, 202 | publisher: E, 203 | commands: chan::Receiver, 204 | ) -> Result<(), error::Error> 205 | where 206 | S: Service, 207 | S::DisconnectReason: Into>, 208 | E: Publisher; 209 | 210 | /// Return a new waker. 211 | /// 212 | /// The reactor can provide multiple wakers such that multiple user threads may wake 213 | /// the event loop. 214 | fn waker(&self) -> Self::Waker; 215 | } 216 | -------------------------------------------------------------------------------- /client/src/tests/mock.rs: -------------------------------------------------------------------------------- 1 | use std::borrow::Cow; 2 | use std::collections::HashMap; 3 | use std::net; 4 | use std::ops::RangeInclusive; 5 | 6 | use nakamoto_chain::block::Block; 7 | use nakamoto_chain::filter::BlockFilter; 8 | 9 | use nakamoto_common::bitcoin::network::constants::ServiceFlags; 10 | use nakamoto_common::bitcoin::network::message::{NetworkMessage, RawNetworkMessage}; 11 | use nakamoto_common::bitcoin::network::Address; 12 | use nakamoto_common::bitcoin::util::uint::Uint256; 13 | use nakamoto_common::bitcoin::Txid; 14 | use nakamoto_common::block::filter::FilterHeader; 15 | use nakamoto_common::block::store::Genesis as _; 16 | use nakamoto_common::block::time::{AdjustedTime, LocalTime}; 17 | use nakamoto_common::block::tree::{self, ImportResult}; 18 | use nakamoto_common::block::{BlockHash, BlockHeader, Height, Transaction}; 19 | use 
nakamoto_common::network::Network; 20 | use nakamoto_common::nonempty::NonEmpty; 21 | use nakamoto_common::p2p::peer::KnownAddress; 22 | use nakamoto_test::block::cache::model; 23 | 24 | use nakamoto_net::event; 25 | use nakamoto_net::StateMachine as _; 26 | use nakamoto_p2p::fsm; 27 | use nakamoto_p2p::fsm::Command; 28 | use nakamoto_p2p::fsm::Link; 29 | use nakamoto_p2p::fsm::Peer; 30 | use nakamoto_p2p::fsm::StateMachine; 31 | 32 | use crate::client::{chan, Event, Loading}; 33 | use crate::handle::{self, Handle}; 34 | 35 | pub struct Client { 36 | // Used by tests. 37 | pub network: Network, 38 | pub blocks: chan::Sender<(Block, Height)>, 39 | pub filters: chan::Sender<(BlockFilter, BlockHash, Height)>, 40 | pub subscriber: event::Broadcast, 41 | pub commands: chan::Receiver, 42 | pub loading: event::Emitter, 43 | pub protocol: StateMachine< 44 | model::Cache, 45 | model::FilterCache, 46 | HashMap, 47 | AdjustedTime, 48 | >, 49 | 50 | // Used in handle. 51 | blocks_: chan::Receiver<(Block, Height)>, 52 | filters_: chan::Receiver<(BlockFilter, BlockHash, Height)>, 53 | subscriber_: event::Subscriber, 54 | commands_: chan::Sender, 55 | } 56 | 57 | impl Client { 58 | pub fn new(network: Network) -> Self { 59 | Self { 60 | network, 61 | ..Self::default() 62 | } 63 | } 64 | 65 | pub fn handle(&self) -> TestHandle { 66 | TestHandle { 67 | tip: (0, self.network.genesis(), self.network.genesis().work()), 68 | network: self.network, 69 | blocks: self.blocks_.clone(), 70 | filters: self.filters_.clone(), 71 | subscriber: self.subscriber_.clone(), 72 | commands: self.commands_.clone(), 73 | } 74 | } 75 | 76 | pub fn received(&mut self, remote: &net::SocketAddr, payload: NetworkMessage) { 77 | let msg = RawNetworkMessage { 78 | magic: self.network.magic(), 79 | payload, 80 | }; 81 | 82 | self.protocol.message_received(remote, Cow::Owned(msg)); 83 | } 84 | 85 | pub fn step(&mut self) -> Vec { 86 | let mut outputs = Vec::new(); 87 | 88 | for out in self.protocol.drain() { 89 
| match out { 90 | fsm::Io::Event(event) => { 91 | self.subscriber.broadcast(event.clone()); 92 | } 93 | _ => outputs.push(out), 94 | } 95 | } 96 | outputs 97 | } 98 | } 99 | 100 | impl Default for Client { 101 | fn default() -> Self { 102 | let (blocks, blocks_) = chan::unbounded(); 103 | let (filters, filters_) = chan::unbounded(); 104 | let (commands_, commands) = chan::unbounded(); 105 | let (subscriber, subscriber_) = event::broadcast(|e, p| p.emit(e)); 106 | let loading = event::Emitter::default(); 107 | let network = Network::default(); 108 | let protocol = { 109 | let tree = model::Cache::new(network.genesis()); 110 | let cfilters = model::FilterCache::new(FilterHeader::genesis(network)); 111 | let peers = HashMap::new(); 112 | let time = LocalTime::now(); 113 | let clock = AdjustedTime::new(time); 114 | let rng = fastrand::Rng::new(); 115 | let cfg = fsm::Config::default(); 116 | 117 | StateMachine::new(tree, cfilters, peers, clock, rng, cfg) 118 | }; 119 | 120 | Self { 121 | network, 122 | protocol, 123 | loading, 124 | blocks, 125 | blocks_, 126 | filters, 127 | filters_, 128 | subscriber, 129 | subscriber_, 130 | commands, 131 | commands_, 132 | } 133 | } 134 | } 135 | 136 | #[derive(Clone)] 137 | pub struct TestHandle { 138 | pub tip: (Height, BlockHeader, Uint256), 139 | 140 | #[allow(dead_code)] 141 | network: Network, 142 | blocks: chan::Receiver<(Block, Height)>, 143 | filters: chan::Receiver<(BlockFilter, BlockHash, Height)>, 144 | subscriber: event::Subscriber, 145 | commands: chan::Sender, 146 | } 147 | 148 | impl Handle for TestHandle { 149 | fn get_tip(&self) -> Result<(Height, BlockHeader, Uint256), handle::Error> { 150 | Ok(self.tip) 151 | } 152 | 153 | fn get_block(&self, _hash: &BlockHash) -> Result, handle::Error> { 154 | unimplemented!() 155 | } 156 | 157 | fn get_block_by_height(&self, _height: Height) -> Result, handle::Error> { 158 | unimplemented!() 159 | } 160 | 161 | fn get_submitted_transaction( 162 | &self, 163 | _txid: &Txid, 
164 | ) -> Result, handle::Error> { 165 | unimplemented!() 166 | } 167 | 168 | fn request_block(&self, hash: &BlockHash) -> Result<(), handle::Error> { 169 | self.command(Command::RequestBlock(*hash))?; 170 | 171 | Ok(()) 172 | } 173 | 174 | fn request_filters(&self, range: RangeInclusive) -> Result<(), handle::Error> { 175 | let (transmit, receive) = chan::bounded(1); 176 | self.command(Command::RequestFilters(range, transmit))?; 177 | 178 | receive.recv()?.map_err(handle::Error::GetFilters) 179 | } 180 | 181 | fn find_branch( 182 | &self, 183 | _to: &BlockHash, 184 | ) -> Result)>, handle::Error> { 185 | unimplemented!() 186 | } 187 | 188 | fn blocks(&self) -> chan::Receiver<(Block, Height)> { 189 | self.blocks.clone() 190 | } 191 | 192 | fn filters(&self) -> chan::Receiver<(BlockFilter, BlockHash, Height)> { 193 | self.filters.clone() 194 | } 195 | 196 | fn events(&self) -> chan::Receiver { 197 | self.subscriber.subscribe() 198 | } 199 | 200 | fn command(&self, cmd: Command) -> Result<(), handle::Error> { 201 | log::debug!("Sending {:?}", cmd); 202 | self.commands.send(cmd).map_err(handle::Error::from) 203 | } 204 | 205 | fn broadcast( 206 | &self, 207 | _msg: NetworkMessage, 208 | _predicate: fn(Peer) -> bool, 209 | ) -> Result, handle::Error> { 210 | unimplemented!() 211 | } 212 | 213 | fn connect(&self, _addr: net::SocketAddr) -> Result { 214 | unimplemented!() 215 | } 216 | 217 | fn disconnect(&self, _addr: net::SocketAddr) -> Result<(), handle::Error> { 218 | unimplemented!() 219 | } 220 | 221 | fn query_tree( 222 | &self, 223 | _query: impl Fn(&dyn nakamoto_chain::BlockReader) + Send + Sync + 'static, 224 | ) -> Result<(), handle::Error> { 225 | unimplemented!() 226 | } 227 | 228 | fn import_headers( 229 | &self, 230 | _headers: Vec, 231 | ) -> Result, handle::Error> { 232 | unimplemented!() 233 | } 234 | 235 | fn import_addresses(&self, _addrs: Vec
) -> Result<(), handle::Error> { 236 | unimplemented!() 237 | } 238 | 239 | fn submit_transaction( 240 | &self, 241 | _tx: Transaction, 242 | ) -> Result, handle::Error> { 243 | unimplemented!() 244 | } 245 | 246 | fn wait(&self, _f: F) -> Result 247 | where 248 | F: FnMut(fsm::Event) -> Option, 249 | { 250 | unimplemented!() 251 | } 252 | 253 | fn wait_for_peers( 254 | &self, 255 | _count: usize, 256 | _required_services: impl Into, 257 | ) -> Result, handle::Error> { 258 | unimplemented!() 259 | } 260 | 261 | fn wait_for_height(&self, _h: Height) -> Result { 262 | unimplemented!() 263 | } 264 | 265 | fn shutdown(self) -> Result<(), handle::Error> { 266 | Ok(()) 267 | } 268 | } 269 | -------------------------------------------------------------------------------- /client/src/tests.rs: -------------------------------------------------------------------------------- 1 | #[cfg(test)] 2 | pub mod mock; 3 | 4 | use std::collections::HashMap; 5 | use std::net; 6 | use std::thread; 7 | use std::time; 8 | 9 | use nakamoto_chain::block::cache::BlockCache; 10 | use nakamoto_chain::block::store; 11 | use nakamoto_chain::filter::cache::FilterCache; 12 | use nakamoto_common::bitcoin::network::constants::ServiceFlags; 13 | use nakamoto_common::block::time::AdjustedTime; 14 | use nakamoto_common::block::Height; 15 | use nakamoto_common::network::Services; 16 | use nakamoto_net::event; 17 | use nakamoto_test::{logger, BITCOIN_HEADERS}; 18 | 19 | use crate::client::{self, Client, Config}; 20 | use crate::error; 21 | use crate::handle::Handle as _; 22 | use crate::service::Service; 23 | 24 | type Reactor = nakamoto_net_poll::Reactor; 25 | 26 | fn network( 27 | cfgs: &[Config], 28 | ) -> Result< 29 | Vec<( 30 | client::Handle, 31 | net::SocketAddr, 32 | thread::JoinHandle<()>, 33 | )>, 34 | error::Error, 35 | > { 36 | let mut handles = Vec::new(); 37 | 38 | for cfg in cfgs.iter().cloned() { 39 | let checkpoints = cfg.network.checkpoints().collect::>(); 40 | let genesis = 
cfg.network.genesis(); 41 | let params = cfg.network.params(); 42 | 43 | let node = Client::::new()?; 44 | let mut handle = node.handle(); 45 | handle.set_timeout(time::Duration::from_secs(5)); 46 | 47 | let t = thread::spawn({ 48 | let params = params.clone(); 49 | let checkpoints = checkpoints.clone(); 50 | 51 | move || { 52 | let store = store::Memory::new((genesis, vec![]).into()); 53 | let cache = BlockCache::from(store, params, &checkpoints).unwrap(); 54 | let filters = FilterCache::load(store::Memory::default()).unwrap(); 55 | let peers = HashMap::new(); 56 | let local_time = time::SystemTime::now().into(); 57 | let clock = AdjustedTime::::new(local_time); 58 | let rng = fastrand::Rng::new(); 59 | 60 | node.run_service( 61 | &[([0, 0, 0, 0], 0).into()], 62 | Service::new(cache, filters, peers, clock, rng, cfg), 63 | ) 64 | .unwrap(); 65 | } 66 | }); 67 | let addr = handle.listening().unwrap(); 68 | 69 | handles.push((handle, addr, t)); 70 | } 71 | 72 | for (i, (handle, _, _)) in handles.iter().enumerate() { 73 | for (_, peer, _) in handles.iter().skip(i + 1) { 74 | handle.connect(*peer).unwrap(); 75 | } 76 | } 77 | 78 | Ok(handles) 79 | } 80 | 81 | #[test] 82 | fn test_full_sync() { 83 | logger::init(log::Level::Debug); 84 | 85 | let cfgs = vec![ 86 | Config { 87 | services: ServiceFlags::NETWORK, 88 | ..Config::default() 89 | }; 90 | 3 91 | ]; 92 | let nodes = network(&cfgs).unwrap(); 93 | let (handle, _, _) = nodes.last().unwrap(); 94 | let headers = BITCOIN_HEADERS.tail.clone(); 95 | let height = headers.len() as Height; 96 | let hash = headers.last().unwrap().block_hash(); 97 | 98 | // Ensure all peers are connected to misha, 99 | // so that misha can effectively send blocks to 100 | // all peers on time. 
101 | handle.wait_for_peers(2, Services::Chain).unwrap(); 102 | 103 | handle 104 | .import_headers(headers) 105 | .expect("command is successful") 106 | .expect("chain is valid"); 107 | 108 | for (mut node, _, thread) in nodes.into_iter() { 109 | node.set_timeout(std::time::Duration::from_secs(5)); 110 | assert_eq!(node.wait_for_height(height).unwrap(), hash); 111 | 112 | node.shutdown().unwrap(); 113 | thread.join().unwrap(); 114 | } 115 | } 116 | 117 | #[test] 118 | fn test_wait_for_peers() { 119 | logger::init(log::Level::Debug); 120 | 121 | let cfgs = vec![ 122 | Config { 123 | services: ServiceFlags::NETWORK, 124 | ..Default::default() 125 | }; 126 | 5 127 | ]; 128 | 129 | let nodes = network(&cfgs).unwrap(); 130 | let (handle, _, _) = nodes.first().unwrap(); 131 | 132 | let peers = handle 133 | .wait_for_peers(nodes.len() - 1, Services::Chain) 134 | .unwrap(); 135 | 136 | assert_eq!(peers.len(), nodes.len() - 1); 137 | } 138 | 139 | #[test] 140 | fn test_send_handle() { 141 | let client: Client = Client::new().unwrap(); 142 | let handle = client.handle(); 143 | 144 | thread::spawn(move || { 145 | handle.wait_for_height(1).unwrap(); 146 | }); 147 | } 148 | 149 | #[test] 150 | fn test_multiple_handle_events() { 151 | use std::time; 152 | 153 | let cfg = Config::default(); 154 | let genesis = cfg.network.genesis(); 155 | let params = cfg.network.params(); 156 | let client: Client = Client::new().unwrap(); 157 | let store = store::Memory::new((genesis, vec![]).into()); 158 | let cache = BlockCache::from(store, params, &[]).unwrap(); 159 | let filters = FilterCache::load(store::Memory::default()).unwrap(); 160 | let peers = HashMap::new(); 161 | 162 | let alice = client.handle(); 163 | let bob = alice.clone(); 164 | let alice_events = alice.events(); 165 | let bob_events = bob.events(); 166 | 167 | thread::spawn(|| { 168 | let local_time = time::SystemTime::now().into(); 169 | let clock = AdjustedTime::::new(local_time); 170 | let rng = fastrand::Rng::new(); 171 | 
172 | client 173 | .run_service( 174 | &[([0, 0, 0, 0], 0).into()], 175 | Service::new(cache, filters, peers, clock, rng, cfg), 176 | ) 177 | .unwrap(); 178 | }); 179 | 180 | event::wait( 181 | &alice_events, 182 | |e| match e { 183 | client::Event::Ready { .. } => Some(()), 184 | _ => None, 185 | }, 186 | time::Duration::from_secs(2), 187 | ) 188 | .unwrap(); 189 | 190 | event::wait( 191 | &bob_events, 192 | |e| match e { 193 | client::Event::Ready { .. } => Some(()), 194 | _ => None, 195 | }, 196 | time::Duration::from_secs(2), 197 | ) 198 | .unwrap(); 199 | } 200 | 201 | #[test] 202 | fn test_handle_shutdown() { 203 | let cfg = Config::default(); 204 | let genesis = cfg.network.genesis(); 205 | let params = cfg.network.params(); 206 | let client: Client = Client::new().unwrap(); 207 | let handle = client.handle(); 208 | let store = store::Memory::new((genesis, vec![]).into()); 209 | let cache = BlockCache::from(store, params, &[]).unwrap(); 210 | let filters = FilterCache::load(store::Memory::default()).unwrap(); 211 | let peers = HashMap::new(); 212 | 213 | let th = thread::spawn(|| { 214 | let local_time = time::SystemTime::now().into(); 215 | let clock = AdjustedTime::::new(local_time); 216 | let rng = fastrand::Rng::new(); 217 | 218 | client.run_service(&[], Service::new(cache, filters, peers, clock, rng, cfg)) 219 | }); 220 | 221 | handle.shutdown().unwrap(); 222 | th.join().unwrap().unwrap(); 223 | } 224 | 225 | #[test] 226 | fn test_client_dropped() { 227 | let client: Client = Client::new().unwrap(); 228 | let handle = client.handle(); 229 | 230 | drop(client); 231 | 232 | assert!(matches!( 233 | handle.get_tip(), 234 | Err(client::handle::Error::Disconnected) 235 | )); 236 | } 237 | 238 | #[test] 239 | fn test_query_headers() { 240 | let cfg = Config::default(); 241 | let genesis = cfg.network.genesis(); 242 | let params = cfg.network.params(); 243 | let client: Client = Client::new().unwrap(); 244 | let handle = client.handle(); 245 | let store = 
store::Memory::new((genesis, BITCOIN_HEADERS.tail.clone()).into()); 246 | let cache = BlockCache::from(store, params, &[]).unwrap(); 247 | let filters = FilterCache::load(store::Memory::default()).unwrap(); 248 | 249 | thread::spawn(|| { 250 | let local_time = time::SystemTime::now().into(); 251 | let clock = AdjustedTime::::new(local_time); 252 | let rng = fastrand::Rng::new(); 253 | 254 | client.run_service( 255 | &[], 256 | Service::new(cache, filters, HashMap::new(), clock, rng, cfg), 257 | ) 258 | }); 259 | 260 | let height = 1; 261 | let (tx, rx) = crate::chan::bounded(1); 262 | 263 | handle 264 | .query_tree(move |r| { 265 | let blk = r.get_block_by_height(height).cloned(); 266 | tx.send((blk, blk.is_some())).ok(); 267 | }) 268 | .unwrap(); 269 | 270 | let (header, found) = rx.recv().unwrap(); 271 | 272 | assert_eq!(header, BITCOIN_HEADERS.tail.first().cloned()); 273 | assert!(found); 274 | } 275 | -------------------------------------------------------------------------------- /p2p/src/fsm/filter_cache.rs: -------------------------------------------------------------------------------- 1 | //! Compact filter cache. 2 | use std::collections::BTreeMap; 3 | use std::rc::Rc; 4 | 5 | use nakamoto_common::block::filter::BlockFilter; 6 | use nakamoto_common::block::Height; 7 | 8 | /// Cachable block filter. 9 | #[allow(clippy::len_without_is_empty)] 10 | pub trait Filter: Eq + PartialEq { 11 | /// Length in bytes of the block filter. 12 | fn len(&self) -> usize; 13 | } 14 | 15 | impl Filter for Rc { 16 | fn len(&self) -> usize { 17 | self.content.len() 18 | } 19 | } 20 | 21 | impl Filter for BlockFilter { 22 | fn len(&self) -> usize { 23 | self.content.len() 24 | } 25 | } 26 | 27 | /// An in-memory compact filter cache with a fixed capacity. 28 | #[derive(Debug)] 29 | pub struct FilterCache { 30 | /// Cache. 31 | cache: BTreeMap, 32 | /// Cache size in bytes. 33 | size: usize, 34 | /// Cache capacity in bytes. 
// `FilterCache` continued: capacity field, `Default`, and basic accessors.
// NOTE(review): the cache's type parameter (a `Filter` implementation, see
// the trait above) was stripped by the extraction throughout this impl.
35 |     capacity: usize,
36 | }
37 |
38 | impl Default for FilterCache {
39 |     fn default() -> Self {
40 |         Self {
41 |             cache: BTreeMap::new(),
42 |             size: 0,
// A default cache has zero capacity: pushes are accepted only to be
// immediately evicted (or rejected when the filter is non-empty).
43 |             capacity: 0,
44 |         }
45 |     }
46 | }
47 |
48 | impl FilterCache {
49 |     /// Create a new filter cache.
50 |     pub fn new(capacity: usize) -> Self {
51 |         Self {
52 |             cache: BTreeMap::new(),
53 |             size: 0,
54 |             capacity,
55 |         }
56 |     }
57 |
58 |     /// Return the size of the cache filters in bytes.
59 |     pub fn size(&self) -> usize {
60 |         self.size
61 |     }
62 |
63 |     /// Return the cache capacity in bytes.
64 |     ///
65 |     /// ```
66 |     /// use nakamoto_p2p::fsm::filter_cache::FilterCache;
67 |     /// use nakamoto_common::block::filter::BlockFilter;
68 |     ///
69 |     /// let mut cache = FilterCache::::new(32);
70 |     /// assert_eq!(cache.capacity(), 32);
71 |     /// ```
72 |     pub fn capacity(&self) -> usize {
73 |         self.capacity
74 |     }
75 |
76 |     /// Return the number of filters in the cache.
77 |     pub fn len(&self) -> usize {
78 |         self.cache.len()
79 |     }
80 |
81 |     /// Check whether the cache is empty.
82 |     pub fn is_empty(&self) -> bool {
83 |         self.cache.len() == 0
84 |     }
85 |
86 |     /// Push a filter into the cache.
87 |     ///
88 |     /// Returns `false` only if the filter on its own is larger than the cache
89 |     /// capacity, in which case it is not added.
// NOTE(review): the original doc claimed `false` meant the height "wasn't
// subsequent to the last filter height", but the implementation never checks
// contiguity — the doctest below pushes height 7 after 5 and asserts `true`.
// Doc text corrected to match the code's actual behavior.
90 |     ///
91 |     /// ```
92 |     /// use nakamoto_p2p::fsm::filter_cache::FilterCache;
93 |     /// use nakamoto_common::block::filter::BlockFilter;
94 |     ///
95 |     /// let mut cache = FilterCache::new(8);
96 |     ///
97 |     /// assert!(cache.push(3, BlockFilter::new(&[1, 2, 3])));
98 |     /// assert!(cache.push(4, BlockFilter::new(&[4, 5])));
99 |     /// assert!(cache.push(5, BlockFilter::new(&[6])));
100 |     ///
101 |     /// assert_eq!(cache.len(), 3);
102 |     /// assert_eq!(cache.size(), 6);
103 |     ///
104 |     /// assert!(cache.push(7, BlockFilter::new(&[7]))); // Non-contiguous height.
105 |     /// assert_eq!(cache.len(), 4);
106 |     ///
107 |     /// assert!(cache.push(6, BlockFilter::new(&[8]))); // Hit max capacity.
// `FilterCache::push` continued: doctest tail and implementation.
108 |     /// assert_eq!(cache.len(), 5);
109 |     /// assert_eq!(cache.size(), 8);
110 |     /// assert_eq!(cache.start(), Some(3));
111 |     ///
112 |     /// assert!(cache.push(8, BlockFilter::new(&[9]))); // Evict the first element.
113 |     /// assert_eq!(cache.len(), 5);
114 |     /// assert_eq!(cache.size(), 6);
115 |     /// assert_eq!(cache.start(), Some(4));
116 |     /// assert_eq!(cache.end(), Some(8));
117 |     ///
118 |     /// ```
119 |     pub fn push(&mut self, height: Height, filter: T) -> bool {
// Invariant: the byte count never exceeds capacity on entry.
120 |         assert!(self.size <= self.capacity);
121 |
// A filter that alone exceeds the capacity is rejected outright.
122 |         let size = filter.len();
123 |         if size > self.capacity {
124 |             return false;
125 |         }
126 |
// NOTE(review): if `height` is already present, `BTreeMap::insert` silently
// replaces the old filter but its length is never subtracted from
// `self.size`, so the byte count over-reports after a duplicate-height push
// (and can trip the assert above on the next call). Confirm whether callers
// guarantee unique heights, or subtract the replaced filter's length here.
127 |         self.cache.insert(height, filter);
128 |         self.size += size;
129 |
// Evict from the lowest height upward until back under capacity. Terminates
// because an empty cache has size 0 <= capacity.
130 |         while self.size > self.capacity {
131 |             if let Some(height) = self.cache.keys().cloned().next() {
132 |                 if let Some(filter) = self.cache.remove(&height) {
133 |                     self.size -= filter.len();
134 |                 }
135 |             }
136 |         }
137 |         true
138 |     }
139 |
140 |     /// Get the start height of the cache.
141 |     ///
142 |     /// ```
143 |     /// use nakamoto_p2p::fsm::filter_cache::FilterCache;
144 |     /// use nakamoto_common::block::filter::BlockFilter;
145 |     ///
146 |     /// let mut cache = FilterCache::new(32);
147 |     ///
148 |     /// cache.push(3, BlockFilter::new(&[1]));
149 |     /// cache.push(4, BlockFilter::new(&[2]));
150 |     /// cache.push(5, BlockFilter::new(&[3]));
151 |     ///
152 |     /// assert_eq!(cache.start(), Some(3));
153 |     /// assert_eq!(cache.end(), Some(5));
154 |     /// ```
// NOTE(review): return types presumably `Option<Height>` — generics stripped.
155 |     pub fn start(&self) -> Option {
156 |         self.cache.keys().next().copied()
157 |     }
158 |
159 |     /// Get the end height of the cache.
160 |     pub fn end(&self) -> Option {
161 |         self.cache.keys().next_back().copied()
162 |     }
163 |
164 |     /// Iterate over cached filters.
165 |     pub fn iter(&self) -> impl Iterator {
166 |         self.cache.iter().map(|(h, b)| (h, b))
167 |     }
168 |
169 |     /// Iterate over cached heights.
170 | pub fn heights(&self) -> impl Iterator + '_ { 171 | self.cache.keys().copied() 172 | } 173 | 174 | /// Get a filter in the cache by height. 175 | /// 176 | /// ``` 177 | /// use nakamoto_p2p::fsm::filter_cache::FilterCache; 178 | /// use nakamoto_common::block::filter::BlockFilter; 179 | /// 180 | /// let mut cache = FilterCache::new(32); 181 | /// 182 | /// cache.push(3, BlockFilter::new(&[1])); 183 | /// cache.push(4, BlockFilter::new(&[2])); 184 | /// cache.push(5, BlockFilter::new(&[3])); 185 | /// 186 | /// assert_eq!(cache.get(&4).unwrap().content, vec![2]); 187 | /// assert_eq!(cache.get(&5).unwrap().content, vec![3]); 188 | /// assert_eq!(cache.get(&1), None); 189 | /// 190 | /// ``` 191 | pub fn get(&self, height: &Height) -> Option<&T> { 192 | self.cache.get(height) 193 | } 194 | 195 | /// Rollback the cache to a certain height. Drops all filters with a height greater 196 | /// than the given height. 197 | /// 198 | /// ``` 199 | /// use nakamoto_p2p::fsm::filter_cache::FilterCache; 200 | /// use nakamoto_common::block::filter::BlockFilter; 201 | /// 202 | /// let mut cache = FilterCache::new(0); 203 | /// 204 | /// cache.push(3, BlockFilter::new(&[])); 205 | /// cache.push(4, BlockFilter::new(&[])); 206 | /// cache.push(5, BlockFilter::new(&[])); 207 | /// 208 | /// cache.rollback(4); 209 | /// assert_eq!(cache.end(), Some(4)); 210 | /// 211 | /// cache.rollback(5); 212 | /// assert_eq!(cache.end(), Some(4)); 213 | /// 214 | /// cache.rollback(1); 215 | /// assert_eq!(cache.end(), None); 216 | /// ``` 217 | pub fn rollback(&mut self, height: Height) { 218 | while let Some(h) = self.end() { 219 | if h > height { 220 | if let Some(k) = self.cache.keys().cloned().next_back() { 221 | if let Some(filter) = self.cache.remove(&k) { 222 | self.size -= filter.len(); 223 | } 224 | } 225 | } else { 226 | break; 227 | } 228 | } 229 | } 230 | } 231 | 232 | #[cfg(test)] 233 | mod tests { 234 | use super::*; 235 | use quickcheck::{Arbitrary, Gen}; 236 | use 
quickcheck_macros::quickcheck; 237 | 238 | #[derive(Clone, Debug)] 239 | enum Op { 240 | Push(BlockFilter), 241 | Rollback, 242 | } 243 | 244 | impl Op { 245 | fn apply(self, cache: &mut FilterCache, rng: &mut fastrand::Rng) { 246 | match self { 247 | Self::Push(filter) => { 248 | if let Some(end) = cache.end() { 249 | cache.push(end + 1, filter); 250 | } else { 251 | cache.push(rng.u64(..), filter); 252 | } 253 | } 254 | Self::Rollback => { 255 | if let (Some(start), Some(end)) = (cache.start(), cache.end()) { 256 | cache.rollback(rng.u64(start - 1..=end + 1)); 257 | } 258 | } 259 | } 260 | } 261 | } 262 | 263 | impl Arbitrary for Op { 264 | fn arbitrary(g: &mut Gen) -> Self { 265 | let n = u8::arbitrary(g); 266 | 267 | match n % 4 { 268 | 0..=2 => { 269 | let content: Vec<_> = Arbitrary::arbitrary(g); 270 | let filter = BlockFilter::new(&content); 271 | 272 | Op::Push(filter) 273 | } 274 | 3 => Op::Rollback, 275 | 276 | _ => unreachable! {}, 277 | } 278 | } 279 | } 280 | 281 | #[quickcheck] 282 | fn prop_capacity(capacity: usize, operations: Vec, seed: u64) { 283 | let mut cache = FilterCache::new(capacity); 284 | let mut rng = fastrand::Rng::with_seed(seed); 285 | 286 | for op in operations.into_iter() { 287 | op.apply(&mut cache, &mut rng); 288 | 289 | let size = cache.cache.values().map(|f| f.content.len()).sum::(); 290 | 291 | assert!(cache.size <= cache.capacity); 292 | assert!(size == cache.size); 293 | } 294 | } 295 | } 296 | -------------------------------------------------------------------------------- /wallet/src/wallet.rs: -------------------------------------------------------------------------------- 1 | pub mod db; 2 | pub mod hw; 3 | pub mod ui; 4 | 5 | use std::collections::HashSet; 6 | use std::io; 7 | use std::ops::ControlFlow; 8 | use std::ops::ControlFlow::*; 9 | 10 | use crossbeam_channel as chan; 11 | use termion::event::Event; 12 | 13 | use nakamoto_client as client; 14 | use nakamoto_client::handle::Handle; 15 | use 
nakamoto_common::bitcoin::Address; 16 | use nakamoto_common::bitcoin::{OutPoint, Script, Transaction, TxOut}; 17 | use nakamoto_common::block::Height; 18 | 19 | use crate::error::Error; 20 | use crate::input::Signal; 21 | 22 | pub use db::Db; 23 | pub use db::{Read as _, Write as _}; 24 | pub use hw::Hw; 25 | pub use ui::Ui; 26 | 27 | pub type Utxos = Vec<(OutPoint, TxOut)>; 28 | 29 | #[derive(Default)] 30 | pub struct Tips { 31 | header: Height, 32 | cfilter: Height, 33 | } 34 | 35 | /// Wallet state. 36 | pub struct Wallet { 37 | client: H, 38 | db: Db, 39 | ui: Ui, 40 | hw: Hw, 41 | network: client::Network, 42 | watch: HashSet
, 43 | tips: Tips, 44 | } 45 | 46 | impl Wallet { 47 | /// Create a new wallet. 48 | pub fn new(client: H, network: client::Network, db: Db, hw: Hw) -> Self { 49 | Self { 50 | client, 51 | db, 52 | hw, 53 | network, 54 | watch: HashSet::new(), 55 | ui: Ui::default(), 56 | tips: Tips::default(), 57 | } 58 | } 59 | 60 | /// Calculate the wallet balance. 61 | pub fn balance(&self) -> Result { 62 | self.db.balance().map_err(Error::from) 63 | } 64 | 65 | /// Apply a transaction to the wallet's UTXO set. 66 | pub fn apply(&mut self, tx: &Transaction, scripts: &[Script]) { 67 | // Look for outputs. 68 | for (vout, output) in tx.output.iter().enumerate() { 69 | // Received coin. Mark the address as *used*, and update the balance for that 70 | // address. 71 | if scripts.contains(&output.script_pubkey) { 72 | // Update UTXOs. 73 | let txid = tx.txid(); 74 | let addr = 75 | Address::from_script(&output.script_pubkey, self.network.into()).unwrap(); 76 | 77 | self.db 78 | .add_utxo(txid, vout as u32, addr, output.value) 79 | .unwrap(); 80 | } 81 | } 82 | 83 | // Look for inputs. 84 | for input in tx.input.iter() { 85 | // Spent coin. Remove the address from the set, since it is no longer ours. 86 | if let Ok(Some((_, _output))) = self.db.remove_utxo(&input.previous_output) { 87 | // TODO: Handle change addresses? 88 | } 89 | } 90 | } 91 | 92 | /// Run the wallet loop until it exits. 
93 | pub fn run( 94 | &mut self, 95 | birth: Height, 96 | inputs: chan::Receiver, 97 | signals: chan::Receiver, 98 | loading: chan::Receiver, 99 | events: chan::Receiver, 100 | offline: bool, 101 | mut term: W, 102 | ) -> Result<(), Error> { 103 | let addresses = self.db.addresses()?; 104 | if addresses.is_empty() { 105 | log::info!("No addresses found, requesting from hardware device.."); 106 | 107 | match self.hw.request_addresses(0..16, hw::AddressFormat::P2WPKH) { 108 | Ok(addrs) => { 109 | for (ix, addr) in addrs { 110 | self.db.add_address(&addr, ix, None)?; 111 | self.watch.insert(addr); 112 | } 113 | } 114 | Err(err) => { 115 | log::warn!("Failed to request addresses from hardware device: {err}"); 116 | } 117 | } 118 | } else { 119 | for addr in addresses { 120 | self.watch.insert(addr.address); 121 | } 122 | } 123 | 124 | // TODO: Don't rescan if watch list is empty. 125 | 126 | // Convert our address list into scripts. 127 | let watch: Vec<_> = self.watch.iter().map(|a| a.script_pubkey()).collect(); 128 | let balance = self.db.balance()?; 129 | 130 | self.ui.message = format!("Scanning from block height {}", birth); 131 | self.ui.reset(&mut term)?; 132 | self.ui.decorations(&mut term)?; 133 | self.ui.set_balance(balance); 134 | self.ui.offline(offline); 135 | 136 | if offline { 137 | ui::refresh(&mut self.ui, &self.db, &mut term)?; 138 | } else { 139 | // Start a re-scan from the birht height, which keeps scanning as new blocks arrive. 140 | self.client.rescan(birth.., watch.iter().cloned())?; 141 | 142 | // Loading... 143 | loop { 144 | chan::select! { 145 | recv(inputs) -> input => { 146 | let input = input?; 147 | 148 | if let Break(()) = self.ui.handle_input_event(input)? { 149 | return Ok(()); 150 | } 151 | } 152 | recv(signals) -> signal => { 153 | let signal = signal?; 154 | 155 | if let Break(()) = self.handle_signal(signal, &mut term)? 
{ 156 | return Ok(()); 157 | } 158 | } 159 | recv(loading) -> event => { 160 | if let Ok(event) = event { 161 | if let Break(()) = self.ui.handle_loading_event(event)? { 162 | return Ok(()); 163 | } 164 | } else { 165 | break; 166 | } 167 | } 168 | } 169 | ui::refresh(&mut self.ui, &self.db, &mut term)?; 170 | } 171 | } 172 | 173 | // Running... 174 | loop { 175 | chan::select! { 176 | recv(inputs) -> input => { 177 | let input = input?; 178 | 179 | if let Break(()) = self.handle_input(input)? { 180 | return Ok(()); 181 | } 182 | } 183 | recv(signals) -> signal => { 184 | let signal = signal?; 185 | 186 | if let Break(()) = self.handle_signal(signal, &mut term)? { 187 | return Ok(()); 188 | } 189 | } 190 | recv(events) -> event => { 191 | let event = event?; 192 | 193 | if let Break(()) = self.handle_client_event(event, &watch, offline, &mut term)? { 194 | break; 195 | } 196 | } 197 | } 198 | ui::refresh(&mut self.ui, &self.db, &mut term)?; 199 | } 200 | Ok(()) 201 | } 202 | 203 | fn handle_input(&mut self, input: Event) -> Result, Error> { 204 | use termion::event::Key; 205 | 206 | match input { 207 | Event::Key(Key::F(1)) => { 208 | self.hw.connect()?; 209 | } 210 | _ => return self.ui.handle_input_event(input).map_err(Error::from), 211 | } 212 | 213 | Ok(Continue(())) 214 | } 215 | 216 | fn handle_signal( 217 | &mut self, 218 | signal: Signal, 219 | term: &mut W, 220 | ) -> Result, Error> { 221 | log::info!("Received signal: {:?}", signal); 222 | 223 | match signal { 224 | Signal::WindowResized => { 225 | self.ui.redraw(&self.db, term)?; 226 | } 227 | Signal::Interrupted => return Ok(Break(())), 228 | } 229 | Ok(Continue(())) 230 | } 231 | 232 | fn handle_client_event( 233 | &mut self, 234 | event: client::Event, 235 | watch: &[Script], 236 | offline: bool, 237 | term: &mut W, 238 | ) -> Result, Error> { 239 | log::debug!("Received event: {}", event); 240 | 241 | match event { 242 | client::Event::Ready { tip, .. 
} => { 243 | self.ui.handle_ready(tip, offline); 244 | } 245 | client::Event::PeerHeightUpdated { height } => { 246 | self.ui.handle_peer_height(height); 247 | } 248 | client::Event::FilterProcessed { height, .. } => { 249 | self.ui.handle_filter_processed(height); 250 | self.tips.cfilter = height; 251 | } 252 | client::Event::BlockHeadersImported { height, .. } => { 253 | self.tips.header = height; 254 | } 255 | client::Event::BlockMatched { block, height } => { 256 | for t in &block.txdata { 257 | self.apply(t, watch); 258 | } 259 | let balance = self.balance()?; 260 | self.ui.set_balance(balance); 261 | self.ui.redraw(&self.db, term)?; 262 | 263 | log::info!( 264 | "Processed block at height #{} (balance = {})", 265 | height, 266 | balance, 267 | ); 268 | } 269 | client::Event::Scanned { height, .. } => { 270 | self.ui.handle_synced(height, self.tips.header); 271 | } 272 | _ => {} 273 | } 274 | Ok(ControlFlow::Continue(())) 275 | } 276 | } 277 | -------------------------------------------------------------------------------- /common/src/block/tree.rs: -------------------------------------------------------------------------------- 1 | //! Types and functions relating to block trees. 2 | #![warn(missing_docs)] 3 | use std::collections::BTreeMap; 4 | 5 | use bitcoin::blockdata::block::BlockHeader; 6 | use bitcoin::consensus::params::Params; 7 | use bitcoin::hash_types::BlockHash; 8 | use bitcoin::util::uint::Uint256; 9 | 10 | use thiserror::Error; 11 | 12 | use crate::block::store; 13 | use crate::block::time::Clock; 14 | use crate::block::{Bits, BlockTime, Height, Target, Work}; 15 | use crate::nonempty::NonEmpty; 16 | 17 | /// An error related to the block tree. 18 | #[derive(Debug, Error)] 19 | pub enum Error { 20 | /// The block's proof-of-work is invalid. 21 | #[error("invalid block proof-of-work")] 22 | InvalidBlockPoW, 23 | 24 | /// The block's difficulty target is invalid. 
25 | #[error("invalid block difficulty target: {0}, expected {1}")] 26 | InvalidBlockTarget(Target, Target), 27 | 28 | /// The block's hash doesn't match the checkpoint. 29 | #[error("invalid checkpoint block hash {0} at height {1}")] 30 | InvalidBlockHash(BlockHash, Height), 31 | 32 | /// The block forks off the main chain prior to the last checkpoint. 33 | #[error("block height {0} is prior to last checkpoint")] 34 | InvalidBlockHeight(Height), 35 | 36 | /// The block timestamp is invalid. 37 | #[error("block timestamp {0} is invalid")] 38 | InvalidBlockTime(BlockTime, std::cmp::Ordering), 39 | 40 | /// The block is already known. 41 | #[error("duplicate block {0}")] 42 | DuplicateBlock(BlockHash), 43 | 44 | /// The block is orphan. 45 | #[error("block missing: {0}")] 46 | BlockMissing(BlockHash), 47 | 48 | /// A block import was aborted. FIXME: Move this error out of here. 49 | #[error("block import aborted at height {2}: {0} ({1} block(s) imported)")] 50 | BlockImportAborted(Box, usize, Height), 51 | 52 | /// Mismatched genesis. 53 | #[error("stored genesis header doesn't match network genesis")] 54 | GenesisMismatch, 55 | 56 | /// A storage error occured. 57 | #[error("storage error: {0}")] 58 | Store(#[from] store::Error), 59 | 60 | /// The operation was interrupted. 61 | #[error("the operation was interrupted")] 62 | Interrupted, 63 | } 64 | 65 | /// A generic block header. 66 | pub trait Header { 67 | /// Return the proof-of-work of this header. 68 | fn work(&self) -> Work; 69 | } 70 | 71 | impl Header for BlockHeader { 72 | fn work(&self) -> Work { 73 | self.work() 74 | } 75 | } 76 | 77 | /// The outcome of a successful block header import. 78 | #[allow(clippy::large_enum_variant)] 79 | #[derive(Debug, Clone, PartialEq, Eq)] 80 | pub enum ImportResult { 81 | /// A new tip was found. This can happen in either of two scenarios: 82 | /// 83 | /// 1. The imported block(s) extended the active chain, or 84 | /// 2. The imported block(s) caused a chain re-org. 
85 | /// 86 | TipChanged { 87 | /// Tip header. 88 | header: BlockHeader, 89 | /// Tip hash. 90 | hash: BlockHash, 91 | /// Tip height. 92 | height: Height, 93 | /// Blocks reverted/disconnected. 94 | reverted: Vec<(Height, BlockHeader)>, 95 | /// Blocks added/connected. 96 | connected: NonEmpty<(Height, BlockHeader)>, 97 | }, 98 | /// The block headers were imported successfully, but our best block hasn't changed. 99 | /// This will happen if we imported a duplicate, orphan or stale block. 100 | TipUnchanged, // TODO: We could add a parameter eg. BlockMissing or DuplicateBlock. 101 | } 102 | 103 | /// A chain of block headers that may or may not lead back to genesis. 104 | #[derive(Debug, Clone)] 105 | pub struct Branch<'a, H: Header>(pub &'a [H]); 106 | 107 | impl<'a, H: Header> Branch<'a, H> { 108 | /// Compute the total proof-of-work carried by this branch. 109 | pub fn work(&self) -> Work { 110 | let mut work = Work::default(); 111 | for header in self.0.iter() { 112 | work = work + header.work(); 113 | } 114 | work 115 | } 116 | } 117 | 118 | /// A representation of all known blocks that keeps track of the longest chain. 119 | pub trait BlockTree: BlockReader { 120 | /// Import a chain of block headers into the block tree. 121 | fn import_blocks, C: Clock>( 122 | &mut self, 123 | chain: I, 124 | context: &C, 125 | ) -> Result; 126 | /// Attempts to extend the active chain. Returns `Ok` with `ImportResult::TipUnchanged` if 127 | /// the block didn't connect, and `Err` if the block was invalid. 128 | fn extend_tip( 129 | &mut self, 130 | header: BlockHeader, 131 | context: &C, 132 | ) -> Result; 133 | } 134 | 135 | /// Read block header state. 136 | pub trait BlockReader { 137 | /// Get a block by hash. 138 | fn get_block(&self, hash: &BlockHash) -> Option<(Height, &BlockHeader)>; 139 | /// Get a block by height. 
140 | fn get_block_by_height(&self, height: Height) -> Option<&BlockHeader>; 141 | /// Find a path from the active chain to the provided (stale) block hash. 142 | /// 143 | /// If a path is found, the height of the start/fork block is returned, along with the 144 | /// headers up to and including the tip, forming a branch. 145 | /// 146 | /// If the given block is on the active chain, its height and header is returned. 147 | fn find_branch(&self, to: &BlockHash) -> Option<(Height, NonEmpty)>; 148 | /// Iterate over the longest chain, starting from genesis. 149 | fn chain<'a>(&'a self) -> Box + 'a> { 150 | Box::new(self.iter().map(|(_, h)| h)) 151 | } 152 | /// Get the "chainwork", ie. the total accumulated proof-of-work of the active chain. 153 | fn chain_work(&self) -> Uint256; 154 | /// Iterate over the longest chain, starting from genesis, including heights. 155 | fn iter<'a>(&'a self) -> Box + 'a>; 156 | /// Iterate over a range of blocks. 157 | fn range<'a>( 158 | &'a self, 159 | range: std::ops::Range, 160 | ) -> Box + 'a> { 161 | Box::new( 162 | self.iter() 163 | .map(|(height, header)| (height, header.block_hash())) 164 | .skip(range.start as usize) 165 | .take((range.end - range.start) as usize), 166 | ) 167 | } 168 | /// Return the height of the longest chain. 169 | fn height(&self) -> Height; 170 | /// Get the tip of the longest chain. 171 | fn tip(&self) -> (BlockHash, BlockHeader); 172 | /// Get the last block of the longest chain. 173 | fn best_block(&self) -> (Height, &BlockHeader) { 174 | let height = self.height(); 175 | ( 176 | height, 177 | self.get_block_by_height(height) 178 | .expect("the best block is always present"), 179 | ) 180 | } 181 | /// Get the height of the last checkpoint block. 182 | fn last_checkpoint(&self) -> Height; 183 | /// Known checkpoints. 184 | fn checkpoints(&self) -> BTreeMap; 185 | /// Return the genesis block header. 
186 | fn genesis(&self) -> &BlockHeader { 187 | self.get_block_by_height(0) 188 | .expect("the genesis block is always present") 189 | } 190 | /// Check whether a block hash is known. 191 | fn is_known(&self, hash: &BlockHash) -> bool; 192 | /// Check whether a block hash is part of the active chain. 193 | fn contains(&self, hash: &BlockHash) -> bool; 194 | /// Return the headers corresponding to the given locators, up to a maximum. 195 | fn locate_headers( 196 | &self, 197 | locators: &[BlockHash], 198 | stop_hash: BlockHash, 199 | max_headers: usize, 200 | ) -> Vec; 201 | /// Get the locator hashes starting from the given height and going backwards. 202 | fn locator_hashes(&self, from: Height) -> Vec; 203 | /// Get the next difficulty given a block height, time and bits. 204 | fn next_difficulty_target( 205 | &self, 206 | last_height: Height, 207 | last_time: BlockTime, 208 | last_target: Target, 209 | params: &Params, 210 | ) -> Bits { 211 | // Only adjust on set intervals. Otherwise return current target. 212 | // Since the height is 0-indexed, we add `1` to check it against the interval. 
213 | if (last_height + 1) % params.difficulty_adjustment_interval() != 0 { 214 | return BlockHeader::compact_target_from_u256(&last_target); 215 | } 216 | 217 | let last_adjustment_height = 218 | last_height.saturating_sub(params.difficulty_adjustment_interval() - 1); 219 | let last_adjustment_block = self 220 | .get_block_by_height(last_adjustment_height) 221 | .unwrap_or_else(|| self.genesis()); 222 | let last_adjustment_time = last_adjustment_block.time; 223 | 224 | if params.no_pow_retargeting { 225 | return last_adjustment_block.bits; 226 | } 227 | 228 | let actual_timespan = last_time - last_adjustment_time; 229 | let mut adjusted_timespan = actual_timespan; 230 | 231 | if actual_timespan < params.pow_target_timespan as BlockTime / 4 { 232 | adjusted_timespan = params.pow_target_timespan as BlockTime / 4; 233 | } else if actual_timespan > params.pow_target_timespan as BlockTime * 4 { 234 | adjusted_timespan = params.pow_target_timespan as BlockTime * 4; 235 | } 236 | 237 | let mut target = last_target; 238 | 239 | target = target.mul_u32(adjusted_timespan); 240 | target = target / Target::from_u64(params.pow_target_timespan).unwrap(); 241 | 242 | // Ensure a difficulty floor. 243 | if target > params.pow_limit { 244 | target = params.pow_limit; 245 | } 246 | 247 | BlockHeader::compact_target_from_u256(&target) 248 | } 249 | } 250 | --------------------------------------------------------------------------------