├── .dockerignore ├── .editorconfig ├── .gitignore ├── .gitlab-ci.yml ├── .gitmodules ├── .travis.yml ├── Cargo.lock ├── Cargo.toml ├── LICENSE ├── README.md ├── bencher ├── Cargo.toml └── src │ ├── database.rs │ ├── main.rs │ └── verifier.rs ├── chain ├── Cargo.toml ├── README.md └── src │ ├── block.rs │ ├── block_header.rs │ ├── constants.rs │ ├── indexed_block.rs │ ├── indexed_header.rs │ ├── indexed_transaction.rs │ ├── lib.rs │ ├── merkle_root.rs │ ├── read_and_hash.rs │ └── transaction.rs ├── chainx_btc ├── cli.yml ├── commands │ ├── dev.rs │ ├── import.rs │ ├── mod.rs │ ├── rollback.rs │ └── start.rs ├── config.rs ├── main.rs ├── rpc.rs ├── rpc_apis.rs ├── seednodes.rs └── util.rs ├── cmake-build-debug └── CMakeFiles │ └── clion-log.txt ├── crypto ├── Cargo.toml └── src │ ├── buffer.rs │ ├── cryptoutil.rs │ ├── digest.rs │ ├── fixed_buffer.rs │ ├── lib.rs │ ├── ripemd160.rs │ ├── sha1.rs │ ├── sha2.rs │ ├── simd.rs │ └── symmetriccipher.rs ├── db ├── Cargo.toml ├── src │ ├── block_chain_db.rs │ ├── kv │ │ ├── cachedb.rs │ │ ├── db.rs │ │ ├── diskdb.rs │ │ ├── memorydb.rs │ │ ├── mod.rs │ │ ├── overlaydb.rs │ │ └── transaction.rs │ └── lib.rs └── tests │ └── blockchaindb.rs ├── doc └── bips.md ├── docker ├── hub │ └── Dockerfile └── ubuntu │ └── Dockerfile ├── import ├── Cargo.toml └── src │ ├── blk.rs │ ├── block.rs │ ├── fs.rs │ └── lib.rs ├── key ├── Cargo.toml └── src │ └── main.rs ├── keys ├── Cargo.toml └── src │ ├── address.rs │ ├── display.rs │ ├── error.rs │ ├── generator.rs │ ├── keypair.rs │ ├── lib.rs │ ├── network.rs │ ├── private.rs │ ├── public.rs │ └── signature.rs ├── logs ├── Cargo.toml └── src │ └── lib.rs ├── message ├── Cargo.toml └── src │ ├── common │ ├── address.rs │ ├── block_header_and_ids.rs │ ├── block_transactions.rs │ ├── block_transactions_request.rs │ ├── command.rs │ ├── inventory.rs │ ├── ip.rs │ ├── mod.rs │ ├── port.rs │ ├── prefilled_transaction.rs │ └── service.rs │ ├── error.rs │ ├── lib.rs │ ├── message │ ├── message.rs │ ├── message_header.rs │ ├── mod.rs │ └── payload.rs │ ├── serialization │ ├── mod.rs │ ├── reader.rs │ └── stream.rs │ └── types │ ├── addr.rs │ ├── block.rs │ ├── blocktxn.rs │ ├── compactblock.rs │ ├── feefilter.rs │ ├── filteradd.rs │ ├── filterclear.rs │ ├── filterload.rs │ ├── getaddr.rs │ ├── getblocks.rs │ ├── getblocktxn.rs │ ├── getdata.rs │ ├── getheaders.rs │ ├── headers.rs │ ├── inv.rs │ ├── mempool.rs │ ├── merkle_block.rs │ ├── mod.rs │ ├── notfound.rs │ ├── ping.rs │ ├── pong.rs │ ├── reject.rs │ ├── sendcompact.rs │ ├── sendheaders.rs │ ├── tx.rs │ ├── verack.rs │ └── version.rs ├── miner ├── Cargo.toml ├── benches │ └── mod.rs └── src │ ├── block_assembler.rs │ ├── cpu_miner.rs │ ├── fee.rs │ ├── lib.rs │ └── memory_pool.rs ├── network ├── Cargo.toml └── src │ ├── consensus.rs │ ├── deployments.rs │ ├── lib.rs │ └── network.rs ├── node ├── Cargo.toml └── src │ └── lib.rs ├── p2p ├── Cargo.toml └── src │ ├── config.rs │ ├── event_loop.rs │ ├── io │ ├── deadline.rs │ ├── handshake.rs │ ├── mod.rs │ ├── read_any_message.rs │ ├── read_header.rs │ ├── read_message.rs │ ├── read_payload.rs │ ├── sharedtcpstream.rs │ └── write_message.rs │ ├── lib.rs │ ├── net │ ├── accept_connection.rs │ ├── channel.rs │ ├── config.rs │ ├── connect.rs │ ├── connection.rs │ ├── connection_counter.rs │ ├── connections.rs │ ├── mod.rs │ ├── peer_context.rs │ └── stats.rs │ ├── p2p.rs │ ├── protocol │ ├── addr.rs │ ├── mod.rs │ ├── ping.rs │ └── sync.rs │ ├── session.rs │ └── util │ ├── internet_protocol.rs │ ├── interval.rs │ ├── 
mod.rs │ ├── node_table.rs │ ├── nonce.rs │ ├── peer.rs │ ├── response_queue.rs │ ├── synchronizer.rs │ └── time.rs ├── pbtc ├── cli.yml ├── commands │ ├── import.rs │ ├── mod.rs │ ├── rollback.rs │ └── start.rs ├── config.rs ├── main.rs ├── rpc.rs ├── rpc_apis.rs ├── seednodes.rs └── util.rs ├── primitives ├── Cargo.toml └── src │ ├── bytes.rs │ ├── compact.rs │ ├── hash.rs │ ├── io.rs │ ├── lib.rs │ └── u256.rs ├── rpc ├── Cargo.toml └── src │ ├── lib.rs │ ├── rpc_server.rs │ └── v1 │ ├── helpers │ ├── errors.rs │ └── mod.rs │ ├── impls │ ├── blockchain.rs │ ├── miner.rs │ ├── mod.rs │ ├── network.rs │ └── raw.rs │ ├── mod.rs │ ├── traits │ ├── blockchain.rs │ ├── miner.rs │ ├── mod.rs │ ├── network.rs │ └── raw.rs │ └── types │ ├── address.rs │ ├── block.rs │ ├── block_template.rs │ ├── block_template_request.rs │ ├── bytes.rs │ ├── get_block_response.rs │ ├── get_tx_out_response.rs │ ├── get_tx_out_set_info_response.rs │ ├── hash.rs │ ├── mod.rs │ ├── nodes.rs │ ├── script.rs │ ├── transaction.rs │ └── uint.rs ├── script ├── Cargo.toml └── src │ ├── builder.rs │ ├── error.rs │ ├── flags.rs │ ├── interpreter.rs │ ├── lib.rs │ ├── num.rs │ ├── opcode.rs │ ├── script.rs │ ├── sign.rs │ ├── stack.rs │ └── verify.rs ├── serialization ├── Cargo.toml └── src │ ├── compact_integer.rs │ ├── impls.rs │ ├── lib.rs │ ├── list.rs │ ├── reader.rs │ └── stream.rs ├── serialization_derive ├── Cargo.toml ├── src │ ├── de.rs │ ├── lib.rs │ └── ser.rs └── tests │ └── raw.rs ├── snap └── snapcraft.yaml ├── storage ├── Cargo.toml └── src │ ├── best_block.rs │ ├── block_ancestors.rs │ ├── block_chain.rs │ ├── block_impls.rs │ ├── block_iterator.rs │ ├── block_origin.rs │ ├── block_provider.rs │ ├── block_ref.rs │ ├── error.rs │ ├── lib.rs │ ├── store.rs │ ├── transaction_meta.rs │ └── transaction_provider.rs ├── sync ├── Cargo.toml └── src │ ├── blocks_writer.rs │ ├── inbound_connection.rs │ ├── inbound_connection_factory.rs │ ├── lib.rs │ ├── local_node.rs │ ├── synchronization_chain.rs │ ├── synchronization_client.rs │ ├── synchronization_client_core.rs │ ├── synchronization_executor.rs │ ├── synchronization_manager.rs │ ├── synchronization_peers.rs │ ├── synchronization_peers_tasks.rs │ ├── synchronization_server.rs │ ├── synchronization_verifier.rs │ ├── types.rs │ └── utils │ ├── average_speed_meter.rs │ ├── best_headers_chain.rs │ ├── bloom_filter.rs │ ├── compact_block_builder.rs │ ├── connection_filter.rs │ ├── fee_rate_filter.rs │ ├── hash_queue.rs │ ├── known_hash_filter.rs │ ├── memory_pool_transaction_provider.rs │ ├── message_block_headers_provider.rs │ ├── mod.rs │ ├── orphan_blocks_pool.rs │ ├── orphan_transactions_pool.rs │ ├── partial_merkle_tree.rs │ └── synchronization_state.rs ├── test-data ├── Cargo.toml └── src │ ├── block.rs │ ├── chain_builder.rs │ ├── invoke.rs │ └── lib.rs ├── tools ├── bench.sh ├── clippy.sh ├── deb-build.sh ├── deb_build.sh ├── doc.sh ├── docker_build.sh ├── draw_graph.sh ├── graph.dot ├── graph.svg ├── graph_ratio.diff ├── regtests.sh └── workspace.diff └── verification ├── Cargo.toml └── src ├── accept_block.rs ├── accept_chain.rs ├── accept_header.rs ├── accept_transaction.rs ├── canon.rs ├── chain_verifier.rs ├── constants.rs ├── deployments.rs ├── duplex_store.rs ├── error.rs ├── lib.rs ├── sigops.rs ├── timestamp.rs ├── verify_block.rs ├── verify_chain.rs ├── verify_header.rs ├── verify_transaction.rs ├── work.rs └── work_bch.rs /.dockerignore: -------------------------------------------------------------------------------- 1 | # Generated by Cargo 2 | # 
will have compiled files and executables 3 | target 4 | 5 | *.swp 6 | *.swo 7 | *.swn 8 | *.DS_Store 9 | 10 | # Visual Studio Code stuff 11 | .vscode 12 | 13 | # GitEye stuff 14 | .project 15 | 16 | # idea ide 17 | .idea 18 | 19 | # git stuff 20 | .git 21 | 22 | # ignore compare tools 23 | tools/compare-tool 24 | -------------------------------------------------------------------------------- /.editorconfig: -------------------------------------------------------------------------------- 1 | root = true 2 | [*] 3 | indent_style=tab 4 | indent_size=tab 5 | tab_width=4 6 | end_of_line=lf 7 | charset=utf-8 8 | trim_trailing_whitespace=true 9 | max_line_length=120 10 | insert_final_newline=true 11 | 12 | [.travis.yml] 13 | indent_style=space 14 | indent_size=2 15 | tab_width=8 16 | end_of_line=lf 17 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Generated by Cargo 2 | # will have compiled files and executables 3 | /target 4 | 5 | *.swp 6 | *.swo 7 | *.swn 8 | *.DS_Store 9 | 10 | # Visual Studio Code stuff 11 | /.vscode 12 | 13 | # GitEye stuff 14 | /.project 15 | 16 | # idea ide 17 | /.idea 18 | /cmake-build-debug 19 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "tools/compare-tool"] 2 | path = tools/compare-tool 3 | url = https://github.com/theuni/bitcoind-comparisontool 4 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | sudo: required 2 | dist: trusty 3 | language: rust 4 | branches: 5 | only: 6 | - master 7 | matrix: 8 | fast_finish: false 9 | include: 10 | - rust: stable 11 | cache: 12 | apt: true 13 | directories: 14 | - $TRAVIS_BUILD_DIR/target 15 | - $HOME/.cargo 16 | addons: 17 | apt: 18 | sources: 19 | - ubuntu-toolchain-r-test 20 | packages: 21 | - libcurl4-openssl-dev 22 | - libelf-dev 23 | - libdw-dev 24 | - gcc-4.8 25 | - g++-4.8 26 | - oracle-java8-set-default 27 | script: 28 | - echo 'Running tests' && echo -en 'travis_fold:start:tests' 29 | - cargo test --all 30 | - echo -en 'travis_fold:end:tests' 31 | - echo 'Building in release mode' && echo -en 'travis_fold:start:build.release' 32 | - cargo build --release 33 | - echo -en 'travis_fold:end:build.release' 34 | - echo 'Running regtests from ./tools/regtests.sh' && echo -en 'travis_fold:start:regtests' 35 | - ./tools/regtests.sh 36 | - echo -en 'travis_fold:end:regtests' 37 | - echo 'Running bench from ./tools/bench.sh' && echo -en 'travis_fold:start:bench' 38 | - ./tools/bench.sh 39 | - echo -en 'travis_fold:end:bench' 40 | after_success: | 41 | [ true ] && 42 | [ $TRAVIS_BRANCH = master ] && 43 | [ $TRAVIS_PULL_REQUEST = false ] && 44 | [ $TRAVIS_RUST_VERSION = stable ] && 45 | ./tools/doc.sh && 46 | echo '' > target/doc/index.html && 47 | pip install --user ghp-import && 48 | /home/travis/.local/bin/ghp-import -n target/doc && 49 | git push -fq https://${GH_TOKEN}@github.com/${TRAVIS_REPO_SLUG}.git gh-pages 50 | env: 51 | global: 52 | - CXX="g++-4.8" 53 | - CC="gcc-4.8" 54 | - secure: 
RPpiXGV2RgRNv/S5XICdym33pauO5PNQu06+0kIuap6acsT6rjWZDeKU0kXIkCyv7XGu6TAE3m1/N6DSFvoeObdoodzakpUnzI9ysC9UbGbX3KgFAILrsuD+Th0HR1yZ2+boXEnetl7lp87X6m3HL4dz7ccuRJOGQFCdzo4iQd4RXMJXq1nS8pXYOnOiGtWy/K6SBqHus42End303vR6ptd71iuEYmX2qL5LUpXRQ5JuvE78JL4v5e1jrLwwrIG5hr3ej+EICiEKjAtZcv3C/CYq2+V7T8mI8FQFBsVG56FxP1Qq0G1jnv6r8i/pLb342DiQapeQZ+CZ1vHv3Ruzxm4+T9VLkwIcI5kOZmbOjt/BnO1yHFMr6uslxYuyEvSLZXsvnB7Qi2cB/nFWxi8nk19CzuYy2hjrnDA/bfeii65oQ4hyqAuF6QvHgZtMmSONbea679b1mbl9K67EWt04J4k67Hfuj6HvmIOT3KF0TPXO6K4b6a4OJ43PinNSWtIOlCb4658HrfUBTUEe1XN14uC/F8dyFw8PTrw/VQKkmNEDRvI0AuWF1s+qN1u1mm+CpyJaqxcPloUR+RgVBk3KrRd/Gm6neunx07G6STv/S2mGCZ2ZRIUpbSqha7XTmHltoWZRYFeM3dhVMXe3hgEYuJfm4n7HxGvp4b1s1ckenPI= 55 | - RUST_BACKTRACE=1 56 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "chainx-btc" 3 | version = "0.1.0" 4 | license = "GPL-3.0" 5 | authors = ["Parity Technologies "] 6 | description = "Parity bitcoin client." 7 | 8 | [dependencies] 9 | parking_lot = "0.4" 10 | log = "0.4" 11 | env_logger = "0.5" 12 | app_dirs = { git = "https://github.com/paritytech/app-dirs-rs" } 13 | jsonrpc-http-server = { git = "https://github.com/ethcore/jsonrpc.git" } 14 | libc = "0.2" 15 | clap = { version = "2", features = ["yaml"] } 16 | chain = { path = "chain" } 17 | keys = { path = "keys" } 18 | message = { path = "message" } 19 | network = { path = "network" } 20 | miner = { path = "miner" } 21 | p2p = { path = "p2p" } 22 | node = { path = "node" } 23 | script = { path = "script" } 24 | storage = { path = "storage" } 25 | db = { path = "db" } 26 | verification = { path = "verification" } 27 | sync = { path = "sync" } 28 | #import = { path = "import" } 29 | logs = { path = "logs" } 30 | rpc = { path = "rpc" } 31 | primitives = { path = "primitives" } 32 | tokio = "0.1" 33 | exit-future = "0.1" 34 | 35 | [profile.test] 36 | debug = true 37 | 38 | [[bin]] 39 | path = "chainx_btc/main.rs" 40 | name = "chainx_btc" 41 | 42 | [workspace] 43 | members = [ 44 | "bencher", 45 | "db", 46 | "miner", 47 | "chain", 48 | "storage", 49 | "rpc", 50 | "crypto", 51 | "primitives", 52 | "message", 53 | "script", 54 | "serialization", 55 | "serialization_derive" ] 56 | -------------------------------------------------------------------------------- /bencher/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "bencher" 3 | version = "0.1.0" 4 | license = "GPL-3.0" 5 | authors = ["Ethcore "] 6 | description = "Parity bitcoin client." 
7 | 8 | [dependencies] 9 | storage = { path = "../storage" } 10 | db = { path = "../db" } 11 | verification = { path = "../verification" } 12 | network = { path = "../network" } 13 | chain = { path = "../chain" } 14 | primitives = { path = "../primitives" } 15 | test-data = { path = "../test-data" } 16 | time = "*" 17 | byteorder = "1.0" 18 | 19 | [[bin]] 20 | path = "src/main.rs" 21 | name = "bencher" 22 | -------------------------------------------------------------------------------- /bencher/src/main.rs: -------------------------------------------------------------------------------- 1 | extern crate storage; 2 | extern crate db; 3 | extern crate chain; 4 | extern crate test_data; 5 | extern crate time; 6 | extern crate verification; 7 | extern crate network; 8 | extern crate byteorder; 9 | extern crate primitives; 10 | 11 | mod database; 12 | mod verifier; 13 | 14 | use time::{PreciseTime, Duration}; 15 | use std::io::Write; 16 | use std::str; 17 | 18 | #[derive(Default)] 19 | pub struct Benchmark { 20 | start: Option<PreciseTime>, 21 | end: Option<PreciseTime>, 22 | samples: Option<usize>, 23 | } 24 | 25 | impl Benchmark { 26 | pub fn start(&mut self) { 27 | self.start = Some(PreciseTime::now()); 28 | } 29 | 30 | pub fn stop(&mut self) { 31 | self.end = Some(PreciseTime::now()); 32 | } 33 | 34 | pub fn evaluate(&self) -> Duration { 35 | self.start.expect("benchmark never started").to(self.end.expect("benchmark never ended")) 36 | } 37 | 38 | pub fn samples(&mut self, samples: usize) { 39 | self.samples = Some(samples); 40 | } 41 | } 42 | 43 | fn decimal_mark(s: String) -> String { 44 | let bytes: Vec<_> = s.bytes().rev().collect(); 45 | let chunks: Vec<_> = bytes.chunks(3).map(|chunk| str::from_utf8(chunk).unwrap()).collect(); 46 | let result: Vec<_> = chunks.join(",").bytes().rev().collect(); 47 | String::from_utf8(result).unwrap() 48 | } 49 | 50 | 51 | fn run_benchmark<F>(name: &str, f: F) where F: FnOnce(&mut Benchmark) { 52 | print!("{}: ", name); 53 | ::std::io::stdout().flush().unwrap(); 54 | 55 | let mut benchmark = Benchmark::default(); 56 | f(&mut benchmark); 57 | if let Some(samples) = benchmark.samples { 58 | println!("{} ns/sample", 59 | decimal_mark(format!("{}", benchmark.evaluate().num_nanoseconds().unwrap() / samples as i64)), 60 | ); 61 | } 62 | else { 63 | println!("{} ns", decimal_mark(format!("{}", benchmark.evaluate().num_nanoseconds().unwrap()))); 64 | } 65 | } 66 | 67 | macro_rules!
benchmark { 68 | ($t:expr) => { 69 | run_benchmark(stringify!($t), $t); 70 | }; 71 | } 72 | 73 | fn main() { 74 | benchmark!(database::fetch); 75 | benchmark!(database::write); 76 | benchmark!(database::reorg_short); 77 | benchmark!(database::write_heavy); 78 | benchmark!(verifier::main); 79 | } 80 | -------------------------------------------------------------------------------- /chain/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "chain" 3 | version = "0.1.0" 4 | authors = [ "debris & Chainpool " ] 5 | 6 | [dependencies] 7 | rustc-hex = { version = "2", optional = true } 8 | heapsize = { version = "0.4", optional = true } 9 | bitcrypto = { path = "../crypto", default-features = false } 10 | primitives = { path = "../primitives", default-features = false } 11 | serialization = { path = "../serialization", default-features = false } 12 | sr-std = { git = "https://github.com/chainx-org/sr-std", default-features = false } 13 | parity-codec = { version = "3.0", default-features = false } 14 | serde_bytes = { git = "https://github.com/serde-rs/bytes", optional = true } 15 | serde = { version = "1.0", optional = true } 16 | 17 | [features] 18 | default = ["std"] 19 | std = [ 20 | "rustc-hex", 21 | "heapsize", 22 | "bitcrypto/std", 23 | "primitives/std", 24 | "serialization/std", 25 | "sr-std/std", 26 | "serde_bytes", 27 | "serde", 28 | "parity-codec/std", 29 | ] 30 | -------------------------------------------------------------------------------- /chain/src/constants.rs: -------------------------------------------------------------------------------- 1 | 2 | // Below flags apply in the context of BIP 68 3 | // If this flag set, CTxIn::nSequence is NOT interpreted as a 4 | // relative lock-time. 5 | pub const SEQUENCE_LOCKTIME_DISABLE_FLAG: u32 = 1u32 << 31; 6 | 7 | // Setting nSequence to this value for every input in a transaction 8 | // disables nLockTime. 9 | pub const SEQUENCE_FINAL: u32 = 0xffffffff; 10 | 11 | // If CTxIn::nSequence encodes a relative lock-time and this flag 12 | // is set, the relative lock-time has units of 512 seconds, 13 | // otherwise it specifies blocks with a granularity of 1. 14 | pub const SEQUENCE_LOCKTIME_TYPE_FLAG: u32 = (1 << 22); 15 | 16 | // If CTxIn::nSequence encodes a relative lock-time, this mask is 17 | // applied to extract that lock-time from the sequence field. 18 | pub const SEQUENCE_LOCKTIME_MASK: u32 = 0x0000ffff; 19 | 20 | /// Threshold for `nLockTime`: below this value it is interpreted as block number, 21 | /// otherwise as UNIX timestamp. 
22 | pub const LOCKTIME_THRESHOLD: u32 = 500000000; // Tue Nov 5 00:53:20 1985 UTC 23 | 24 | /// Number of Satoshis in single coin 25 | pub const SATOSHIS_IN_COIN: u64 = 100_000_000; 26 | -------------------------------------------------------------------------------- /chain/src/indexed_header.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2018 Chainpool 2 | 3 | use rstd::cmp; 4 | use primitives::io; 5 | use hash::H256; 6 | use ser::{Deserializable, Reader, Error as ReaderError}; 7 | use block_header::BlockHeader; 8 | use read_and_hash::ReadAndHash; 9 | 10 | #[derive(Clone)] 11 | pub struct IndexedBlockHeader { 12 | pub hash: H256, 13 | pub raw: BlockHeader, 14 | } 15 | 16 | #[cfg(feature = "std")] 17 | impl std::fmt::Debug for IndexedBlockHeader { 18 | fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { 19 | f.debug_struct("IndexedBlockHeader") 20 | .field("hash", &self.hash.clone().reverse()) 21 | .field("raw", &self.raw) 22 | .finish() 23 | } 24 | } 25 | 26 | impl From<BlockHeader> for IndexedBlockHeader { 27 | fn from(header: BlockHeader) -> Self { 28 | IndexedBlockHeader { 29 | hash: header.hash(), 30 | raw: header, 31 | } 32 | } 33 | } 34 | 35 | impl IndexedBlockHeader { 36 | pub fn new(hash: H256, header: BlockHeader) -> Self { 37 | IndexedBlockHeader { 38 | hash: hash, 39 | raw: header, 40 | } 41 | } 42 | } 43 | 44 | impl cmp::PartialEq for IndexedBlockHeader { 45 | fn eq(&self, other: &Self) -> bool { 46 | self.hash == other.hash 47 | } 48 | } 49 | 50 | impl Deserializable for IndexedBlockHeader { 51 | fn deserialize<T>(reader: &mut Reader<T>) -> Result<Self, ReaderError> where T: io::Read { 52 | let data = try!(reader.read_and_hash::<BlockHeader>()); 53 | // TODO: use len 54 | let header = IndexedBlockHeader { 55 | raw: data.data, 56 | hash: data.hash, 57 | }; 58 | 59 | Ok(header) 60 | } 61 | } 62 | -------------------------------------------------------------------------------- /chain/src/indexed_transaction.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2018 Chainpool 2 | 3 | use rstd::cmp; 4 | use primitives::io; 5 | use hash::H256; 6 | use ser::{Deserializable, Reader, Error as ReaderError}; 7 | use transaction::Transaction; 8 | use read_and_hash::ReadAndHash; 9 | 10 | #[derive(Default, Clone)] 11 | pub struct IndexedTransaction { 12 | pub hash: H256, 13 | pub raw: Transaction, 14 | } 15 | 16 | #[cfg(feature = "std")] 17 | impl std::fmt::Debug for IndexedTransaction { 18 | fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { 19 | f.debug_struct("IndexedTransaction") 20 | .field("hash", &self.hash.clone().reverse()) 21 | .field("raw", &self.raw) 22 | .finish() 23 | } 24 | } 25 | 26 | impl<T> From<T> for IndexedTransaction where Transaction: From<T> { 27 | fn from(other: T) -> Self { 28 | let tx = Transaction::from(other); 29 | IndexedTransaction { 30 | hash: tx.hash(), 31 | raw: tx, 32 | } 33 | } 34 | } 35 | 36 | impl IndexedTransaction { 37 | pub fn new(hash: H256, transaction: Transaction) -> Self { 38 | IndexedTransaction { 39 | hash: hash, 40 | raw: transaction, 41 | } 42 | } 43 | } 44 | 45 | impl cmp::PartialEq for IndexedTransaction { 46 | fn eq(&self, other: &Self) -> bool { 47 | self.hash == other.hash 48 | } 49 | } 50 | 51 | impl Deserializable for IndexedTransaction { 52 | fn deserialize<T>(reader: &mut Reader<T>) -> Result<Self, ReaderError> where T: io::Read { 53 | let data = try!(reader.read_and_hash::<Transaction>()); 54 | // TODO: use len 55 | let tx = IndexedTransaction { 56 | raw: data.data, 57 | hash: data.hash, 58 | }; 59
| 60 | Ok(tx) 61 | } 62 | } 63 | -------------------------------------------------------------------------------- /chain/src/lib.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2018 Chainpool 2 | 3 | #![cfg_attr(not(feature = "std"), no_std)] 4 | 5 | #[cfg(feature = "std")] 6 | extern crate rustc_hex as hex; 7 | #[cfg(feature = "std")] 8 | extern crate heapsize; 9 | extern crate primitives; 10 | extern crate bitcrypto as crypto; 11 | extern crate serialization as ser; 12 | extern crate parity_codec as codec; 13 | #[cfg(feature = "std")] 14 | extern crate serde_bytes; 15 | #[cfg(feature = "std")] 16 | extern crate serde; 17 | 18 | #[allow(unused_imports)] 19 | #[macro_use] 20 | extern crate sr_std as rstd; 21 | 22 | pub mod constants; 23 | 24 | mod block; 25 | mod block_header; 26 | mod merkle_root; 27 | mod transaction; 28 | 29 | /// `IndexedBlock` extension 30 | mod read_and_hash; 31 | mod indexed_block; 32 | mod indexed_header; 33 | mod indexed_transaction; 34 | 35 | pub trait RepresentH256 { 36 | fn h256(&self) -> hash::H256; 37 | } 38 | 39 | pub use primitives::{hash, bytes, compact, io}; 40 | 41 | pub use block::Block; 42 | pub use block_header::BlockHeader; 43 | pub use merkle_root::{merkle_root, merkle_node_hash}; 44 | pub use transaction::{Transaction, TransactionInput, TransactionOutput, OutPoint}; 45 | 46 | pub use read_and_hash::{ReadAndHash, HashedData}; 47 | pub use indexed_block::IndexedBlock; 48 | pub use indexed_header::IndexedBlockHeader; 49 | pub use indexed_transaction::IndexedTransaction; 50 | 51 | pub type ShortTransactionID = hash::H48; 52 | -------------------------------------------------------------------------------- /chain/src/merkle_root.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2018 Chainpool 2 | 3 | use crypto::dhash256; 4 | use hash::{H256, H512}; 5 | use rstd::prelude::Vec; 6 | 7 | #[inline] 8 | fn concat<T>(a: T, b: T) -> H512 where T: AsRef<H256> { 9 | let mut result = H512::default(); 10 | result[0..32].copy_from_slice(&**a.as_ref()); 11 | result[32..64].copy_from_slice(&**b.as_ref()); 12 | result 13 | } 14 | 15 | /// Calculates the root of the merkle tree 16 | /// https://en.bitcoin.it/wiki/Protocol_documentation#Merkle_Trees 17 | pub fn merkle_root<T>(hashes: &[T]) -> H256 where T: AsRef<H256> { 18 | if hashes.len() == 1 { 19 | return hashes[0].as_ref().clone(); 20 | } 21 | 22 | let mut row = Vec::with_capacity(hashes.len() / 2); 23 | let mut i = 0; 24 | while i + 1 < hashes.len() { 25 | row.push(merkle_node_hash(&hashes[i], &hashes[i + 1])); 26 | i += 2 27 | } 28 | 29 | // duplicate the last element if len is not even 30 | if hashes.len() % 2 == 1 { 31 | let last = &hashes[hashes.len() - 1]; 32 | row.push(merkle_node_hash(last, last)); 33 | } 34 | 35 | merkle_root(&row) 36 | } 37 | 38 | /// Calculate merkle tree node hash 39 | pub fn merkle_node_hash<T>(left: T, right: T) -> H256 where T: AsRef<H256> { 40 | dhash256(&*concat(left, right)) 41 | } 42 | 43 | #[cfg(test)] 44 | mod tests { 45 | use hash::H256; 46 | use super::merkle_root; 47 | 48 | // block 80_000 49 | // https://blockchain.info/block/000000000043a8c0fd1d6f726790caa2a406010d19efd2780db27bdbbd93baf6 50 | #[test] 51 | fn test_merkle_root_with_2_hashes() { 52 | let tx1 = H256::from_reversed_str("c06fbab289f723c6261d3030ddb6be121f7d2508d77862bb1e484f5cd7f92b25"); 53 | let tx2 = H256::from_reversed_str("5a4ebf66822b0b2d56bd9dc64ece0bc38ee7844a23ff1d7320a88c5fdb2ad3e2"); 54 | let expected =
H256::from_reversed_str("8fb300e3fdb6f30a4c67233b997f99fdd518b968b9a3fd65857bfe78b2600719"); 55 | 56 | let result = merkle_root(&[&tx1, &tx2]); 57 | let result2 = merkle_root(&[tx1, tx2]); 58 | assert_eq!(result, expected); 59 | assert_eq!(result2, expected); 60 | } 61 | } 62 | -------------------------------------------------------------------------------- /chain/src/read_and_hash.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2018 Chainpool 2 | 3 | use io; 4 | use hash::H256; 5 | use crypto::{DHash256, Digest}; 6 | use ser::{Reader, Error as ReaderError, Deserializable}; 7 | 8 | pub struct HashedData { 9 | pub size: usize, 10 | pub hash: H256, 11 | pub data: T, 12 | } 13 | 14 | pub trait ReadAndHash { 15 | fn read_and_hash(&mut self) -> Result, ReaderError> where T: Deserializable; 16 | } 17 | 18 | impl ReadAndHash for Reader where R: io::Read { 19 | fn read_and_hash(&mut self) -> Result, ReaderError> where T: Deserializable { 20 | let mut size = 0usize; 21 | let mut hasher = DHash256::new(); 22 | let data = self.read_with_proxy(|bytes| { 23 | size += bytes.len(); 24 | hasher.input(bytes); 25 | })?; 26 | 27 | let result = HashedData { 28 | hash: hasher.finish(), 29 | data: data, 30 | size: size, 31 | }; 32 | 33 | Ok(result) 34 | } 35 | } 36 | -------------------------------------------------------------------------------- /chainx_btc/commands/import.rs: -------------------------------------------------------------------------------- 1 | use clap::ArgMatches; 2 | use sync::{create_sync_blocks_writer, Error}; 3 | use config::Config; 4 | use util::init_db; 5 | 6 | pub fn import(cfg: Config, matches: &ArgMatches) -> Result<(), String> { 7 | try!(init_db(&cfg)); 8 | 9 | let blk_path = matches.value_of("PATH").expect( 10 | "PATH is required in cli.yml; qed", 11 | ); 12 | 13 | let mut writer = create_sync_blocks_writer(cfg.db, cfg.consensus, cfg.verification_params); 14 | 15 | let blk_dir = try!(::import::open_blk_dir(blk_path).map_err(|_| { 16 | "Import directory does not exist".to_owned() 17 | })); 18 | let mut counter = 0; 19 | for blk in blk_dir { 20 | // TODO: verify magic! 
21 | let blk = try!(blk.map_err(|_| "Cannot read block".to_owned())); 22 | match writer.append_block(blk.block) { 23 | Ok(_) => { 24 | counter += 1; 25 | if counter % 1000 == 0 { 26 | info!(target: "sync", "Imported {} blocks", counter); 27 | } 28 | } 29 | Err(Error::TooManyOrphanBlocks) => { 30 | return Err("Too many orphan (unordered) blocks".into()) 31 | } 32 | Err(_) => return Err("Cannot append block".into()), 33 | } 34 | } 35 | 36 | info!("Finished import of {} blocks", counter); 37 | 38 | Ok(()) 39 | } 40 | -------------------------------------------------------------------------------- /chainx_btc/commands/mod.rs: -------------------------------------------------------------------------------- 1 | //mod import; 2 | mod start; 3 | mod dev; 4 | mod rollback; 5 | 6 | //pub use self::import::import; 7 | pub use self::start::start; 8 | pub use self::rollback::rollback; 9 | pub use self::dev::dev; 10 | -------------------------------------------------------------------------------- /chainx_btc/commands/rollback.rs: -------------------------------------------------------------------------------- 1 | use clap::ArgMatches; 2 | use storage::BlockRef; 3 | use config::Config; 4 | use primitives::hash::H256; 5 | use util::init_db; 6 | 7 | pub fn rollback(cfg: Config, matches: &ArgMatches) -> Result<(), String> { 8 | try!(init_db(&cfg)); 9 | 10 | let block_ref = matches.value_of("BLOCK").expect( 11 | "BLOCK is required in cli.yml; qed", 12 | ); 13 | let block_ref = if block_ref.len() == 64 { 14 | BlockRef::Hash({ 15 | let hash: H256 = block_ref.parse().map_err( 16 | |e| format!("Invalid block number: {}", e), 17 | )?; 18 | hash.reversed() 19 | }) 20 | } else { 21 | BlockRef::Number(block_ref.parse().map_err( 22 | |e| format!("Invalid block hash: {}", e), 23 | )?) 24 | }; 25 | 26 | let required_block_hash = cfg.db 27 | .block_header(block_ref.clone()) 28 | .ok_or(format!("Block {:?} is unknown", block_ref))? 29 | .hash(); 30 | let genesis_hash = cfg.network.genesis_block().hash(); 31 | 32 | let mut best_block_hash = cfg.db.best_block().hash; 33 | debug_assert!(best_block_hash != H256::default()); // genesis inserted in init_db 34 | 35 | loop { 36 | if best_block_hash == required_block_hash { 37 | info!("Reverted to block {:?}", block_ref); 38 | return Ok(()); 39 | } 40 | 41 | if best_block_hash == genesis_hash { 42 | return Err(format!( 43 | "Failed to revert to block {:?}. Reverted to genesis", 44 | block_ref 45 | )); 46 | } 47 | 48 | best_block_hash = cfg.db.rollback_best().map_err(|e| format!("{:?}", e))?; 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /chainx_btc/main.rs: -------------------------------------------------------------------------------- 1 | //! chainx bitcoin client for chainx relay testnet. 
2 | 3 | extern crate exit_future; 4 | #[macro_use] 5 | extern crate clap; 6 | #[macro_use] 7 | extern crate log; 8 | extern crate env_logger; 9 | extern crate app_dirs; 10 | extern crate libc; 11 | 12 | extern crate storage; 13 | extern crate db; 14 | extern crate chain; 15 | extern crate keys; 16 | extern crate logs; 17 | extern crate script; 18 | extern crate message; 19 | extern crate network; 20 | extern crate p2p; 21 | extern crate miner; 22 | extern crate sync; 23 | //extern crate import; 24 | extern crate parking_lot; 25 | extern crate rpc as core_rpc; 26 | extern crate primitives; 27 | extern crate verification; 28 | extern crate node; 29 | extern crate tokio; 30 | extern crate jsonrpc_http_server; 31 | 32 | mod commands; 33 | mod config; 34 | mod seednodes; 35 | mod util; 36 | mod rpc; 37 | mod rpc_apis; 38 | 39 | use app_dirs::AppInfo; 40 | 41 | pub const APP_INFO: AppInfo = AppInfo { 42 | name: "chainx_btc", 43 | author: "Chainpool", 44 | }; 45 | pub const PROTOCOL_VERSION: u32 = 70_014; 46 | pub const PROTOCOL_MINIMUM: u32 = 70_001; 47 | pub const USER_AGENT: &'static str = "pbtc"; 48 | pub const REGTEST_USER_AGENT: &'static str = "/Satoshi:0.12.1/"; 49 | pub const LOG_INFO: &'static str = "sync=info"; 50 | 51 | fn main() { 52 | // Always print backtrace on panic. 53 | ::std::env::set_var("RUST_BACKTRACE", "1"); 54 | 55 | if let Err(err) = run() { 56 | println!("{}", err); 57 | } 58 | } 59 | 60 | fn run() -> Result<(), String> { 61 | let yaml = load_yaml!("cli.yml"); 62 | let matches = clap::App::from_yaml(yaml).get_matches(); 63 | let cfg = try!(config::parse(&matches)); 64 | 65 | if !cfg.quiet { 66 | if cfg!(windows) { 67 | logs::init(LOG_INFO, logs::DateLogFormatter); 68 | } else { 69 | logs::init(LOG_INFO, logs::DateAndColorLogFormatter); 70 | } 71 | } else { 72 | env_logger::init(); 73 | } 74 | 75 | match matches.subcommand() { 76 | //("import", Some(import_matches)) => commands::import(cfg, import_matches), 77 | ("rollback", Some(rollback_matches)) => commands::rollback(cfg, rollback_matches), 78 | ("dev", _) => commands::dev(cfg), 79 | _ => commands::start(cfg), 80 | } 81 | } 82 | -------------------------------------------------------------------------------- /chainx_btc/seednodes.rs: -------------------------------------------------------------------------------- 1 | 2 | pub fn mainnet_seednodes() -> Vec<&'static str> { 3 | vec![ 4 | // Pieter Wuille 5 | "seed.bitcoin.sipa.be:8333", 6 | // Matt Corallo 7 | "dnsseed.bluematt.me:8333", 8 | // Luke Dashjr 9 | "dnsseed.bitcoin.dashjr.org:8333", 10 | // Christian Decker 11 | "seed.bitcoinstats.com:8333", 12 | // Jonas Schnelli 13 | "seed.bitcoin.jonasschnelli.ch:8333", 14 | // Peter Todd 15 | "seed.btc.petertodd.org:8333", 16 | // 17 | "seed.voskuil.org:8333", 18 | ] 19 | } 20 | 21 | pub fn testnet_seednodes() -> Vec<&'static str> { 22 | vec![ 23 | /*("testnet-seed.bitcoin.jonasschnelli.ch:18333", 24 | "seed.tbtc.petertodd.org:18333", 25 | "testnet-seed.bluematt.me:18333", 26 | "testnet-seed.bitcoin.schildbach.de:18333", 27 | "testnet-seed.voskuil.org:18333",*/ 28 | ] 29 | } 30 | 31 | pub fn bitcoin_cash_seednodes() -> Vec<&'static str> { 32 | vec![ 33 | "seed.bitcoinabc.org:8333", 34 | "seed-abc.bitcoinforks.org:8333", 35 | "seed.bitprim.org:8333", 36 | "seed.deadalnix.me:8333", 37 | "seeder.criptolayer.net:8333", 38 | ] 39 | } 40 | 41 | pub fn bitcoin_cash_testnet_seednodes() -> Vec<&'static str> { 42 | vec![ 43 | "testnet-seed.bitcoinabc.org:18333", 44 | "testnet-seed-abc.bitcoinforks.org:18333", 45 | 
"testnet-seed.bitprim.org:18333", 46 | "testnet-seed.deadalnix.me:18333", 47 | "testnet-seeder.criptolayer.net:18333", 48 | ] 49 | } 50 | -------------------------------------------------------------------------------- /chainx_btc/util.rs: -------------------------------------------------------------------------------- 1 | use std::sync::Arc; 2 | use std::path::PathBuf; 3 | use std::fs::create_dir_all; 4 | use app_dirs::{app_dir, AppDataType}; 5 | use {storage, APP_INFO}; 6 | use db; 7 | use config::Config; 8 | use chain::IndexedBlock; 9 | 10 | pub fn open_db(data_dir: &Option, db_cache: usize) -> storage::SharedStore { 11 | let db_path = match *data_dir { 12 | Some(ref data_dir) => custom_path(&data_dir, "db"), 13 | None => app_dir(AppDataType::UserData, &APP_INFO, "db").expect("Failed to get app dir"), 14 | }; 15 | Arc::new( 16 | db::BlockChainDatabase::open_at_path(db_path, db_cache).expect("Failed to open database"), 17 | ) 18 | } 19 | 20 | pub fn node_table_path(cfg: &Config) -> PathBuf { 21 | let mut node_table = match cfg.data_dir { 22 | Some(ref data_dir) => custom_path(&data_dir, "p2p"), 23 | None => app_dir(AppDataType::UserData, &APP_INFO, "p2p").expect("Failed to get app dir"), 24 | }; 25 | node_table.push("nodes.csv"); 26 | node_table 27 | } 28 | 29 | pub fn init_db(cfg: &Config) -> Result<(), String> { 30 | // insert genesis block if db is empty 31 | let genesis_block: IndexedBlock = cfg.network.genesis_block().into(); 32 | match cfg.db.block_hash(0) { 33 | Some(ref db_genesis_block_hash) if db_genesis_block_hash != genesis_block.hash() => Err( 34 | "Trying to open database with incompatible genesis block".into(), 35 | ), 36 | Some(_) => Ok(()), 37 | None => { 38 | let hash = genesis_block.hash().clone(); 39 | cfg.db.insert(genesis_block).expect( 40 | "Failed to insert genesis block to the database", 41 | ); 42 | cfg.db.canonize(&hash).expect( 43 | "Failed to canonize genesis block", 44 | ); 45 | Ok(()) 46 | } 47 | } 48 | } 49 | 50 | fn custom_path(data_dir: &str, sub_dir: &str) -> PathBuf { 51 | let mut path = PathBuf::from(data_dir); 52 | path.push(sub_dir); 53 | create_dir_all(&path).expect("Failed to get app dir"); 54 | path 55 | } 56 | -------------------------------------------------------------------------------- /cmake-build-debug/CMakeFiles/clion-log.txt: -------------------------------------------------------------------------------- 1 | CMakeLists.txt not found in /home/fy/Desktop/bitcoin-rust Select CMakeLists.txt file... 
2 | -------------------------------------------------------------------------------- /crypto/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "bitcrypto" 3 | version = "0.1.0" 4 | authors = ["debris & chainpool "] 5 | 6 | [dependencies] 7 | siphasher = { version = "0.1.1", optional = true } 8 | primitives = { path = "../primitives", default-features = false } 9 | sr-std = { git = "https://github.com/chainx-org/sr-std", default-features = false } 10 | rand = { version = "^0.3", optional = true } 11 | rustc-serialize = { version = "^0.3", optional = true } 12 | 13 | [features] 14 | default = ["std"] 15 | std = [ 16 | "siphasher", 17 | "rand", 18 | "rustc-serialize", 19 | "primitives/std", 20 | "sr-std/std", 21 | ] 22 | -------------------------------------------------------------------------------- /crypto/src/digest.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2018 Chainpool 2 | #[cfg(feature = "std")] 3 | use rstd::iter::repeat; 4 | 5 | pub trait Digest { 6 | fn input(&mut self, input: &[u8]); 7 | fn result(&mut self, out: &mut [u8]); 8 | fn reset(&mut self); 9 | fn output_bits(&self) -> usize; 10 | 11 | fn output_bytes(&self) -> usize { 12 | (self.output_bits() + 7) / 8 13 | } 14 | 15 | fn block_size(&self) -> usize; 16 | 17 | 18 | fn input_str(&mut self, input: &str) { 19 | self.input(input.as_bytes()); 20 | } 21 | 22 | #[cfg(feature = "std")] 23 | fn result_str(&mut self) -> String { 24 | use serialize::hex::ToHex; 25 | 26 | let mut buf: Vec = repeat(0).take((self.output_bits()+7)/8).collect(); 27 | self.result(&mut buf); 28 | buf[..].to_hex() 29 | } 30 | 31 | } 32 | -------------------------------------------------------------------------------- /crypto/src/symmetriccipher.rs: -------------------------------------------------------------------------------- 1 | // Licensed under the Apache License, Version 2.0 or the MIT license 3 | // , at your 4 | // option. This file may not be copied, modified, or distributed 5 | // except according to those terms. 
6 | 7 | use buffer::{BufferResult, RefReadBuffer, RefWriteBuffer}; 8 | use cryptoutil::symm_enc_or_dec; 9 | use rstd::boxed::Box; 10 | 11 | pub trait BlockEncryptor { 12 | fn block_size(&self) -> usize; 13 | fn encrypt_block(&self, input: &[u8], output: &mut [u8]); 14 | } 15 | 16 | pub trait BlockEncryptorX8 { 17 | fn block_size(&self) -> usize; 18 | fn encrypt_block_x8(&self, input: &[u8], output: &mut [u8]); 19 | } 20 | 21 | pub trait BlockDecryptor { 22 | fn block_size(&self) -> usize; 23 | fn decrypt_block(&self, input: &[u8], output: &mut [u8]); 24 | } 25 | 26 | pub trait BlockDecryptorX8 { 27 | fn block_size(&self) -> usize; 28 | fn decrypt_block_x8(&self, input: &[u8], output: &mut [u8]); 29 | } 30 | 31 | #[derive(Debug, Clone, Copy)] 32 | pub enum SymmetricCipherError { 33 | InvalidLength, 34 | InvalidPadding 35 | } 36 | 37 | pub trait Encryptor { 38 | fn encrypt(&mut self, input: &mut RefReadBuffer, output: &mut RefWriteBuffer, eof: bool) 39 | -> Result; 40 | } 41 | 42 | pub trait Decryptor { 43 | fn decrypt(&mut self, input: &mut RefReadBuffer, output: &mut RefWriteBuffer, eof: bool) 44 | -> Result; 45 | } 46 | 47 | pub trait SynchronousStreamCipher { 48 | fn process(&mut self, input: &[u8], output: &mut [u8]); 49 | } 50 | 51 | // TODO - Its a bit unclear to me why this is necessary 52 | impl SynchronousStreamCipher for Box { 53 | fn process(&mut self, input: &[u8], output: &mut [u8]) { 54 | let me = &mut **self; 55 | me.process(input, output); 56 | } 57 | } 58 | 59 | impl Encryptor for Box { 60 | fn encrypt(&mut self, input: &mut RefReadBuffer, output: &mut RefWriteBuffer, _: bool) 61 | -> Result { 62 | symm_enc_or_dec(self, input, output) 63 | } 64 | } 65 | 66 | impl Decryptor for Box { 67 | fn decrypt(&mut self, input: &mut RefReadBuffer, output: &mut RefWriteBuffer, _: bool) 68 | -> Result { 69 | symm_enc_or_dec(self, input, output) 70 | } 71 | } 72 | -------------------------------------------------------------------------------- /db/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "db" 3 | version = "0.1.0" 4 | authors = ["Parity Technologies "] 5 | 6 | [dependencies] 7 | parity-rocksdb = { git = "https://github.com/paritytech/rust-rocksdb" } 8 | elastic-array = "0.6" 9 | parking_lot = "0.4" 10 | log = "0.4" 11 | bit-vec = "0.4" 12 | lru-cache = "0.1" 13 | primitives = { path = "../primitives" } 14 | serialization = { path = "../serialization" } 15 | chain = { path = "../chain" } 16 | storage = { path = "../storage" } 17 | 18 | [dev-dependencies] 19 | tempdir = "0.3" 20 | test-data = { path = "../test-data" } 21 | -------------------------------------------------------------------------------- /db/src/kv/cachedb.rs: -------------------------------------------------------------------------------- 1 | use lru_cache::LruCache; 2 | use parking_lot::Mutex; 3 | use hash::H256; 4 | use chain::BlockHeader; 5 | use kv::{KeyValueDatabase, KeyState, Operation, KeyValue, Key, Value, Transaction}; 6 | 7 | pub struct CacheDatabase where T: KeyValueDatabase { 8 | db: T, 9 | header: Mutex>>, 10 | } 11 | 12 | impl CacheDatabase where T: KeyValueDatabase { 13 | pub fn new(db: T) -> Self { 14 | CacheDatabase { 15 | db: db, 16 | // 144 (blocks per day) * 14 (days) + 100 (arbitrary number) 17 | header: Mutex::new(LruCache::new(2116)), 18 | } 19 | } 20 | } 21 | 22 | impl KeyValueDatabase for CacheDatabase where T: KeyValueDatabase { 23 | fn write(&self, tx: Transaction) -> Result<(), String> { 24 | for op in &tx.operations { 25 
| match *op { 26 | Operation::Insert(KeyValue::BlockHeader(ref hash, ref header)) => { 27 | self.header.lock().insert(hash.clone(), KeyState::Insert(header.clone())); 28 | }, 29 | Operation::Delete(Key::BlockHeader(ref hash)) => { 30 | self.header.lock().insert(hash.clone(), KeyState::Delete); 31 | }, 32 | _ => (), 33 | } 34 | } 35 | self.db.write(tx) 36 | } 37 | 38 | fn get(&self, key: &Key) -> Result, String> { 39 | if let Key::BlockHeader(ref hash) = *key { 40 | let mut header = self.header.lock(); 41 | if let Some(state) = header.get_mut(hash) { 42 | return Ok(state.clone().map(Value::BlockHeader)) 43 | } 44 | } 45 | self.db.get(key) 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /db/src/kv/db.rs: -------------------------------------------------------------------------------- 1 | use kv::{Transaction, KeyState, Key, Value}; 2 | 3 | pub trait KeyValueDatabase: Send + Sync { 4 | fn write(&self, tx: Transaction) -> Result<(), String>; 5 | 6 | fn get(&self, key: &Key) -> Result, String>; 7 | } 8 | -------------------------------------------------------------------------------- /db/src/kv/mod.rs: -------------------------------------------------------------------------------- 1 | mod cachedb; 2 | mod db; 3 | mod diskdb; 4 | mod memorydb; 5 | mod overlaydb; 6 | mod transaction; 7 | 8 | pub use self::cachedb::CacheDatabase; 9 | pub use self::db::KeyValueDatabase; 10 | pub use self::diskdb::{Database as DiskDatabase, DatabaseConfig, CompactionProfile}; 11 | pub use self::memorydb::{MemoryDatabase, SharedMemoryDatabase}; 12 | pub use self::overlaydb::{OverlayDatabase, AutoFlushingOverlayDatabase}; 13 | pub use self::transaction::{ 14 | RawTransaction, Transaction, RawOperation, Operation, Location, KeyState, 15 | Key, Value, KeyValue, RawKeyValue, RawKey, 16 | COL_COUNT, COL_META, COL_BLOCK_HASHES, COL_BLOCK_HEADERS, COL_BLOCK_TRANSACTIONS, 17 | COL_TRANSACTIONS, COL_TRANSACTIONS_META, COL_BLOCK_NUMBERS 18 | }; 19 | -------------------------------------------------------------------------------- /db/src/kv/overlaydb.rs: -------------------------------------------------------------------------------- 1 | use parking_lot::Mutex; 2 | use kv::{Transaction, Value, KeyValueDatabase, MemoryDatabase, KeyState, Key}; 3 | 4 | pub struct OverlayDatabase<'a, T> where T: 'a + KeyValueDatabase { 5 | db: &'a T, 6 | overlay: MemoryDatabase, 7 | } 8 | 9 | impl<'a, T> OverlayDatabase<'a, T> where T: 'a + KeyValueDatabase { 10 | pub fn new(db: &'a T) -> Self { 11 | OverlayDatabase { 12 | db: db, 13 | overlay: MemoryDatabase::default(), 14 | } 15 | } 16 | 17 | pub fn flush(&self) -> Result<(), String> { 18 | self.db.write(self.overlay.drain_transaction()) 19 | } 20 | } 21 | 22 | impl<'a, T> KeyValueDatabase for OverlayDatabase<'a, T> where T: 'a + KeyValueDatabase { 23 | fn write(&self, tx: Transaction) -> Result<(), String> { 24 | self.overlay.write(tx) 25 | } 26 | 27 | fn get(&self, key: &Key) -> Result, String> { 28 | match self.overlay.get(key)? 
{ 29 | KeyState::Unknown => self.db.get(key), 30 | exists => Ok(exists) 31 | } 32 | } 33 | } 34 | 35 | pub struct AutoFlushingOverlayDatabase where T: KeyValueDatabase { 36 | db: T, 37 | overlay: MemoryDatabase, 38 | operations: Mutex, 39 | max_operations: usize, 40 | } 41 | 42 | impl AutoFlushingOverlayDatabase where T: KeyValueDatabase { 43 | pub fn new(db: T, max_operations: usize) -> Self { 44 | AutoFlushingOverlayDatabase { 45 | db: db, 46 | overlay: MemoryDatabase::default(), 47 | operations: Mutex::default(), 48 | max_operations: max_operations, 49 | } 50 | } 51 | 52 | fn flush(&self) -> Result<(), String> { 53 | self.db.write(self.overlay.drain_transaction()) 54 | } 55 | } 56 | 57 | impl KeyValueDatabase for AutoFlushingOverlayDatabase where T: KeyValueDatabase { 58 | fn write(&self, tx: Transaction) -> Result<(), String> { 59 | let mut operations = self.operations.lock(); 60 | *operations += 1; 61 | self.overlay.write(tx)?; 62 | if *operations == self.max_operations { 63 | self.flush()?; 64 | *operations = 0; 65 | } 66 | Ok(()) 67 | } 68 | 69 | fn get(&self, key: &Key) -> Result, String> { 70 | match self.overlay.get(key)? { 71 | KeyState::Unknown => self.db.get(key), 72 | exists => Ok(exists) 73 | } 74 | } 75 | } 76 | 77 | impl Drop for AutoFlushingOverlayDatabase where T: KeyValueDatabase { 78 | fn drop(&mut self) { 79 | self.flush().expect("Failed to save database"); 80 | } 81 | } 82 | -------------------------------------------------------------------------------- /db/src/lib.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2018 Chainpool 2 | 3 | extern crate parity_rocksdb as rocksdb; 4 | extern crate elastic_array; 5 | extern crate parking_lot; 6 | #[macro_use] 7 | extern crate log; 8 | extern crate bit_vec; 9 | extern crate lru_cache; 10 | 11 | extern crate primitives; 12 | extern crate serialization as ser; 13 | extern crate chain; 14 | extern crate storage; 15 | 16 | pub mod kv; 17 | mod block_chain_db; 18 | 19 | pub use block_chain_db::{BlockChainDatabase, ForkChainDatabase}; 20 | pub use primitives::{hash, bytes}; 21 | -------------------------------------------------------------------------------- /docker/hub/Dockerfile: -------------------------------------------------------------------------------- 1 | # This Dockerfile uses Docker Multi-Stage Builds 2 | # See https://docs.docker.com/engine/userguide/eng-image/multistage-build/ 3 | 4 | # Build image 5 | FROM rust:1.23-jessie AS build 6 | 7 | #ENV for build TAG 8 | ARG BUILD_TAG 9 | ENV BUILD_TAG=${BUILD_TAG:-master} \ 10 | PATH=/root/.cargo/bin:$PATH \ 11 | RUST_BACKTRACE=1 12 | 13 | WORKDIR /build 14 | RUN echo $BUILD_TAG 15 | 16 | # install tools and dependencies 17 | RUN apt-get update && \ 18 | apt-get install -y --force-yes --no-install-recommends \ 19 | g++ \ 20 | build-essential \ 21 | curl \ 22 | git \ 23 | file \ 24 | binutils \ 25 | ca-certificates \ 26 | libssl-dev \ 27 | pkg-config \ 28 | libudev-dev 29 | 30 | # build pbtc-ubuntu 31 | RUN git clone https://github.com/paritytech/parity-bitcoin 32 | 33 | WORKDIR /build/parity-bitcoin 34 | RUN git pull 35 | RUN git checkout $BUILD_TAG 36 | RUN cargo build --verbose --release 37 | RUN strip /build/parity-bitcoin/target/release/pbtc 38 | RUN file /build/parity-bitcoin/target/release/pbtc 39 | 40 | # Runtime image, copies pbtc artifact from build image 41 | FROM ubuntu:16.04 AS run 42 | LABEL maintainer "Parity Technologies " 43 | 44 | WORKDIR /pbtc-ubuntu 45 | COPY --from=build 
/build/parity-bitcoin/target/release/pbtc/ /pbtc-ubuntu/pbtc-ubuntu 46 | 47 | # setup ENTRYPOINT 48 | EXPOSE 8333 18333 8332 18332 49 | ENTRYPOINT ["/pbtc-ubuntu/pbtc-ubuntu"] 50 | -------------------------------------------------------------------------------- /docker/ubuntu/Dockerfile: -------------------------------------------------------------------------------- 1 | # This Dockerfile uses Docker Multi-Stage Builds 2 | # See https://docs.docker.com/engine/userguide/eng-image/multistage-build/ 3 | 4 | ### Base Image 5 | # Setup up a base image to use in Build and Runtime images 6 | FROM rust:1.23-jessie AS build 7 | 8 | # rustup directory 9 | ENV PATH=/root/.cargo/bin:$PATH \ 10 | RUST_BACKTRACE=1 11 | 12 | WORKDIR /build/parity-bitcoin 13 | COPY . /build/parity-bitcoin 14 | 15 | # install tools and dependencies 16 | RUN apt-get update && \ 17 | apt-get install -y --force-yes --no-install-recommends \ 18 | g++ \ 19 | build-essential \ 20 | curl \ 21 | git \ 22 | file \ 23 | binutils \ 24 | ca-certificates \ 25 | libssl-dev \ 26 | pkg-config \ 27 | libudev-dev 28 | 29 | # show tools 30 | RUN rustc -vV 31 | RUN cargo -V 32 | RUN gcc -v 33 | RUN g++ -v 34 | 35 | # build pbtc 36 | RUN cargo build --release --verbose 37 | RUN strip /build/parity-bitcoin/target/release/pbtc 38 | RUN file /build/parity-bitcoin/target/release/pbtc 39 | 40 | # Runtime image, copies pbtc artifact from build image 41 | FROM ubuntu:16.04 AS run 42 | LABEL maintainer "Parity Technologies " 43 | 44 | WORKDIR /pbtc-ubuntu 45 | COPY --from=build /build/parity-bitcoin/target/release/pbtc /pbtc-ubuntu/ 46 | 47 | EXPOSE 8333 18333 8332 18332 48 | ENTRYPOINT ["/pbtc-ubuntu/pbtc"] 49 | -------------------------------------------------------------------------------- /import/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "import" 3 | version = "0.1.0" 4 | authors = ["debris "] 5 | 6 | [dependencies] 7 | log = "0.4" 8 | primitives = { path = "../primitives" } 9 | chain = { path = "../chain" } 10 | serialization = { path = "../serialization" } 11 | -------------------------------------------------------------------------------- /import/src/blk.rs: -------------------------------------------------------------------------------- 1 | use std::{io, fs, path}; 2 | use std::collections::BTreeSet; 3 | use ser::{ReadIterator, deserialize_iterator, Error as ReaderError}; 4 | use block::Block; 5 | use fs::read_blk_dir; 6 | 7 | pub fn open_blk_file

<P>(path: P) -> Result<BlkFile, io::Error> where P: AsRef<path::Path> { 8 | trace!("Opening blk file: {:?}", path.as_ref()); 9 | let file = try!(fs::File::open(path)); 10 | let blk_file = BlkFile { 11 | reader: deserialize_iterator(file), 12 | }; 13 | Ok(blk_file) 14 | } 15 | 16 | pub struct BlkFile { 17 | reader: ReadIterator<fs::File, Block>, 18 | } 19 | 20 | impl Iterator for BlkFile { 21 | type Item = Result<Block, ReaderError>; 22 | 23 | fn next(&mut self) -> Option<Self::Item> { 24 | self.reader.next() 25 | } 26 | } 27 | 28 | /// Creates iterator over bitcoind database blocks 29 | pub fn open_blk_dir

<P>(path: P) -> Result<BlkDir, io::Error> where P: AsRef<path::Path> { 30 | let files = read_blk_dir(path)?.collect::<Result<Vec<_>, _>>()?; 31 | 32 | let iter = files.into_iter() 33 | // flatten results... 34 | .flat_map(|file| open_blk_file(file.path)) 35 | // flat iterators over each block in each file 36 | .flat_map(|file| file); 37 | 38 | let blk_dir = BlkDir { 39 | iter: Box::new(iter), 40 | }; 41 | 42 | Ok(blk_dir) 43 | } 44 | 45 | /// Bitcoind database blocks iterator 46 | pub struct BlkDir { 47 | iter: Box<Iterator<Item = Result<Block, ReaderError>>>, 48 | } 49 | 50 | impl Iterator for BlkDir { 51 | type Item = Result<Block, ReaderError>; 52 | 53 | fn next(&mut self) -> Option<Self::Item> { 54 | self.iter.next() 55 | } 56 | } 57 | 58 | -------------------------------------------------------------------------------- /import/src/block.rs: -------------------------------------------------------------------------------- 1 | use primitives::io; 2 | use hash::H32; 3 | use ser::{Deserializable, Reader, Error as ReaderError}; 4 | use chain::IndexedBlock; 5 | 6 | #[derive(Debug, PartialEq)] 7 | pub struct Block { 8 | pub magic: H32, 9 | pub block_size: u32, 10 | pub block: IndexedBlock, 11 | } 12 | 13 | impl Deserializable for Block { 14 | fn deserialize<T>(reader: &mut Reader<T>) -> Result<Self, ReaderError> where T: io::Read { 15 | let block = Block { 16 | magic: try!(reader.read()), 17 | block_size: try!(reader.read()), 18 | block: try!(reader.read()), 19 | }; 20 | 21 | Ok(block) 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /import/src/lib.rs: -------------------------------------------------------------------------------- 1 | //! Bitcoind blockchain database importer 2 | 3 | #[macro_use] 4 | extern crate log; 5 | extern crate primitives; 6 | extern crate serialization as ser; 7 | extern crate chain; 8 | 9 | mod blk; 10 | mod block; 11 | mod fs; 12 | 13 | pub use primitives::{hash, bytes}; 14 | 15 | pub use blk::{open_blk_dir, BlkDir}; 16 | -------------------------------------------------------------------------------- /key/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "key" 3 | version = "0.1.0" 4 | authors = ["gguoss "] 5 | 6 | [dependencies] 7 | keys = { path = "../keys" } 8 | -------------------------------------------------------------------------------- /key/src/main.rs: -------------------------------------------------------------------------------- 1 | extern crate keys; 2 | 3 | use keys::generator::Generator; 4 | use keys::{Private, KeyPair}; 5 | 6 | fn main() { 7 | let random = keys::generator::Random::new(keys::Network::Testnet); 8 | let key = random.generate().unwrap(); 9 | println!("{:?}", key); 10 | let private = key.private(); 11 | let compressed_private = Private{ 12 | network: private.network.clone(), 13 | secret: private.secret.clone(), 14 | compressed: true, 15 | }; 16 | let compressed = KeyPair::from_private(compressed_private).unwrap(); 17 | println!("compressed public key: {}",compressed.public()); 18 | println!("address:{:?}", key.address().to_string()); 19 | } 20 | -------------------------------------------------------------------------------- /keys/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "keys" 3 | version = "0.1.0" 4 | authors = ["debris "] 5 | 6 | [dependencies] 7 | sr-std = { git = "https://github.com/chainx-org/sr-std", default-features = false } 8 | rand = { version = "0.4", optional = true } 9 | rustc-hex = { version = "2", optional = true } 10 | base58 = { version = "0.1", optional = true } 11 |
libsecp256k1 = "0.2.2" 12 | bitcrypto = { path = "../crypto", default-features = false } 13 | primitives = { path = "../primitives", default-features = false } 14 | parity-codec = { version = "3.0", default-features = false } 15 | parity-codec-derive = { version = "3.0", default-features = false } 16 | serde_bytes = { git = "https://github.com/serde-rs/bytes", optional = true } 17 | serialization = { path = "../serialization", default-features = false } 18 | serde = { version = "1.0", optional = true } 19 | serde_derive = { version = "1.0", optional = true } 20 | 21 | [features] 22 | default = ["std"] 23 | std = [ 24 | "sr-std/std", 25 | "rand", 26 | "rustc-hex", 27 | "base58", 28 | "bitcrypto/std", 29 | "parity-codec/std", 30 | "parity-codec-derive/std", 31 | "primitives/std", 32 | "serialization/std", 33 | "serde_bytes", 34 | "serde_derive", 35 | "serde" 36 | ] 37 | -------------------------------------------------------------------------------- /keys/src/display.rs: -------------------------------------------------------------------------------- 1 | use rstd::ops::Deref; 2 | use Error; 3 | 4 | pub trait DisplayLayout { 5 | type Target: Deref; 6 | 7 | fn layout(&self) -> Self::Target; 8 | 9 | fn from_layout(data: &[u8]) -> Result where Self: Sized; 10 | } 11 | -------------------------------------------------------------------------------- /keys/src/error.rs: -------------------------------------------------------------------------------- 1 | #[cfg(feature = "std")] 2 | use std::fmt; 3 | use secp256k1::Error as SecpError; 4 | 5 | #[cfg_attr(feature = "std", derive(Debug))] 6 | #[derive(PartialEq)] 7 | pub enum Error { 8 | InvalidPublic, 9 | InvalidSecret, 10 | InvalidMessage, 11 | InvalidSignature, 12 | InvalidNetwork, 13 | InvalidChecksum, 14 | InvalidPrivate, 15 | InvalidAddress, 16 | FailedKeyGeneration, 17 | } 18 | 19 | #[cfg(feature = "std")] 20 | impl fmt::Display for Error { 21 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 22 | let msg = match *self { 23 | Error::InvalidPublic => "Invalid Public", 24 | Error::InvalidSecret => "Invalid Secret", 25 | Error::InvalidMessage => "Invalid Message", 26 | Error::InvalidSignature => "Invalid Signature", 27 | Error::InvalidNetwork => "Invalid Network", 28 | Error::InvalidChecksum => "Invalid Checksum", 29 | Error::InvalidPrivate => "Invalid Private", 30 | Error::InvalidAddress => "Invalid Address", 31 | Error::FailedKeyGeneration => "Key generation failed", 32 | }; 33 | 34 | msg.fmt(f) 35 | } 36 | } 37 | 38 | impl From for Error { 39 | fn from(e: SecpError) -> Self { 40 | match e { 41 | SecpError::InvalidPublicKey => Error::InvalidPublic, 42 | SecpError::InvalidSecretKey => Error::InvalidSecret, 43 | SecpError::InvalidMessage => Error::InvalidMessage, 44 | _ => Error::InvalidSignature, 45 | } 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /keys/src/generator.rs: -------------------------------------------------------------------------------- 1 | #[cfg(feature = "std")] 2 | use rand::os::OsRng; 3 | use network::Network; 4 | use secp256k1::{PublicKey, SecretKey}; 5 | use {KeyPair, Error}; 6 | 7 | #[cfg(feature = "std")] 8 | pub trait Generator { 9 | fn generate(&self) -> Result; 10 | } 11 | 12 | pub struct Random { 13 | network: Network 14 | } 15 | 16 | impl Random { 17 | pub fn new(network: Network) -> Self { 18 | Random { 19 | network: network, 20 | } 21 | } 22 | } 23 | 24 | #[cfg(feature = "std")] 25 | impl Generator for Random { 26 | fn generate(&self) -> Result { 27 | let mut 
rng = OsRng::new().map_err(|_| Error::FailedKeyGeneration)?; 28 | let secret = SecretKey::random(&mut rng); 29 | let public = PublicKey::from_secret_key(&secret); 30 | Ok(KeyPair::from_keypair(secret, public, self.network)) 31 | } 32 | } 33 | -------------------------------------------------------------------------------- /keys/src/lib.rs: -------------------------------------------------------------------------------- 1 | //! Bitcoin keys. 2 | 3 | #![cfg_attr(not(feature = "std"), no_std)] 4 | #[cfg(feature = "std")] 5 | extern crate rand; 6 | #[cfg(feature = "std")] 7 | extern crate rustc_hex as hex; 8 | #[cfg(feature = "std")] 9 | extern crate base58; 10 | extern crate secp256k1; 11 | extern crate bitcrypto as crypto; 12 | extern crate primitives; 13 | extern crate sr_std as rstd; 14 | #[macro_use] 15 | extern crate parity_codec_derive; 16 | extern crate parity_codec as codec; 17 | extern crate serialization as ser; 18 | #[cfg(feature = "std")] 19 | extern crate serde_bytes; 20 | #[cfg(feature = "std")] 21 | #[macro_use] 22 | extern crate serde_derive; 23 | 24 | pub mod generator; 25 | mod address; 26 | mod display; 27 | mod keypair; 28 | mod error; 29 | mod network; 30 | mod private; 31 | mod public; 32 | mod signature; 33 | 34 | pub use primitives::{hash, bytes}; 35 | 36 | pub use address::{Type, Address}; 37 | pub use display::DisplayLayout; 38 | pub use keypair::KeyPair; 39 | pub use error::Error; 40 | pub use private::Private; 41 | pub use public::Public; 42 | pub use signature::{Signature, CompactSignature}; 43 | pub use network::Network; 44 | 45 | use hash::{H160, H256}; 46 | 47 | /// 20 bytes long hash derived from public `ripemd160(sha256(public))` 48 | pub type AddressHash = H160; 49 | /// 32 bytes long secret key 50 | pub type Secret = H256; 51 | /// 32 bytes long signable message 52 | pub type Message = H256; 53 | 54 | -------------------------------------------------------------------------------- /keys/src/network.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2018 Chainpool 2 | use ser::{serialize, deserialize, Serializable, Stream, Reader, Deserializable}; 3 | use primitives::io; 4 | 5 | #[cfg_attr(feature = "std", derive(Debug))] 6 | #[derive(PartialEq, Clone, Copy, Encode, Decode)] 7 | pub enum Network { 8 | Mainnet = 0, 9 | Testnet = 1, 10 | } 11 | 12 | impl Default for Network { 13 | fn default() -> Network { 14 | Network::Mainnet 15 | } 16 | } 17 | 18 | impl Network { 19 | pub fn from_u32(v: u32) -> Option { 20 | match v { 21 | 0 => Some(Network::Mainnet), 22 | 1 => Some(Network::Testnet), 23 | _ => None 24 | } 25 | } 26 | } 27 | 28 | impl Serializable for Network { 29 | fn serialize(&self, stream: &mut Stream) { 30 | match *self{ 31 | Network::Mainnet => stream.append(&Network::Mainnet), 32 | Network::Testnet => stream.append(&Network::Testnet), 33 | }; 34 | } 35 | } 36 | 37 | impl Deserializable for Network { 38 | fn deserialize(reader: &mut Reader) -> Result where T: io::Read { 39 | let t: u32 = try!(reader.read()); 40 | Network::from_u32(t).ok_or(io::ErrorKind::MalformedData) 41 | } 42 | } 43 | -------------------------------------------------------------------------------- /logs/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "logs" 3 | version = "0.1.0" 4 | authors = ["debris "] 5 | 6 | [dependencies] 7 | ansi_term = "0.9" 8 | log = "0.4" 9 | env_logger = "0.5" 10 | time = "0.1" 11 | 
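A brief usage sketch for the keys crate shown above (an editorial example, not a file from the repository): generating a random mainnet keypair with the Random generator. It assumes the crate is built with its "std" feature, since OsRng and the Generator trait are only compiled in that case; the function name is illustrative.

// Editorial sketch, not part of the repository; assumes the `std` feature of `keys`.
use keys::generator::{Generator, Random};
use keys::{Error, KeyPair, Network};

fn new_mainnet_keypair() -> Result<KeyPair, Error> {
    // `Random::new` stores the target network; `generate` draws a fresh secret key
    // from the OS RNG and derives the matching public key.
    Random::new(Network::Mainnet).generate()
}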
-------------------------------------------------------------------------------- /logs/src/lib.rs: -------------------------------------------------------------------------------- 1 | extern crate ansi_term; 2 | extern crate log; 3 | extern crate env_logger; 4 | extern crate time; 5 | 6 | use std::env; 7 | use ansi_term::Colour as Color; 8 | use log::{Record, Level}; 9 | use env_logger::Builder; 10 | use std::io::Write; 11 | 12 | fn strftime() -> String { 13 | time::strftime("%Y-%m-%d %H:%M:%S %Z", &time::now()).expect("Time is incorrectly formatted") 14 | } 15 | 16 | pub trait LogFormatter: Send + Sync + 'static { 17 | fn format(&self, log_record: &Record) -> String; 18 | } 19 | 20 | pub struct DateLogFormatter; 21 | 22 | impl LogFormatter for DateLogFormatter { 23 | fn format(&self, record: &Record) -> String { 24 | let timestamp = strftime(); 25 | format!("{} {} {} {}", timestamp, record.level(), record.target(), record.args()) 26 | } 27 | } 28 | 29 | pub struct DateAndColorLogFormatter; 30 | 31 | impl LogFormatter for DateAndColorLogFormatter { 32 | fn format(&self, record: &Record) -> String { 33 | let timestamp = strftime(); 34 | let log_level = match record.level() { 35 | Level::Error => Color::Fixed(9).bold().paint(record.level().to_string()), 36 | Level::Warn => Color::Fixed(11).bold().paint(record.level().to_string()), 37 | Level::Info => Color::Fixed(10).paint(record.level().to_string()), 38 | Level::Debug => Color::Fixed(14).paint(record.level().to_string()), 39 | Level::Trace => Color::Fixed(12).paint(record.level().to_string()), 40 | }; 41 | format!("{} {} {} {}" 42 | , Color::Fixed(8).bold().paint(timestamp) 43 | , log_level 44 | , Color::Fixed(8).paint(record.target()) 45 | , record.args()) 46 | } 47 | } 48 | 49 | pub fn init(filters: &str, formatter: T) where T: LogFormatter { 50 | let mut builder = Builder::new(); 51 | 52 | let filters = match env::var("RUST_LOG") { 53 | Ok(env_filters) => format!("{},{}", filters, env_filters), 54 | Err(_) => filters.into(), 55 | }; 56 | 57 | builder.parse(&filters); 58 | builder.format(move |buf, record| { 59 | writeln!(buf, "{}", formatter.format(record)) 60 | }); 61 | 62 | builder.init(); 63 | } 64 | -------------------------------------------------------------------------------- /message/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "message" 3 | version = "0.1.0" 4 | authors = ["debris "] 5 | 6 | [dependencies] 7 | byteorder = "1.0" 8 | 9 | bitcrypto = { path = "../crypto" } 10 | chain = { path = "../chain" } 11 | primitives = { path = "../primitives" } 12 | serialization = { path = "../serialization" } 13 | serialization_derive = { path = "../serialization_derive" } 14 | network = { path = "../network" } 15 | -------------------------------------------------------------------------------- /message/src/common/address.rs: -------------------------------------------------------------------------------- 1 | use bytes::Bytes; 2 | use ser::deserialize; 3 | use common::{Port, IpAddress, Services}; 4 | 5 | #[derive(Debug, Default, PartialEq, Clone, Serializable, Deserializable)] 6 | pub struct NetAddress { 7 | pub services: Services, 8 | pub address: IpAddress, 9 | pub port: Port, 10 | } 11 | 12 | impl From<&'static str> for NetAddress { 13 | fn from(s: &'static str) -> Self { 14 | let bytes: Bytes = s.into(); 15 | deserialize(bytes.as_ref()).unwrap() 16 | } 17 | } 18 | 19 | #[cfg(test)] 20 | mod tests { 21 | use ser::{serialize, deserialize}; 22 | use 
common::Services; 23 | use super::NetAddress; 24 | 25 | #[test] 26 | fn test_net_address_serialize() { 27 | let expected = vec![ 28 | 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 29 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x0a, 0x00, 0x00, 0x01, 30 | 0x20, 0x8d 31 | ].into(); 32 | 33 | let address = NetAddress { 34 | services: Services::default().with_network(true), 35 | address: "::ffff:a00:1".into(), 36 | port: 8333.into(), 37 | }; 38 | 39 | assert_eq!(serialize(&address), expected); 40 | } 41 | 42 | #[test] 43 | fn test_net_address_deserialize() { 44 | let bytes = vec![ 45 | 0x01u8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 46 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x0a, 0x00, 0x00, 0x01, 47 | 0x20, 0x8d 48 | ]; 49 | 50 | let expected = NetAddress { 51 | services: Services::default().with_network(true), 52 | address: "::ffff:a00:1".into(), 53 | port: 8333.into(), 54 | }; 55 | 56 | assert_eq!(expected, deserialize(&bytes as &[u8]).unwrap()); 57 | } 58 | 59 | #[test] 60 | fn test_net_address_from_static_str() { 61 | let expected = NetAddress { 62 | services: Services::default().with_network(true), 63 | address: "::ffff:a00:1".into(), 64 | port: 8333.into(), 65 | 66 | }; 67 | let s = "010000000000000000000000000000000000ffff0a000001208d"; 68 | assert_eq!(expected, s.into()); 69 | } 70 | } 71 | -------------------------------------------------------------------------------- /message/src/common/block_header_and_ids.rs: -------------------------------------------------------------------------------- 1 | use chain::{BlockHeader, ShortTransactionID}; 2 | use common::PrefilledTransaction; 3 | 4 | #[derive(Debug, PartialEq, Serializable, Deserializable)] 5 | pub struct BlockHeaderAndIDs { 6 | pub header: BlockHeader, 7 | pub nonce: u64, 8 | pub short_ids: Vec, 9 | pub prefilled_transactions: Vec, 10 | } 11 | -------------------------------------------------------------------------------- /message/src/common/block_transactions.rs: -------------------------------------------------------------------------------- 1 | use hash::H256; 2 | use chain::Transaction; 3 | 4 | #[derive(Debug, PartialEq, Serializable, Deserializable)] 5 | pub struct BlockTransactions { 6 | pub blockhash: H256, 7 | pub transactions: Vec, 8 | } 9 | -------------------------------------------------------------------------------- /message/src/common/block_transactions_request.rs: -------------------------------------------------------------------------------- 1 | use primitives::io; 2 | use hash::H256; 3 | use ser::{ 4 | Serializable, Stream, CompactInteger, 5 | Deserializable, Reader, Error as ReaderError, 6 | }; 7 | 8 | #[derive(Debug, PartialEq)] 9 | pub struct BlockTransactionsRequest { 10 | pub blockhash: H256, 11 | pub indexes: Vec, 12 | } 13 | 14 | impl Serializable for BlockTransactionsRequest { 15 | fn serialize(&self, stream: &mut Stream) { 16 | let indexes: Vec = self.indexes 17 | .iter() 18 | .map(|x| (*x).into()) 19 | .collect(); 20 | 21 | stream 22 | .append(&self.blockhash) 23 | .append_list(&indexes); 24 | } 25 | } 26 | 27 | impl Deserializable for BlockTransactionsRequest { 28 | fn deserialize(reader: &mut Reader) -> Result where T: io::Read { 29 | let blockhash = try!(reader.read()); 30 | let indexes: Vec = try!(reader.read_list()); 31 | 32 | let request = BlockTransactionsRequest { 33 | blockhash: blockhash, 34 | indexes: indexes.into_iter().map(Into::into).collect(), 35 | }; 36 | 37 | Ok(request) 38 | } 39 | } 40 | 
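The Serializable/Deserializable impls above write the transaction indexes out as a list of CompactIntegers. A hedged round-trip sketch (editorial, not a repository file; the message, serialization and primitives crates are assumed as dependencies and the function name is illustrative):

// Editorial sketch, not part of the repository.
use message::common::BlockTransactionsRequest;
use message::hash::H256;
use serialization::{deserialize, serialize};

fn block_transactions_request_round_trip() {
    let request = BlockTransactionsRequest {
        blockhash: H256::default(), // zero hash, sufficient for a round trip
        indexes: vec![1, 5, 29],    // encoded as CompactIntegers by `serialize`
    };
    let bytes = serialize(&request);
    let decoded: BlockTransactionsRequest = deserialize(bytes.as_ref()).unwrap();
    assert_eq!(request, decoded);
}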
-------------------------------------------------------------------------------- /message/src/common/mod.rs: -------------------------------------------------------------------------------- 1 | mod address; 2 | mod block_header_and_ids; 3 | mod block_transactions; 4 | mod block_transactions_request; 5 | mod command; 6 | mod inventory; 7 | mod ip; 8 | mod port; 9 | mod prefilled_transaction; 10 | mod service; 11 | 12 | pub use self::address::NetAddress; 13 | pub use self::block_header_and_ids::BlockHeaderAndIDs; 14 | pub use self::block_transactions::BlockTransactions; 15 | pub use self::block_transactions_request::BlockTransactionsRequest; 16 | pub use self::command::Command; 17 | pub use self::inventory::{InventoryVector, InventoryType}; 18 | pub use self::ip::IpAddress; 19 | pub use self::port::Port; 20 | pub use self::prefilled_transaction::PrefilledTransaction; 21 | pub use self::service::Services; 22 | -------------------------------------------------------------------------------- /message/src/common/port.rs: -------------------------------------------------------------------------------- 1 | 2 | use primitives::io; 3 | use primitives::io::{Read, Write}; 4 | use byteorder::{BigEndian, ReadBytesExt, WriteBytesExt}; 5 | use ser::{Serializable, Stream, Deserializable, Reader, Error as ReaderError}; 6 | 7 | #[derive(Debug, Default, PartialEq, Clone, Copy)] 8 | pub struct Port(u16); 9 | 10 | impl From for Port { 11 | fn from(port: u16) -> Self { 12 | Port(port) 13 | } 14 | } 15 | 16 | impl From for u16 { 17 | fn from(port: Port) -> Self { 18 | port.0 19 | } 20 | } 21 | 22 | impl Serializable for Port { 23 | fn serialize(&self, stream: &mut Stream) { 24 | stream.write_u16::(self.0).unwrap(); 25 | } 26 | } 27 | 28 | impl Deserializable for Port { 29 | fn deserialize(reader: &mut Reader) -> Result where T: io::Read { 30 | Ok(try!(reader.read_u16::().map(Port))) 31 | } 32 | } 33 | 34 | #[cfg(test)] 35 | mod tests { 36 | use ser::{serialize, deserialize}; 37 | use super::Port; 38 | 39 | #[test] 40 | fn test_port_serialize() { 41 | assert_eq!(serialize(&Port::from(1)), "0001".into()); 42 | assert_eq!(serialize(&Port::from(0x1234)), "1234".into()); 43 | } 44 | 45 | #[test] 46 | fn test_port_deserialize() { 47 | assert_eq!(Port::from(1), deserialize(&[0x00u8, 0x01] as &[u8]).unwrap()); 48 | assert_eq!(Port::from(0x1234), deserialize(&[0x12u8, 0x34] as &[u8]).unwrap()); 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /message/src/common/prefilled_transaction.rs: -------------------------------------------------------------------------------- 1 | use primitives::io; 2 | use ser::{ 3 | Serializable, Stream, CompactInteger, 4 | Deserializable, Reader, Error as ReaderError 5 | }; 6 | use chain::Transaction; 7 | 8 | #[derive(Debug, PartialEq)] 9 | pub struct PrefilledTransaction { 10 | pub index: usize, 11 | pub transaction: Transaction, 12 | } 13 | 14 | impl Serializable for PrefilledTransaction { 15 | fn serialize(&self, stream: &mut Stream) { 16 | stream 17 | .append(&CompactInteger::from(self.index)) 18 | .append(&self.transaction); 19 | } 20 | } 21 | 22 | impl Deserializable for PrefilledTransaction { 23 | fn deserialize(reader: &mut Reader) -> Result where T: io::Read { 24 | let compact: CompactInteger = try!(reader.read()); 25 | let tx = PrefilledTransaction { 26 | index: compact.into(), 27 | transaction: try!(reader.read()), 28 | }; 29 | 30 | Ok(tx) 31 | } 32 | } 33 | 
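Port is written in network byte order (big-endian), which is why the NetAddress fixtures earlier end with 0x20 0x8d for port 8333. A small editorial check (not a repository file):

// Editorial sketch, not part of the repository: port 8333 serializes big-endian.
use message::common::Port;
use serialization::serialize;

fn port_is_big_endian() {
    let bytes = serialize(&Port::from(8333u16));
    assert_eq!(bytes.as_ref(), &[0x20u8, 0x8d][..]);
}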
-------------------------------------------------------------------------------- /message/src/common/service.rs: -------------------------------------------------------------------------------- 1 | #[derive(Debug, Default, PartialEq, Eq, Clone, Copy, Serializable, Deserializable)] 2 | pub struct Services(u64); 3 | 4 | impl From for u64 { 5 | fn from(s: Services) -> Self { 6 | s.0 7 | } 8 | } 9 | 10 | impl From for Services { 11 | fn from(v: u64) -> Self { 12 | Services(v) 13 | } 14 | } 15 | 16 | impl Services { 17 | pub fn network(&self) -> bool { 18 | self.bit_at(0) 19 | } 20 | 21 | pub fn with_network(mut self, v: bool) -> Self { 22 | self.set_bit(0, v); 23 | self 24 | } 25 | 26 | pub fn getutxo(&self) -> bool { 27 | self.bit_at(1) 28 | } 29 | 30 | pub fn with_getutxo(mut self, v: bool) -> Self { 31 | self.set_bit(1, v); 32 | self 33 | } 34 | 35 | pub fn bloom(&self) -> bool { 36 | self.bit_at(2) 37 | } 38 | 39 | pub fn with_bloom(mut self, v: bool) -> Self { 40 | self.set_bit(2, v); 41 | self 42 | } 43 | 44 | pub fn witness(&self) -> bool { 45 | self.bit_at(3) 46 | } 47 | 48 | pub fn with_witness(mut self, v: bool) -> Self { 49 | self.set_bit(3, v); 50 | self 51 | } 52 | 53 | pub fn xthin(&self) -> bool { 54 | self.bit_at(4) 55 | } 56 | 57 | pub fn with_xthin(mut self, v: bool) -> Self { 58 | self.set_bit(4, v); 59 | self 60 | } 61 | 62 | pub fn bitcoin_cash(&self) -> bool { 63 | self.bit_at(5) 64 | } 65 | 66 | pub fn with_bitcoin_cash(mut self, v: bool) -> Self { 67 | self.set_bit(5, v); 68 | self 69 | } 70 | 71 | pub fn includes(&self, other: &Self) -> bool { 72 | self.0 & other.0 == other.0 73 | } 74 | 75 | fn set_bit(&mut self, bit: usize, bit_value: bool) { 76 | if bit_value { 77 | self.0 |= 1 << bit 78 | } else { 79 | self.0 &= !(1 << bit) 80 | } 81 | } 82 | 83 | fn bit_at(&self, bit: usize) -> bool { 84 | self.0 & (1 << bit) != 0 85 | } 86 | } 87 | 88 | #[cfg(test)] 89 | mod test { 90 | use super::Services; 91 | 92 | #[test] 93 | fn test_serivces_includes() { 94 | let s1 = Services::default() 95 | .with_witness(true) 96 | .with_xthin(true); 97 | let s2 = Services::default() 98 | .with_witness(true); 99 | 100 | assert!(s1.witness()); 101 | assert!(s1.xthin()); 102 | assert!(s2.witness()); 103 | assert!(!s2.xthin()); 104 | assert!(s1.includes(&s2)); 105 | assert!(!s2.includes(&s1)); 106 | } 107 | } 108 | -------------------------------------------------------------------------------- /message/src/error.rs: -------------------------------------------------------------------------------- 1 | use std::{fmt, error}; 2 | use ser::Error as ReaderError; 3 | 4 | pub type MessageResult = Result; 5 | 6 | #[derive(Debug, PartialEq, Clone)] 7 | pub enum Error { 8 | /// Deserialization failed. 9 | Deserialize, 10 | /// Command has wrong format or is unsupported. 11 | InvalidCommand, 12 | /// Network magic comes from different network. 13 | InvalidMagic, 14 | /// Invalid checksum. 15 | InvalidChecksum, 16 | /// Invalid version. 
17 | InvalidVersion, 18 | } 19 | 20 | impl From for Error { 21 | fn from(_: ReaderError) -> Self { 22 | Error::Deserialize 23 | } 24 | } 25 | 26 | impl fmt::Display for Error { 27 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 28 | f.write_str(error::Error::description(self)) 29 | } 30 | } 31 | 32 | impl error::Error for Error { 33 | fn description(&self) -> &str { 34 | match *self { 35 | Error::Deserialize => "Message Deserialization Error", 36 | Error::InvalidCommand => "Invalid Message Command", 37 | Error::InvalidMagic => "Invalid Network Magic", 38 | Error::InvalidChecksum => "Invalid message chacksum", 39 | Error::InvalidVersion => "Unsupported protocol version", 40 | } 41 | } 42 | } 43 | -------------------------------------------------------------------------------- /message/src/lib.rs: -------------------------------------------------------------------------------- 1 | extern crate byteorder; 2 | extern crate bitcrypto as crypto; 3 | extern crate chain; 4 | extern crate primitives; 5 | extern crate serialization as ser; 6 | #[macro_use] 7 | extern crate serialization_derive; 8 | extern crate network; 9 | 10 | pub mod common; 11 | mod message; 12 | mod serialization; 13 | pub mod types; 14 | mod error; 15 | 16 | pub use primitives::{hash, bytes}; 17 | 18 | pub use common::{Command, Services}; 19 | pub use message::{Message, MessageHeader, Payload, to_raw_message}; 20 | pub use serialization::{serialize_payload, deserialize_payload}; 21 | pub use error::{MessageResult, Error}; 22 | -------------------------------------------------------------------------------- /message/src/message/message.rs: -------------------------------------------------------------------------------- 1 | use ser::Stream; 2 | use bytes::{TaggedBytes, Bytes}; 3 | use network::Magic; 4 | use common::Command; 5 | use serialization::serialize_payload_with_flags; 6 | use {Payload, MessageResult, MessageHeader}; 7 | 8 | pub fn to_raw_message(magic: Magic, command: Command, payload: &Bytes) -> Bytes { 9 | let header = MessageHeader::for_data(magic, command, payload); 10 | let mut stream = Stream::default(); 11 | stream.append(&header); 12 | stream.append_slice(payload); 13 | stream.out() 14 | } 15 | 16 | pub struct Message { 17 | bytes: TaggedBytes, 18 | } 19 | 20 | impl Message where T: Payload { 21 | pub fn new(magic: Magic, version: u32, payload: &T) -> MessageResult { 22 | Self::with_flags(magic, version, payload, 0) 23 | } 24 | 25 | pub fn with_flags(magic: Magic, version: u32, payload: &T, serialization_flags: u32) -> MessageResult { 26 | let serialized = try!(serialize_payload_with_flags(payload, version, serialization_flags)); 27 | 28 | let message = Message { 29 | bytes: TaggedBytes::new(to_raw_message(magic, T::command().into(), &serialized)), 30 | }; 31 | 32 | Ok(message) 33 | } 34 | 35 | pub fn len(&self) -> usize { 36 | self.bytes.len() 37 | } 38 | } 39 | 40 | impl AsRef<[u8]> for Message { 41 | fn as_ref(&self) -> &[u8] { 42 | self.bytes.as_ref() 43 | } 44 | } 45 | 46 | impl From> for Bytes { 47 | fn from(m: Message) -> Self { 48 | m.bytes.into_raw() 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /message/src/message/message_header.rs: -------------------------------------------------------------------------------- 1 | use hash::H32; 2 | use ser::{Serializable, Stream, Reader}; 3 | use crypto::checksum; 4 | use network::Magic; 5 | use common::Command; 6 | use Error; 7 | 8 | #[derive(Debug, PartialEq)] 9 | pub struct MessageHeader { 10 | pub 
magic: Magic, 11 | pub command: Command, 12 | pub len: u32, 13 | pub checksum: H32, 14 | } 15 | 16 | impl MessageHeader { 17 | pub fn for_data(magic: Magic, command: Command, data: &[u8]) -> Self { 18 | MessageHeader { 19 | magic: magic, 20 | command: command, 21 | len: data.len() as u32, 22 | checksum: checksum(data), 23 | } 24 | } 25 | } 26 | 27 | impl MessageHeader { 28 | pub fn deserialize(data: &[u8], expected: Magic) -> Result { 29 | if data.len() != 24 { 30 | return Err(Error::Deserialize); 31 | } 32 | 33 | let mut reader = Reader::new(data); 34 | let magic: u32 = try!(reader.read()); 35 | let magic = Magic::from(magic); 36 | if expected != magic { 37 | return Err(Error::InvalidMagic); 38 | } 39 | 40 | let header = MessageHeader { 41 | magic: magic, 42 | command: try!(reader.read()), 43 | len: try!(reader.read()), 44 | checksum: try!(reader.read()), 45 | }; 46 | 47 | Ok(header) 48 | } 49 | } 50 | 51 | impl Serializable for MessageHeader { 52 | fn serialize(&self, stream: &mut Stream) { 53 | stream 54 | .append(&self.magic) 55 | .append(&self.command) 56 | .append(&self.len) 57 | .append(&self.checksum); 58 | } 59 | } 60 | 61 | #[cfg(test)] 62 | mod tests { 63 | use bytes::Bytes; 64 | use ser::serialize; 65 | use network::{Network, ConsensusFork}; 66 | use super::MessageHeader; 67 | 68 | #[test] 69 | fn test_message_header_serialization() { 70 | let expected = "f9beb4d96164647200000000000000001f000000ed52399b".into(); 71 | let header = MessageHeader { 72 | magic: Network::Mainnet.magic(&ConsensusFork::BitcoinCore), 73 | command: "addr".into(), 74 | len: 0x1f, 75 | checksum: "ed52399b".into(), 76 | }; 77 | 78 | assert_eq!(serialize(&header), expected); 79 | } 80 | 81 | #[test] 82 | fn test_message_header_deserialization() { 83 | let raw: Bytes = "f9beb4d96164647200000000000000001f000000ed52399b".into(); 84 | let expected = MessageHeader { 85 | magic: Network::Mainnet.magic(&ConsensusFork::BitcoinCore), 86 | command: "addr".into(), 87 | len: 0x1f, 88 | checksum: "ed52399b".into(), 89 | }; 90 | 91 | assert_eq!(expected, MessageHeader::deserialize(&raw, Network::Mainnet.magic(&ConsensusFork::BitcoinCore)).unwrap()); 92 | } 93 | } 94 | -------------------------------------------------------------------------------- /message/src/message/mod.rs: -------------------------------------------------------------------------------- 1 | mod message; 2 | mod message_header; 3 | pub mod payload; 4 | 5 | pub use self::message::{Message, to_raw_message}; 6 | pub use self::message_header::MessageHeader; 7 | pub use self::payload::Payload; 8 | -------------------------------------------------------------------------------- /message/src/message/payload.rs: -------------------------------------------------------------------------------- 1 | use primitives::io; 2 | use ser::{Reader, Stream}; 3 | use MessageResult; 4 | 5 | pub trait Payload: Send + 'static { 6 | fn version() -> u32; 7 | fn command() -> &'static str; 8 | fn deserialize_payload(reader: &mut Reader, version: u32) -> MessageResult where Self: Sized, T: io::Read; 9 | fn serialize_payload(&self, stream: &mut Stream, version: u32) -> MessageResult<()>; 10 | } 11 | -------------------------------------------------------------------------------- /message/src/serialization/mod.rs: -------------------------------------------------------------------------------- 1 | mod stream; 2 | mod reader; 3 | 4 | pub use self::stream::{serialize_payload, serialize_payload_with_flags}; 5 | pub use self::reader::deserialize_payload; 6 | 
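A short editorial sketch (not a repository file) of how an already-serialized payload is framed into a raw wire message with to_raw_message, mirroring the magic and command used in the message header tests above; the function name is illustrative.

// Editorial sketch, not part of the repository.
use message::bytes::Bytes;
use message::to_raw_message;
use network::{ConsensusFork, Network};

fn frame_addr_payload(payload: &Bytes) -> Bytes {
    let magic = Network::Mainnet.magic(&ConsensusFork::BitcoinCore);
    // The header carries the magic, the command, the payload length and its checksum.
    to_raw_message(magic, "addr".into(), payload)
}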
-------------------------------------------------------------------------------- /message/src/serialization/reader.rs: -------------------------------------------------------------------------------- 1 | 2 | use primitives::io; 3 | use ser::Reader; 4 | use Payload; 5 | use Error; 6 | 7 | pub fn deserialize_payload(buffer: &[u8], version: u32) -> Result where T: Payload { 8 | let mut reader = PayloadReader::new(buffer, version); 9 | let result = try!(reader.read()); 10 | if !reader.is_finished() { 11 | return Err(Error::Deserialize); 12 | } 13 | 14 | Ok(result) 15 | } 16 | 17 | pub struct PayloadReader { 18 | reader: Reader, 19 | version: u32, 20 | } 21 | 22 | impl<'a> PayloadReader<&'a [u8]> { 23 | pub fn new(buffer: &'a [u8], version: u32) -> Self { 24 | PayloadReader { 25 | reader: Reader::new(buffer), 26 | version: version, 27 | } 28 | } 29 | 30 | pub fn read(&mut self) -> Result where T: Payload { 31 | if T::version() > self.version { 32 | return Err(Error::InvalidVersion); 33 | } 34 | 35 | T::deserialize_payload(&mut self.reader, self.version) 36 | } 37 | 38 | pub fn is_finished(&mut self) -> bool { 39 | self.reader.is_finished() 40 | } 41 | } 42 | -------------------------------------------------------------------------------- /message/src/serialization/stream.rs: -------------------------------------------------------------------------------- 1 | use bytes::Bytes; 2 | use ser::Stream; 3 | use {Payload, Error, MessageResult}; 4 | 5 | pub fn serialize_payload(t: &T, version: u32) -> MessageResult where T: Payload { 6 | serialize_payload_with_flags(t, version, 0) 7 | } 8 | 9 | pub fn serialize_payload_with_flags(t: &T, version: u32, serialization_flags: u32) -> MessageResult where T: Payload { 10 | let mut stream = PayloadStream::new(version, serialization_flags); 11 | try!(stream.append(t)); 12 | Ok(stream.out()) 13 | } 14 | 15 | pub struct PayloadStream { 16 | stream: Stream, 17 | version: u32, 18 | } 19 | 20 | impl PayloadStream { 21 | pub fn new(version: u32, serialization_flags: u32) -> Self { 22 | PayloadStream { 23 | stream: Stream::with_flags(serialization_flags), 24 | version: version, 25 | } 26 | } 27 | 28 | pub fn append(&mut self, t: &T) -> MessageResult<()> where T: Payload { 29 | if T::version() > self.version { 30 | return Err(Error::InvalidVersion); 31 | } 32 | 33 | t.serialize_payload(&mut self.stream, self.version) 34 | } 35 | 36 | pub fn out(self) -> Bytes { 37 | self.stream.out() 38 | } 39 | } 40 | -------------------------------------------------------------------------------- /message/src/types/block.rs: -------------------------------------------------------------------------------- 1 | use primitives::io; 2 | use ser::{Stream, Reader}; 3 | use chain::Block as ChainBlock; 4 | use {Payload, MessageResult}; 5 | 6 | #[derive(Debug, PartialEq)] 7 | pub struct Block { 8 | pub block: ChainBlock, 9 | } 10 | 11 | impl Block { 12 | pub fn with_block(block: ChainBlock) -> Self { 13 | Block { 14 | block: block, 15 | } 16 | } 17 | } 18 | 19 | impl Payload for Block { 20 | fn version() -> u32 { 21 | 0 22 | } 23 | 24 | fn command() -> &'static str { 25 | "block" 26 | } 27 | 28 | fn deserialize_payload(reader: &mut Reader, _version: u32) -> MessageResult where T: io::Read { 29 | let tx = Block { 30 | block: try!(reader.read()), 31 | }; 32 | 33 | Ok(tx) 34 | } 35 | 36 | fn serialize_payload(&self, stream: &mut Stream, _version: u32) -> MessageResult<()> { 37 | stream.append(&self.block); 38 | Ok(()) 39 | } 40 | } 41 | 
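An editorial round-trip sketch (not a repository file) for the payload helpers above; Ping is one of the payload types defined further down in message/src/types/, and the function name is illustrative.

// Editorial sketch, not part of the repository.
use message::types::Ping;
use message::{deserialize_payload, serialize_payload};

fn ping_round_trip() {
    let ping = Ping::new(0xdead_beef);
    // Version 0 suffices because Ping::version() is 0.
    let bytes = serialize_payload(&ping, 0).unwrap();
    let decoded: Ping = deserialize_payload(bytes.as_ref(), 0).unwrap();
    assert_eq!(ping, decoded);
}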
-------------------------------------------------------------------------------- /message/src/types/blocktxn.rs: -------------------------------------------------------------------------------- 1 | use primitives::io; 2 | use ser::{Stream, Reader}; 3 | use common::BlockTransactions; 4 | use {MessageResult, Payload}; 5 | 6 | #[derive(Debug, PartialEq)] 7 | pub struct BlockTxn { 8 | pub request: BlockTransactions, 9 | } 10 | 11 | impl Payload for BlockTxn { 12 | fn version() -> u32 { 13 | 70014 14 | } 15 | 16 | fn command() -> &'static str { 17 | "blocktxn" 18 | } 19 | 20 | fn deserialize_payload(reader: &mut Reader, _version: u32) -> MessageResult where T: io::Read { 21 | let block = BlockTxn { 22 | request: try!(reader.read()), 23 | }; 24 | 25 | Ok(block) 26 | } 27 | 28 | fn serialize_payload(&self, stream: &mut Stream, _version: u32) -> MessageResult<()> { 29 | stream.append(&self.request); 30 | Ok(()) 31 | } 32 | } 33 | -------------------------------------------------------------------------------- /message/src/types/compactblock.rs: -------------------------------------------------------------------------------- 1 | use primitives::io; 2 | use ser::{Stream, Reader}; 3 | use common::BlockHeaderAndIDs; 4 | use {Payload, MessageResult}; 5 | 6 | #[derive(Debug, PartialEq)] 7 | pub struct CompactBlock { 8 | pub header: BlockHeaderAndIDs, 9 | } 10 | 11 | impl Payload for CompactBlock { 12 | fn version() -> u32 { 13 | 70014 14 | } 15 | 16 | fn command() -> &'static str { 17 | "cmpctblock" 18 | } 19 | 20 | fn deserialize_payload(reader: &mut Reader, _version: u32) -> MessageResult where T: io::Read { 21 | let block = CompactBlock { 22 | header: try!(reader.read()), 23 | }; 24 | 25 | Ok(block) 26 | } 27 | 28 | fn serialize_payload(&self, stream: &mut Stream, _version: u32) -> MessageResult<()> { 29 | stream.append(&self.header); 30 | Ok(()) 31 | } 32 | } 33 | -------------------------------------------------------------------------------- /message/src/types/feefilter.rs: -------------------------------------------------------------------------------- 1 | use primitives::io; 2 | use ser::{Stream, Reader}; 3 | use {Payload, MessageResult}; 4 | 5 | #[derive(Debug, PartialEq)] 6 | pub struct FeeFilter { 7 | pub fee_rate: u64, 8 | } 9 | 10 | impl FeeFilter { 11 | pub fn with_fee_rate(fee_rate: u64) -> Self { 12 | FeeFilter { 13 | fee_rate: fee_rate, 14 | } 15 | } 16 | } 17 | 18 | impl Payload for FeeFilter { 19 | fn version() -> u32 { 20 | 70013 21 | } 22 | 23 | fn command() -> &'static str { 24 | "feefilter" 25 | } 26 | 27 | fn deserialize_payload(reader: &mut Reader, _version: u32) -> MessageResult where T: io::Read { 28 | let fee_filter = FeeFilter { 29 | fee_rate: try!(reader.read()), 30 | }; 31 | 32 | Ok(fee_filter) 33 | } 34 | 35 | fn serialize_payload(&self, stream: &mut Stream, _version: u32) -> MessageResult<()> { 36 | stream.append(&self.fee_rate); 37 | Ok(()) 38 | } 39 | } 40 | -------------------------------------------------------------------------------- /message/src/types/filteradd.rs: -------------------------------------------------------------------------------- 1 | use primitives::io; 2 | use bytes::Bytes; 3 | use ser::{Stream, Reader}; 4 | use {Payload, MessageResult}; 5 | 6 | pub const FILTERADD_MAX_DATA_LEN: usize = 520; 7 | 8 | #[derive(Debug, PartialEq)] 9 | pub struct FilterAdd { 10 | // TODO: check how this should be serialized 11 | pub data: Bytes, 12 | } 13 | 14 | impl Payload for FilterAdd { 15 | fn version() -> u32 { 16 | 70001 17 | } 18 | 19 | fn command() -> &'static 
str { 20 | "filteradd" 21 | } 22 | 23 | fn deserialize_payload(reader: &mut Reader, _version: u32) -> MessageResult where T: io::Read { 24 | let filteradd = FilterAdd { 25 | data: try!(reader.read()), 26 | }; 27 | 28 | Ok(filteradd) 29 | } 30 | 31 | fn serialize_payload(&self, stream: &mut Stream, _version: u32) -> MessageResult<()> { 32 | stream.append(&self.data); 33 | Ok(()) 34 | } 35 | } 36 | -------------------------------------------------------------------------------- /message/src/types/filterclear.rs: -------------------------------------------------------------------------------- 1 | use primitives::io; 2 | use ser::{Stream, Reader}; 3 | use {Payload, MessageResult}; 4 | 5 | #[derive(Debug, PartialEq)] 6 | pub struct FilterClear; 7 | 8 | impl Payload for FilterClear { 9 | fn version() -> u32 { 10 | 70001 11 | } 12 | 13 | fn command() -> &'static str { 14 | "filterclear" 15 | } 16 | 17 | fn deserialize_payload(_reader: &mut Reader, _version: u32) -> MessageResult where T: io::Read { 18 | Ok(FilterClear) 19 | } 20 | 21 | fn serialize_payload(&self, _stream: &mut Stream, _version: u32) -> MessageResult<()> { 22 | Ok(()) 23 | } 24 | } 25 | -------------------------------------------------------------------------------- /message/src/types/getaddr.rs: -------------------------------------------------------------------------------- 1 | use primitives::io; 2 | use ser::{Stream, Reader}; 3 | use {Payload, MessageResult}; 4 | 5 | #[derive(Debug, PartialEq)] 6 | pub struct GetAddr; 7 | 8 | impl Payload for GetAddr { 9 | fn version() -> u32 { 10 | 0 11 | } 12 | 13 | fn command() -> &'static str { 14 | "getaddr" 15 | } 16 | 17 | fn deserialize_payload(_reader: &mut Reader, _version: u32) -> MessageResult where T: io::Read { 18 | Ok(GetAddr) 19 | } 20 | 21 | fn serialize_payload(&self, _stream: &mut Stream, _version: u32) -> MessageResult<()> { 22 | Ok(()) 23 | } 24 | } 25 | -------------------------------------------------------------------------------- /message/src/types/getblocks.rs: -------------------------------------------------------------------------------- 1 | use primitives::io; 2 | use hash::H256; 3 | use ser::{Stream, Reader}; 4 | use {Payload, MessageResult}; 5 | 6 | pub const GETBLOCKS_MAX_RESPONSE_HASHES: usize = 500; 7 | 8 | #[derive(Debug, PartialEq)] 9 | pub struct GetBlocks { 10 | pub version: u32, 11 | pub block_locator_hashes: Vec, 12 | pub hash_stop: H256, 13 | } 14 | 15 | impl Payload for GetBlocks { 16 | fn version() -> u32 { 17 | 0 18 | } 19 | 20 | fn command() -> &'static str { 21 | "getblocks" 22 | } 23 | 24 | fn deserialize_payload(reader: &mut Reader, _version: u32) -> MessageResult where T: io::Read { 25 | let get_blocks = GetBlocks { 26 | version: try!(reader.read()), 27 | block_locator_hashes: try!(reader.read_list_max(500)), 28 | hash_stop: try!(reader.read()), 29 | }; 30 | 31 | Ok(get_blocks) 32 | } 33 | 34 | fn serialize_payload(&self, stream: &mut Stream, _version: u32) -> MessageResult<()> { 35 | stream 36 | .append(&self.version) 37 | .append_list(&self.block_locator_hashes) 38 | .append(&self.hash_stop); 39 | Ok(()) 40 | } 41 | } 42 | 43 | -------------------------------------------------------------------------------- /message/src/types/getblocktxn.rs: -------------------------------------------------------------------------------- 1 | use primitives::io; 2 | use ser::{Stream, Reader}; 3 | use common::BlockTransactionsRequest; 4 | use {Payload, MessageResult}; 5 | 6 | #[derive(Debug, PartialEq)] 7 | pub struct GetBlockTxn { 8 | pub request: 
BlockTransactionsRequest, 9 | } 10 | 11 | impl Payload for GetBlockTxn { 12 | fn version() -> u32 { 13 | 70014 14 | } 15 | 16 | fn command() -> &'static str { 17 | "getblocktxn" 18 | } 19 | 20 | fn deserialize_payload(reader: &mut Reader, _version: u32) -> MessageResult where T: io::Read { 21 | let get_block = GetBlockTxn { 22 | request: try!(reader.read()), 23 | }; 24 | 25 | Ok(get_block) 26 | } 27 | 28 | fn serialize_payload(&self, stream: &mut Stream, _version: u32) -> MessageResult<()> { 29 | stream.append(&self.request); 30 | Ok(()) 31 | } 32 | } 33 | -------------------------------------------------------------------------------- /message/src/types/getdata.rs: -------------------------------------------------------------------------------- 1 | use primitives::io; 2 | use ser::{Stream, Reader}; 3 | use common::InventoryVector; 4 | use {Payload, MessageResult}; 5 | 6 | pub const GETDATA_MAX_INVENTORY_LEN: usize = 50_000; 7 | 8 | #[derive(Debug, PartialEq)] 9 | pub struct GetData { 10 | pub inventory: Vec, 11 | } 12 | 13 | impl GetData { 14 | pub fn with_inventory(inventory: Vec) -> Self { 15 | GetData { 16 | inventory: inventory, 17 | } 18 | } 19 | } 20 | 21 | impl Payload for GetData { 22 | fn version() -> u32 { 23 | 0 24 | } 25 | 26 | fn command() -> &'static str { 27 | "getdata" 28 | } 29 | 30 | fn deserialize_payload(reader: &mut Reader, _version: u32) -> MessageResult where T: io::Read { 31 | let inv = GetData { 32 | inventory: try!(reader.read_list_max(50_000)), 33 | }; 34 | 35 | Ok(inv) 36 | } 37 | 38 | fn serialize_payload(&self, stream: &mut Stream, _version: u32) -> MessageResult<()> { 39 | stream.append_list(&self.inventory); 40 | Ok(()) 41 | } 42 | } 43 | -------------------------------------------------------------------------------- /message/src/types/getheaders.rs: -------------------------------------------------------------------------------- 1 | use primitives::io; 2 | use hash::H256; 3 | use ser::{Stream, Reader}; 4 | use {Payload, MessageResult}; 5 | 6 | pub const GETHEADERS_MAX_RESPONSE_HEADERS: usize = 2_000; 7 | 8 | #[derive(Debug, PartialEq)] 9 | pub struct GetHeaders { 10 | pub version: u32, 11 | pub block_locator_hashes: Vec, 12 | pub hash_stop: H256, 13 | } 14 | 15 | impl GetHeaders { 16 | pub fn with_block_locator_hashes(block_locator_hashes: Vec) -> Self { 17 | GetHeaders { 18 | version: 0, // this field is ignored by implementations 19 | block_locator_hashes: block_locator_hashes, 20 | hash_stop: H256::default(), 21 | } 22 | } 23 | } 24 | 25 | impl Payload for GetHeaders { 26 | fn version() -> u32 { 27 | 0 28 | } 29 | 30 | fn command() -> &'static str { 31 | "getheaders" 32 | } 33 | 34 | fn deserialize_payload(reader: &mut Reader, _version: u32) -> MessageResult where T: io::Read { 35 | let get_blocks = GetHeaders { 36 | version: try!(reader.read()), 37 | block_locator_hashes: try!(reader.read_list_max(2000)), 38 | hash_stop: try!(reader.read()), 39 | }; 40 | 41 | Ok(get_blocks) 42 | } 43 | 44 | fn serialize_payload(&self, stream: &mut Stream, _version: u32) -> MessageResult<()> { 45 | stream 46 | .append(&self.version) 47 | .append_list(&self.block_locator_hashes) 48 | .append(&self.hash_stop); 49 | Ok(()) 50 | } 51 | } 52 | 53 | -------------------------------------------------------------------------------- /message/src/types/headers.rs: -------------------------------------------------------------------------------- 1 | use primitives::io; 2 | use chain::BlockHeader; 3 | use ser::{Stream, Reader, Serializable, Deserializable, CompactInteger, Error as 
ReaderError}; 4 | use {Payload, MessageResult}; 5 | 6 | pub const HEADERS_MAX_HEADERS_LEN: usize = 2000; 7 | 8 | #[derive(Debug, PartialEq)] 9 | pub struct Headers { 10 | pub headers: Vec, 11 | } 12 | 13 | impl Headers { 14 | pub fn with_headers(headers: Vec) -> Self { 15 | Headers { 16 | headers: headers, 17 | } 18 | } 19 | } 20 | 21 | #[derive(Debug, PartialEq)] 22 | struct HeaderWithTxnCount { 23 | header: BlockHeader, 24 | } 25 | 26 | impl From for BlockHeader { 27 | fn from(header: HeaderWithTxnCount) -> BlockHeader { 28 | header.header 29 | } 30 | } 31 | 32 | #[derive(Debug, PartialEq)] 33 | struct HeaderWithTxnCountRef<'a> { 34 | header: &'a BlockHeader, 35 | } 36 | 37 | impl<'a> From<&'a BlockHeader> for HeaderWithTxnCountRef<'a> { 38 | fn from(header: &'a BlockHeader) -> Self { 39 | HeaderWithTxnCountRef { 40 | header: header, 41 | } 42 | } 43 | } 44 | 45 | impl Payload for Headers { 46 | fn version() -> u32 { 47 | 0 48 | } 49 | 50 | fn command() -> &'static str { 51 | "headers" 52 | } 53 | 54 | fn deserialize_payload(reader: &mut Reader, _version: u32) -> MessageResult where T: io::Read { 55 | let headers_with_txn_count: Vec = try!(reader.read_list()); 56 | let headers = Headers { 57 | headers: headers_with_txn_count.into_iter().map(Into::into).collect(), 58 | }; 59 | 60 | Ok(headers) 61 | } 62 | 63 | fn serialize_payload(&self, stream: &mut Stream, _version: u32) -> MessageResult<()> { 64 | let headers_with_txn_count: Vec = self.headers.iter().map(Into::into).collect(); 65 | stream.append_list(&headers_with_txn_count); 66 | Ok(()) 67 | } 68 | } 69 | 70 | impl<'a> Serializable for HeaderWithTxnCountRef<'a> { 71 | fn serialize(&self, stream: &mut Stream) { 72 | stream 73 | .append(self.header) 74 | .append(&CompactInteger::from(0u32)); 75 | } 76 | } 77 | 78 | impl Deserializable for HeaderWithTxnCount { 79 | fn deserialize(reader: &mut Reader) -> Result where T: io::Read { 80 | let header = HeaderWithTxnCount { 81 | header: try!(reader.read()), 82 | }; 83 | 84 | let txn_count: CompactInteger = try!(reader.read()); 85 | if txn_count != 0u32.into() { 86 | return Err(io::ErrorKind::MalformedData); 87 | } 88 | 89 | Ok(header) 90 | } 91 | } 92 | -------------------------------------------------------------------------------- /message/src/types/inv.rs: -------------------------------------------------------------------------------- 1 | use primitives::io; 2 | use ser::{Stream, Reader}; 3 | use common::InventoryVector; 4 | use {Payload, MessageResult}; 5 | 6 | pub const INV_MAX_INVENTORY_LEN: usize = 50_000; 7 | 8 | #[derive(Debug, PartialEq)] 9 | pub struct Inv { 10 | pub inventory: Vec, 11 | } 12 | 13 | impl Inv { 14 | pub fn with_inventory(inventory: Vec) -> Self { 15 | Inv { 16 | inventory: inventory, 17 | } 18 | } 19 | } 20 | 21 | impl Payload for Inv { 22 | fn version() -> u32 { 23 | 0 24 | } 25 | 26 | fn command() -> &'static str { 27 | "inv" 28 | } 29 | 30 | fn deserialize_payload(reader: &mut Reader, _version: u32) -> MessageResult where T: io::Read { 31 | let inv = Inv { 32 | inventory: try!(reader.read_list_max(50_000)), 33 | }; 34 | 35 | Ok(inv) 36 | } 37 | 38 | fn serialize_payload(&self, stream: &mut Stream, _version: u32) -> MessageResult<()> { 39 | stream.append_list(&self.inventory); 40 | Ok(()) 41 | } 42 | } 43 | -------------------------------------------------------------------------------- /message/src/types/mempool.rs: -------------------------------------------------------------------------------- 1 | use primitives::io; 2 | use ser::{Stream, Reader}; 3 | use 
{Payload, MessageResult}; 4 | 5 | #[derive(Debug, PartialEq)] 6 | pub struct MemPool; 7 | 8 | impl Payload for MemPool { 9 | fn version() -> u32 { 10 | 60002 11 | } 12 | 13 | fn command() -> &'static str { 14 | "mempool" 15 | } 16 | 17 | fn deserialize_payload(_reader: &mut Reader, _version: u32) -> MessageResult where T: io::Read { 18 | Ok(MemPool) 19 | } 20 | 21 | fn serialize_payload(&self, _stream: &mut Stream, _version: u32) -> MessageResult<()> { 22 | Ok(()) 23 | } 24 | } 25 | -------------------------------------------------------------------------------- /message/src/types/merkle_block.rs: -------------------------------------------------------------------------------- 1 | use primitives::io; 2 | use hash::H256; 3 | use bytes::Bytes; 4 | use ser::{Stream, Reader}; 5 | use chain::BlockHeader; 6 | use {Payload, MessageResult}; 7 | 8 | #[derive(Debug, PartialEq)] 9 | pub struct MerkleBlock { 10 | pub block_header: BlockHeader, 11 | pub total_transactions: u32, 12 | pub hashes: Vec, 13 | pub flags: Bytes, 14 | } 15 | 16 | impl Payload for MerkleBlock { 17 | fn version() -> u32 { 18 | 70014 19 | } 20 | 21 | fn command() -> &'static str { 22 | "merkleblock" 23 | } 24 | 25 | fn deserialize_payload(reader: &mut Reader, _version: u32) -> MessageResult where T: io::Read { 26 | let merkle_block = MerkleBlock { 27 | block_header: try!(reader.read()), 28 | total_transactions: try!(reader.read()), 29 | hashes: try!(reader.read_list()), 30 | flags: try!(reader.read()), 31 | }; 32 | 33 | Ok(merkle_block) 34 | } 35 | 36 | fn serialize_payload(&self, stream: &mut Stream, _version: u32) -> MessageResult<()> { 37 | stream 38 | .append(&self.block_header) 39 | .append(&self.total_transactions) 40 | .append_list(&self.hashes) 41 | .append(&self.flags); 42 | Ok(()) 43 | } 44 | } 45 | -------------------------------------------------------------------------------- /message/src/types/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod addr; 2 | mod block; 3 | mod blocktxn; 4 | mod compactblock; 5 | mod feefilter; 6 | mod filteradd; 7 | mod filterclear; 8 | mod filterload; 9 | mod getaddr; 10 | mod getblocks; 11 | mod getblocktxn; 12 | mod getdata; 13 | mod getheaders; 14 | mod headers; 15 | mod inv; 16 | mod mempool; 17 | mod merkle_block; 18 | mod notfound; 19 | mod ping; 20 | mod pong; 21 | pub mod reject; 22 | mod sendcompact; 23 | mod sendheaders; 24 | mod tx; 25 | mod verack; 26 | pub mod version; 27 | 28 | pub use self::addr::Addr; 29 | pub use self::block::Block; 30 | pub use self::blocktxn::BlockTxn; 31 | pub use self::compactblock::CompactBlock; 32 | pub use self::feefilter::FeeFilter; 33 | pub use self::filterload::{FilterLoad, FILTERLOAD_MAX_FILTER_LEN, FILTERLOAD_MAX_HASH_FUNCS}; 34 | pub use self::filterload::FilterFlags; 35 | pub use self::filterclear::FilterClear; 36 | pub use self::filteradd::{FilterAdd, FILTERADD_MAX_DATA_LEN}; 37 | pub use self::getaddr::GetAddr; 38 | pub use self::getblocks::{GetBlocks, GETBLOCKS_MAX_RESPONSE_HASHES}; 39 | pub use self::getblocktxn::GetBlockTxn; 40 | pub use self::getdata::{GetData, GETDATA_MAX_INVENTORY_LEN}; 41 | pub use self::getheaders::{GetHeaders, GETHEADERS_MAX_RESPONSE_HEADERS}; 42 | pub use self::headers::{Headers, HEADERS_MAX_HEADERS_LEN}; 43 | pub use self::inv::{Inv, INV_MAX_INVENTORY_LEN}; 44 | pub use self::mempool::MemPool; 45 | pub use self::merkle_block::MerkleBlock; 46 | pub use self::notfound::NotFound; 47 | pub use self::ping::Ping; 48 | pub use self::pong::Pong; 49 | pub use 
self::reject::Reject; 50 | pub use self::sendcompact::SendCompact; 51 | pub use self::sendheaders::SendHeaders; 52 | pub use self::tx::Tx; 53 | pub use self::verack::Verack; 54 | pub use self::version::Version; 55 | -------------------------------------------------------------------------------- /message/src/types/notfound.rs: -------------------------------------------------------------------------------- 1 | use primitives::io; 2 | use ser::{Stream, Reader}; 3 | use common::InventoryVector; 4 | use {Payload, MessageResult}; 5 | 6 | #[derive(Debug, PartialEq)] 7 | pub struct NotFound { 8 | pub inventory: Vec, 9 | } 10 | 11 | impl NotFound { 12 | pub fn with_inventory(inventory: Vec) -> Self { 13 | NotFound { 14 | inventory: inventory, 15 | } 16 | } 17 | } 18 | 19 | impl Payload for NotFound { 20 | fn version() -> u32 { 21 | 0 22 | } 23 | 24 | fn command() -> &'static str { 25 | "notfound" 26 | } 27 | 28 | fn deserialize_payload(reader: &mut Reader, _version: u32) -> MessageResult where T: io::Read { 29 | let inv = NotFound { 30 | inventory: try!(reader.read_list_max(50_000)), 31 | }; 32 | 33 | Ok(inv) 34 | } 35 | 36 | fn serialize_payload(&self, stream: &mut Stream, _version: u32) -> MessageResult<()> { 37 | stream.append_list(&self.inventory); 38 | Ok(()) 39 | } 40 | } 41 | -------------------------------------------------------------------------------- /message/src/types/ping.rs: -------------------------------------------------------------------------------- 1 | use primitives::io; 2 | use ser::{Stream, Reader}; 3 | use {MessageResult, Payload}; 4 | 5 | #[derive(Debug, PartialEq)] 6 | pub struct Ping { 7 | pub nonce: u64, 8 | } 9 | 10 | impl Ping { 11 | pub fn new(nonce: u64) -> Self { 12 | Ping { 13 | nonce: nonce, 14 | } 15 | } 16 | } 17 | 18 | impl Payload for Ping { 19 | fn version() -> u32 { 20 | 0 21 | } 22 | 23 | fn command() -> &'static str { 24 | "ping" 25 | } 26 | 27 | fn deserialize_payload(reader: &mut Reader, _version: u32) -> MessageResult where T: io::Read { 28 | let ping = Ping { 29 | nonce: try!(reader.read()), 30 | }; 31 | 32 | Ok(ping) 33 | } 34 | 35 | fn serialize_payload(&self, stream: &mut Stream, _version: u32) -> MessageResult<()> { 36 | stream.append(&self.nonce); 37 | Ok(()) 38 | } 39 | } 40 | -------------------------------------------------------------------------------- /message/src/types/pong.rs: -------------------------------------------------------------------------------- 1 | use primitives::io; 2 | use ser::{Stream, Reader}; 3 | use {Payload, MessageResult}; 4 | 5 | #[derive(Debug, PartialEq)] 6 | pub struct Pong { 7 | pub nonce: u64, 8 | } 9 | 10 | impl Pong { 11 | pub fn new(nonce: u64) -> Self { 12 | Pong { 13 | nonce: nonce, 14 | } 15 | } 16 | } 17 | 18 | impl Payload for Pong { 19 | fn version() -> u32 { 20 | 0 21 | } 22 | 23 | fn command() -> &'static str { 24 | "pong" 25 | } 26 | 27 | fn deserialize_payload(reader: &mut Reader, _version: u32) -> MessageResult where T: io::Read { 28 | let pong = Pong { 29 | nonce: try!(reader.read()), 30 | }; 31 | 32 | Ok(pong) 33 | } 34 | 35 | fn serialize_payload(&self, stream: &mut Stream, _version: u32) -> MessageResult<()> { 36 | stream.append(&self.nonce); 37 | Ok(()) 38 | } 39 | } 40 | -------------------------------------------------------------------------------- /message/src/types/reject.rs: -------------------------------------------------------------------------------- 1 | use primitives::io; 2 | use ser::{Serializable, Stream, Deserializable, Reader, Error as ReaderError}; 3 | use {Payload, 
MessageResult}; 4 | 5 | #[derive(Debug, PartialEq, Clone, Copy)] 6 | #[repr(u8)] 7 | pub enum RejectCode { 8 | Malformed = 0x01, 9 | Invalid = 0x10, 10 | Obsolate = 0x11, 11 | Duplicate = 0x12, 12 | Nonstandard = 0x40, 13 | Dust = 0x41, 14 | InsuficientFee = 0x42, 15 | Checkpoint = 0x43, 16 | } 17 | 18 | impl From for u8 { 19 | fn from(c: RejectCode) -> Self { 20 | c as u8 21 | } 22 | } 23 | 24 | impl RejectCode { 25 | pub fn from_u8(v: u8) -> Option { 26 | let some = match v { 27 | 0x01 => RejectCode::Malformed, 28 | 0x10 => RejectCode::Invalid, 29 | 0x11 => RejectCode::Obsolate, 30 | 0x12 => RejectCode::Duplicate, 31 | 0x40 => RejectCode::Nonstandard, 32 | 0x41 => RejectCode::Dust, 33 | 0x42 => RejectCode::InsuficientFee, 34 | 0x43 => RejectCode::Checkpoint, 35 | _ => return None, 36 | }; 37 | 38 | Some(some) 39 | } 40 | } 41 | 42 | impl Serializable for RejectCode { 43 | fn serialize(&self, stream: &mut Stream) { 44 | stream.append(&u8::from(*self)); 45 | } 46 | } 47 | 48 | impl Deserializable for RejectCode { 49 | fn deserialize(reader: &mut Reader) -> Result where T: io::Read { 50 | let v: u8 = try!(reader.read()); 51 | RejectCode::from_u8(v).ok_or_else(|| io::ErrorKind::MalformedData) 52 | } 53 | } 54 | 55 | #[derive(Debug, PartialEq)] 56 | pub struct Reject { 57 | pub message: String, 58 | pub code: RejectCode, 59 | pub reason: String, 60 | // TODO: data 61 | } 62 | 63 | impl Payload for Reject { 64 | fn version() -> u32 { 65 | 0 66 | } 67 | 68 | fn command() -> &'static str { 69 | "reject" 70 | } 71 | 72 | fn deserialize_payload(reader: &mut Reader, _version: u32) -> MessageResult where T: io::Read { 73 | let reject = Reject { 74 | message: try!(reader.read()), 75 | code: try!(reader.read()), 76 | reason: try!(reader.read()), 77 | }; 78 | 79 | Ok(reject) 80 | } 81 | 82 | fn serialize_payload(&self, stream: &mut Stream, _version: u32) -> MessageResult<()> { 83 | stream 84 | .append(&self.message) 85 | .append(&self.code) 86 | .append(&self.reason); 87 | Ok(()) 88 | } 89 | } 90 | -------------------------------------------------------------------------------- /message/src/types/sendcompact.rs: -------------------------------------------------------------------------------- 1 | use primitives::io; 2 | use ser::{Stream, Reader}; 3 | use {Payload, MessageResult}; 4 | 5 | #[derive(Debug, PartialEq)] 6 | pub struct SendCompact { 7 | pub first: bool, 8 | pub second: u64, 9 | } 10 | 11 | impl Payload for SendCompact { 12 | fn version() -> u32 { 13 | 70014 14 | } 15 | 16 | fn command() -> &'static str { 17 | "sendcmpct" 18 | } 19 | 20 | fn deserialize_payload(reader: &mut Reader, _version: u32) -> MessageResult where T: io::Read { 21 | let send_compact = SendCompact { 22 | first: try!(reader.read()), 23 | second: try!(reader.read()), 24 | }; 25 | 26 | Ok(send_compact) 27 | } 28 | 29 | fn serialize_payload(&self, stream: &mut Stream, _version: u32) -> MessageResult<()> { 30 | stream 31 | .append(&self.first) 32 | .append(&self.second); 33 | Ok(()) 34 | } 35 | } 36 | -------------------------------------------------------------------------------- /message/src/types/sendheaders.rs: -------------------------------------------------------------------------------- 1 | use primitives::io; 2 | use ser::{Stream, Reader}; 3 | use {Payload, MessageResult}; 4 | 5 | #[derive(Debug, PartialEq)] 6 | pub struct SendHeaders; 7 | 8 | impl Payload for SendHeaders { 9 | fn version() -> u32 { 10 | 70012 11 | } 12 | 13 | fn command() -> &'static str { 14 | "sendheaders" 15 | } 16 | 17 | fn 
deserialize_payload(_reader: &mut Reader, _version: u32) -> MessageResult where T: io::Read { 18 | Ok(SendHeaders) 19 | } 20 | 21 | fn serialize_payload(&self, _stream: &mut Stream, _version: u32) -> MessageResult<()> { 22 | Ok(()) 23 | } 24 | } 25 | -------------------------------------------------------------------------------- /message/src/types/tx.rs: -------------------------------------------------------------------------------- 1 | use primitives::io; 2 | use ser::{Stream, Reader}; 3 | use chain::Transaction; 4 | use {Payload, MessageResult}; 5 | 6 | #[derive(Debug, PartialEq)] 7 | pub struct Tx { 8 | pub transaction: Transaction, 9 | } 10 | 11 | impl Tx { 12 | pub fn with_transaction(transaction: Transaction) -> Self { 13 | Tx { 14 | transaction: transaction, 15 | } 16 | } 17 | } 18 | 19 | impl Payload for Tx { 20 | fn version() -> u32 { 21 | 0 22 | } 23 | 24 | fn command() -> &'static str { 25 | "tx" 26 | } 27 | 28 | fn deserialize_payload(reader: &mut Reader, _version: u32) -> MessageResult where T: io::Read { 29 | let tx = Tx { 30 | transaction: try!(reader.read()), 31 | }; 32 | 33 | Ok(tx) 34 | } 35 | 36 | fn serialize_payload(&self, stream: &mut Stream, _version: u32) -> MessageResult<()> { 37 | stream.append(&self.transaction); 38 | Ok(()) 39 | } 40 | } 41 | -------------------------------------------------------------------------------- /message/src/types/verack.rs: -------------------------------------------------------------------------------- 1 | use primitives::io; 2 | use ser::{Stream, Reader}; 3 | use {Payload, MessageResult}; 4 | 5 | #[derive(Debug, PartialEq)] 6 | pub struct Verack; 7 | 8 | impl Payload for Verack { 9 | fn version() -> u32 { 10 | 0 11 | } 12 | 13 | fn command() -> &'static str { 14 | "verack" 15 | } 16 | 17 | fn deserialize_payload(_reader: &mut Reader, _version: u32) -> MessageResult where T: io::Read { 18 | Ok(Verack) 19 | } 20 | 21 | fn serialize_payload(&self, _stream: &mut Stream, _version: u32) -> MessageResult<()> { 22 | Ok(()) 23 | } 24 | } 25 | -------------------------------------------------------------------------------- /miner/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "miner" 3 | version = "0.1.0" 4 | authors = ["Ethcore "] 5 | 6 | [dependencies] 7 | byteorder = "1.0" 8 | heapsize = "0.4" 9 | bitcrypto = { path = "../crypto" } 10 | chain = { path = "../chain" } 11 | storage = { path = "../storage" } 12 | db = { path = "../db" } 13 | network = { path = "../network" } 14 | primitives = { path = "../primitives" } 15 | serialization = { path = "../serialization" } 16 | verification = { path = "../verification" } 17 | keys = { path = "../keys" } 18 | script = { path = "../script" } 19 | rand = "0.4" 20 | log = "0.4" 21 | 22 | [dev-dependencies] 23 | test-data = { path = "../test-data" } 24 | -------------------------------------------------------------------------------- /miner/src/fee.rs: -------------------------------------------------------------------------------- 1 | use chain::Transaction; 2 | use ser::Serializable; 3 | use storage::TransactionProvider; 4 | 5 | pub fn transaction_fee(store: &TransactionProvider, transaction: &Transaction) -> u64 { 6 | let inputs_sum = transaction.inputs.iter().map(|input| { 7 | let input_transaction = store.transaction(&input.previous_output.hash) 8 | .expect("transaction must be verified by caller"); 9 | input_transaction.outputs[input.previous_output.index as usize].value 10 | }).sum::(); 11 | let outputs_sum = 
transaction.outputs.iter().map(|output| output.value).sum(); 12 | inputs_sum.saturating_sub(outputs_sum) 13 | } 14 | 15 | pub fn transaction_fee_rate(store: &TransactionProvider, transaction: &Transaction) -> u64 { 16 | transaction_fee(store, transaction) / transaction.serialized_size() as u64 17 | } 18 | 19 | #[cfg(test)] 20 | mod tests { 21 | extern crate test_data; 22 | 23 | use std::sync::Arc; 24 | use storage::{AsSubstore}; 25 | use db::BlockChainDatabase; 26 | use super::*; 27 | 28 | #[test] 29 | fn test_transaction_fee() { 30 | let b0 = test_data::block_builder().header().nonce(1).build() 31 | .transaction() 32 | .output().value(1_000_000).build() 33 | .output().value(2_000_000).build() 34 | .build() 35 | .build(); 36 | let tx0 = b0.transactions[0].clone(); 37 | let tx0_hash = tx0.hash(); 38 | let b1 = test_data::block_builder().header().parent(b0.hash().clone()).nonce(2).build() 39 | .transaction() 40 | .input().hash(tx0_hash.clone()).index(0).build() 41 | .input().hash(tx0_hash).index(1).build() 42 | .output().value(2_500_000).build() 43 | .build() 44 | .build(); 45 | let tx2 = b1.transactions[0].clone(); 46 | 47 | let db = Arc::new(BlockChainDatabase::init_test_chain(vec![b0.into(), b1.into()])); 48 | 49 | assert_eq!(transaction_fee(db.as_transaction_provider(), &tx0), 0); 50 | assert_eq!(transaction_fee(db.as_transaction_provider(), &tx2), 500_000); 51 | 52 | assert_eq!(transaction_fee_rate(db.as_transaction_provider(), &tx0), 0); 53 | assert_eq!(transaction_fee_rate(db.as_transaction_provider(), &tx2), 4_901); 54 | } 55 | } 56 | -------------------------------------------------------------------------------- /miner/src/lib.rs: -------------------------------------------------------------------------------- 1 | extern crate byteorder; 2 | extern crate heapsize; 3 | 4 | extern crate bitcrypto as crypto; 5 | extern crate chain; 6 | extern crate storage; 7 | extern crate db; 8 | extern crate keys; 9 | extern crate script; 10 | extern crate network; 11 | extern crate primitives; 12 | extern crate serialization as ser; 13 | extern crate verification; 14 | extern crate rand; 15 | #[macro_use] 16 | extern crate log; 17 | 18 | mod block_assembler; 19 | mod cpu_miner; 20 | mod fee; 21 | mod memory_pool; 22 | 23 | pub use block_assembler::{BlockAssembler, BlockTemplate}; 24 | pub use cpu_miner::{find_solution, CoinbaseTransactionBuilder}; 25 | pub use memory_pool::{MemoryPool, HashedOutPoint, Information as MemoryPoolInformation, 26 | OrderingStrategy as MemoryPoolOrderingStrategy, DoubleSpendCheckResult, NonFinalDoubleSpendSet}; 27 | pub use fee::{transaction_fee, transaction_fee_rate}; 28 | -------------------------------------------------------------------------------- /network/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "network" 3 | version = "0.1.0" 4 | authors = ["debris "] 5 | 6 | [dependencies] 7 | lazy_static = "1.0" 8 | chain = { path = "../chain" } 9 | primitives = { path = "../primitives" } 10 | -------------------------------------------------------------------------------- /network/src/deployments.rs: -------------------------------------------------------------------------------- 1 | const VERSIONBITS_TOP_MASK: u32 = 0xe0000000; 2 | const VERSIONBITS_TOP_BITS: u32 = 0x20000000; 3 | 4 | #[derive(Debug, Clone, Copy)] 5 | pub struct Deployment { 6 | /// Deployment's name 7 | pub name: &'static str, 8 | /// Bit 9 | pub bit: u8, 10 | /// Start time 11 | pub start_time: u32, 12 | /// Timeout 13 | pub 
timeout: u32, 14 | /// Activation block number (if already activated) 15 | pub activation: Option, 16 | } 17 | 18 | impl Deployment { 19 | pub fn matches(&self, version: u32) -> bool { 20 | (version & VERSIONBITS_TOP_MASK) == VERSIONBITS_TOP_BITS && (version & (1 << self.bit)) != 0 21 | } 22 | } 23 | 24 | -------------------------------------------------------------------------------- /network/src/lib.rs: -------------------------------------------------------------------------------- 1 | #[macro_use] 2 | extern crate lazy_static; 3 | 4 | extern crate chain; 5 | extern crate primitives; 6 | 7 | mod consensus; 8 | mod deployments; 9 | mod network; 10 | 11 | pub use primitives::{hash, compact}; 12 | 13 | pub use consensus::{ConsensusParams, ConsensusFork, BitcoinCashConsensusParams}; 14 | pub use deployments::Deployment; 15 | pub use network::{Magic, Network}; 16 | -------------------------------------------------------------------------------- /node/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "node" 3 | version = "0.1.0" 4 | authors = ["Chainpool "] 5 | 6 | [dependencies] 7 | primitives = { path = "../primitives" } 8 | storage = { path = "../storage" } 9 | sync = { path = "../sync" } 10 | chain = { path = "../chain" } 11 | miner = { path = "../miner" } 12 | keys = { path = "../keys" } 13 | script = { path = "../script" } 14 | log = "0.4" 15 | -------------------------------------------------------------------------------- /p2p/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "p2p" 3 | version = "0.1.0" 4 | authors = ["debris "] 5 | 6 | [dependencies] 7 | tokio-core = "0.1.6" 8 | tokio-io = "0.1.1" 9 | parking_lot = "0.4" 10 | futures = "0.1" 11 | futures-cpupool = "0.1" 12 | time = "0.1" 13 | rand = "0.4" 14 | log = "0.4" 15 | abstract-ns = "0.3" 16 | ns-dns-tokio = "0.3" 17 | csv = "1" 18 | 19 | primitives = { path = "../primitives" } 20 | bitcrypto = { path = "../crypto" } 21 | message = { path = "../message" } 22 | serialization = { path = "../serialization" } 23 | network = { path = "../network" } 24 | -------------------------------------------------------------------------------- /p2p/src/config.rs: -------------------------------------------------------------------------------- 1 | use std::{net, path}; 2 | use message::common::Services; 3 | use net::Config as NetConfig; 4 | use util::InternetProtocol; 5 | 6 | #[derive(Debug, Clone)] 7 | pub struct Config { 8 | /// Number of threads used by p2p thread pool. 9 | pub threads: usize, 10 | /// Number of inbound connections. 11 | pub inbound_connections: u32, 12 | /// Number of outbound connections. 13 | pub outbound_connections: u32, 14 | /// Configuration for every connection. 15 | pub connection: NetConfig, 16 | /// Connect only to these nodes. 17 | pub peers: Vec, 18 | /// Connect to these nodes to retrieve peer addresses, and disconnect. 19 | pub seeds: Vec, 20 | /// p2p/nodes.csv file path. 21 | pub node_table_path: path::PathBuf, 22 | /// Peers with this services will get a boost in node_table. 23 | pub preferable_services: Services, 24 | /// Internet protocol. 
25 | pub internet_protocol: InternetProtocol, 26 | } 27 | -------------------------------------------------------------------------------- /p2p/src/event_loop.rs: -------------------------------------------------------------------------------- 1 | use futures::{empty, Empty}; 2 | use tokio_core::reactor::Core; 3 | 4 | pub fn event_loop() -> Core { 5 | Core::new().unwrap() 6 | } 7 | 8 | pub fn forever() -> Empty<(), ()> { 9 | empty() 10 | } 11 | -------------------------------------------------------------------------------- /p2p/src/io/deadline.rs: -------------------------------------------------------------------------------- 1 | use std::io; 2 | use std::time::Duration; 3 | use futures::{Future, Select, Poll, Async}; 4 | use tokio_core::reactor::{Handle, Timeout}; 5 | 6 | type DeadlineBox = Box::Item>, Error = ::Error> + Send>; 7 | 8 | pub fn deadline(duration: Duration, handle: &Handle, future: F) -> Result, io::Error> 9 | where F: Future + Send + 'static, T: 'static { 10 | let timeout: DeadlineBox = Box::new(try!(Timeout::new(duration, handle)).map(|_| DeadlineStatus::Timeout)); 11 | let future: DeadlineBox = Box::new(future.map(DeadlineStatus::Meet)); 12 | let deadline = Deadline { 13 | future: timeout.select(future), 14 | }; 15 | Ok(deadline) 16 | } 17 | 18 | pub enum DeadlineStatus { 19 | Meet(T), 20 | Timeout, 21 | } 22 | 23 | pub struct Deadline where F: Future + Send { 24 | future: Select, DeadlineBox>, 25 | } 26 | 27 | impl Future for Deadline where F: Future + Send { 28 | type Item = DeadlineStatus; 29 | type Error = io::Error; 30 | 31 | fn poll(&mut self) -> Poll { 32 | match self.future.poll() { 33 | Ok(Async::Ready((result, _other))) => Ok(Async::Ready(result)), 34 | Ok(Async::NotReady) => Ok(Async::NotReady), 35 | Err((err, _other)) => Err(err), 36 | } 37 | } 38 | } 39 | -------------------------------------------------------------------------------- /p2p/src/io/mod.rs: -------------------------------------------------------------------------------- 1 | mod deadline; 2 | mod handshake; 3 | mod read_header; 4 | mod read_message; 5 | mod read_any_message; 6 | mod read_payload; 7 | mod sharedtcpstream; 8 | mod write_message; 9 | 10 | pub use self::deadline::{deadline, Deadline, DeadlineStatus}; 11 | pub use self::handshake::{ 12 | handshake, accept_handshake, Handshake, AcceptHandshake, HandshakeResult 13 | }; 14 | pub use self::read_header::{read_header, ReadHeader}; 15 | pub use self::read_payload::{read_payload, ReadPayload}; 16 | pub use self::read_message::{read_message, ReadMessage}; 17 | pub use self::read_any_message::{read_any_message, ReadAnyMessage}; 18 | pub use self::sharedtcpstream::SharedTcpStream; 19 | pub use self::write_message::{write_message, WriteMessage}; 20 | -------------------------------------------------------------------------------- /p2p/src/io/read_header.rs: -------------------------------------------------------------------------------- 1 | use std::io; 2 | use futures::{Future, Poll, Async}; 3 | use tokio_io::AsyncRead; 4 | use tokio_io::io::{ReadExact, read_exact}; 5 | use message::{MessageHeader, MessageResult}; 6 | use network::Magic; 7 | 8 | pub fn read_header(a: A, magic: Magic) -> ReadHeader where A: AsyncRead { 9 | ReadHeader { 10 | reader: read_exact(a, [0u8; 24]), 11 | magic: magic, 12 | } 13 | } 14 | 15 | pub struct ReadHeader { 16 | reader: ReadExact, 17 | magic: Magic, 18 | } 19 | 20 | impl Future for ReadHeader where A: AsyncRead { 21 | type Item = (A, MessageResult); 22 | type Error = io::Error; 23 | 24 | fn poll(&mut self) -> 
Poll { 25 | let (read, data) = try_ready!(self.reader.poll()); 26 | let header = MessageHeader::deserialize(&data, self.magic); 27 | Ok(Async::Ready((read, header))) 28 | } 29 | } 30 | 31 | #[cfg(test)] 32 | mod tests { 33 | use futures::Future; 34 | use bytes::Bytes; 35 | use network::{Network, ConsensusFork}; 36 | use message::{MessageHeader, Error}; 37 | use super::read_header; 38 | 39 | #[test] 40 | fn test_read_header() { 41 | let raw: Bytes = "f9beb4d96164647200000000000000001f000000ed52399b".into(); 42 | let expected = MessageHeader { 43 | magic: Network::Mainnet.magic(&ConsensusFork::BitcoinCore), 44 | command: "addr".into(), 45 | len: 0x1f, 46 | checksum: "ed52399b".into(), 47 | }; 48 | 49 | assert_eq!(read_header(raw.as_ref(), Network::Mainnet.magic(&ConsensusFork::BitcoinCore)).wait().unwrap().1, Ok(expected)); 50 | assert_eq!(read_header(raw.as_ref(), Network::Testnet.magic(&ConsensusFork::BitcoinCore)).wait().unwrap().1, Err(Error::InvalidMagic)); 51 | } 52 | 53 | #[test] 54 | fn test_read_header_with_invalid_magic() { 55 | let raw: Bytes = "f9beb4d86164647200000000000000001f000000ed52399b".into(); 56 | assert_eq!(read_header(raw.as_ref(), Network::Testnet.magic(&ConsensusFork::BitcoinCore)).wait().unwrap().1, Err(Error::InvalidMagic)); 57 | } 58 | 59 | #[test] 60 | fn test_read_too_short_header() { 61 | let raw: Bytes = "f9beb4d96164647200000000000000001f000000ed5239".into(); 62 | assert!(read_header(raw.as_ref(), Network::Mainnet.magic(&ConsensusFork::BitcoinCore)).wait().is_err()); 63 | } 64 | } 65 | -------------------------------------------------------------------------------- /p2p/src/io/read_payload.rs: -------------------------------------------------------------------------------- 1 | use std::io; 2 | use std::marker::PhantomData; 3 | use futures::{Poll, Future}; 4 | use tokio_io::AsyncRead; 5 | use tokio_io::io::{read_exact, ReadExact}; 6 | use bytes::Bytes; 7 | use hash::H32; 8 | use crypto::checksum; 9 | use message::{Error, MessageResult, Payload, deserialize_payload}; 10 | 11 | pub fn read_payload(a: A, version: u32, len: usize, checksum: H32) -> ReadPayload 12 | where A: AsyncRead, M: Payload { 13 | ReadPayload { 14 | reader: read_exact(a, Bytes::new_with_len(len)), 15 | version: version, 16 | checksum: checksum, 17 | payload_type: PhantomData, 18 | } 19 | } 20 | 21 | pub struct ReadPayload { 22 | reader: ReadExact, 23 | version: u32, 24 | checksum: H32, 25 | payload_type: PhantomData, 26 | } 27 | 28 | impl Future for ReadPayload where A: AsyncRead, M: Payload { 29 | type Item = (A, MessageResult); 30 | type Error = io::Error; 31 | 32 | fn poll(&mut self) -> Poll { 33 | let (read, data) = try_ready!(self.reader.poll()); 34 | if checksum(&data) != self.checksum { 35 | return Ok((read, Err(Error::InvalidChecksum)).into()); 36 | } 37 | let payload = deserialize_payload(&data, self.version); 38 | Ok((read, payload).into()) 39 | } 40 | } 41 | 42 | #[cfg(test)] 43 | mod tests { 44 | use futures::Future; 45 | use bytes::Bytes; 46 | use message::Error; 47 | use message::types::Ping; 48 | use super::read_payload; 49 | 50 | #[test] 51 | fn test_read_payload() { 52 | let raw: Bytes = "5845303b6da97786".into(); 53 | let ping = Ping::new(u64::from_str_radix("8677a96d3b304558", 16).unwrap()); 54 | assert_eq!(read_payload(raw.as_ref(), 0, 8, "83c00c76".into()).wait().unwrap().1, Ok(ping)); 55 | } 56 | 57 | #[test] 58 | fn test_read_payload_with_invalid_checksum() { 59 | let raw: Bytes = "5845303b6da97786".into(); 60 | assert_eq!(read_payload::(raw.as_ref(), 0, 8, 
"83c00c75".into()).wait().unwrap().1, Err(Error::InvalidChecksum)); 61 | } 62 | 63 | #[test] 64 | fn test_read_too_short_payload() { 65 | let raw: Bytes = "5845303b6da977".into(); 66 | assert!(read_payload::(raw.as_ref(), 0, 8, "83c00c76".into()).wait().is_err()); 67 | } 68 | } 69 | -------------------------------------------------------------------------------- /p2p/src/io/sharedtcpstream.rs: -------------------------------------------------------------------------------- 1 | use std::sync::Arc; 2 | use std::net::Shutdown; 3 | use std::io::{Read, Write, Error}; 4 | use futures::Poll; 5 | use tokio_io::{AsyncRead, AsyncWrite}; 6 | use tokio_core::net::TcpStream; 7 | 8 | pub struct SharedTcpStream { 9 | io: Arc, 10 | } 11 | 12 | impl SharedTcpStream { 13 | pub fn new(a: Arc) -> Self { 14 | SharedTcpStream { 15 | io: a, 16 | } 17 | } 18 | 19 | pub fn shutdown(&self) { 20 | // error is irrelevant here, the connection is dropped anyway 21 | let _ = self.io.shutdown(Shutdown::Both); 22 | } 23 | } 24 | 25 | impl From for SharedTcpStream { 26 | fn from(a: TcpStream) -> Self { 27 | SharedTcpStream::new(Arc::new(a)) 28 | } 29 | } 30 | 31 | impl Read for SharedTcpStream { 32 | fn read(&mut self, buf: &mut [u8]) -> Result { 33 | Read::read(&mut (&*self.io as &TcpStream), buf) 34 | } 35 | } 36 | 37 | impl AsyncRead for SharedTcpStream {} 38 | 39 | impl AsyncWrite for SharedTcpStream { 40 | fn shutdown(&mut self) -> Poll<(), Error> { 41 | self.io.shutdown(Shutdown::Both).map(Into::into) 42 | } 43 | } 44 | 45 | impl Write for SharedTcpStream { 46 | fn write(&mut self, buf: &[u8]) -> Result { 47 | Write::write(&mut (&*self.io as &TcpStream), buf) 48 | } 49 | 50 | fn flush(&mut self) -> Result<(), Error> { 51 | Write::flush(&mut (&*self.io as &TcpStream)) 52 | } 53 | } 54 | 55 | impl Clone for SharedTcpStream { 56 | fn clone(&self) -> Self { 57 | SharedTcpStream::new(self.io.clone()) 58 | } 59 | } 60 | -------------------------------------------------------------------------------- /p2p/src/io/write_message.rs: -------------------------------------------------------------------------------- 1 | use std::io; 2 | use futures::{Future, Poll}; 3 | use tokio_io::AsyncWrite; 4 | use tokio_io::io::{WriteAll, write_all}; 5 | use message::Message; 6 | 7 | pub fn write_message(a: A, message: Message) -> WriteMessage where A: AsyncWrite { 8 | WriteMessage { 9 | future: write_all(a, message), 10 | } 11 | } 12 | 13 | pub struct WriteMessage { 14 | future: WriteAll>, 15 | } 16 | 17 | impl Future for WriteMessage where A: AsyncWrite { 18 | type Item = (A, Message); 19 | type Error = io::Error; 20 | 21 | fn poll(&mut self) -> Poll { 22 | self.future.poll() 23 | } 24 | } 25 | -------------------------------------------------------------------------------- /p2p/src/lib.rs: -------------------------------------------------------------------------------- 1 | #[macro_use] 2 | extern crate futures; 3 | extern crate futures_cpupool; 4 | extern crate rand; 5 | extern crate time; 6 | extern crate tokio_core; 7 | extern crate tokio_io; 8 | extern crate parking_lot; 9 | #[macro_use] 10 | extern crate log; 11 | extern crate abstract_ns; 12 | extern crate ns_dns_tokio; 13 | extern crate csv; 14 | 15 | extern crate bitcrypto as crypto; 16 | extern crate message; 17 | extern crate primitives; 18 | extern crate serialization as ser; 19 | extern crate network; 20 | 21 | mod io; 22 | mod net; 23 | mod protocol; 24 | mod session; 25 | mod util; 26 | mod config; 27 | mod event_loop; 28 | mod p2p; 29 | 30 | pub use primitives::{hash, bytes}; 
31 | 32 | pub use config::Config; 33 | pub use net::Config as NetConfig; 34 | pub use p2p::{P2P, Context}; 35 | pub use event_loop::{event_loop, forever}; 36 | pub use util::{NodeTableError, PeerId, PeerInfo, InternetProtocol, Direction}; 37 | pub use protocol::{InboundSyncConnection, InboundSyncConnectionRef, OutboundSyncConnection, OutboundSyncConnectionRef, LocalSyncNode, LocalSyncNodeRef}; 38 | -------------------------------------------------------------------------------- /p2p/src/net/accept_connection.rs: -------------------------------------------------------------------------------- 1 | use std::{net, io}; 2 | use std::time::Duration; 3 | use futures::{Future, Poll}; 4 | use tokio_core::reactor::Handle; 5 | use tokio_core::net::TcpStream; 6 | use network::Magic; 7 | use message::{MessageResult}; 8 | use io::{accept_handshake, AcceptHandshake, Deadline, deadline}; 9 | use net::{Config, Connection}; 10 | 11 | pub fn accept_connection(stream: TcpStream, handle: &Handle, config: &Config, address: net::SocketAddr) -> Deadline { 12 | let accept = AcceptConnection { 13 | handshake: accept_handshake(stream, config.magic, config.version(&address), config.protocol_minimum), 14 | magic: config.magic, 15 | address: address, 16 | }; 17 | 18 | deadline(Duration::new(5, 0), handle, accept).expect("Failed to create timeout") 19 | } 20 | 21 | pub struct AcceptConnection { 22 | handshake: AcceptHandshake, 23 | magic: Magic, 24 | address: net::SocketAddr, 25 | } 26 | 27 | impl Future for AcceptConnection { 28 | type Item = MessageResult; 29 | type Error = io::Error; 30 | 31 | fn poll(&mut self) -> Poll { 32 | let (stream, result) = try_ready!(self.handshake.poll()); 33 | let result = match result { 34 | Ok(result) => result, 35 | Err(err) => return Ok(Err(err).into()), 36 | }; 37 | let connection = Connection { 38 | stream: stream.into(), 39 | services: result.version.services(), 40 | version: result.negotiated_version, 41 | version_message: result.version, 42 | magic: self.magic, 43 | address: self.address, 44 | }; 45 | Ok(Ok(connection).into()) 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /p2p/src/net/channel.rs: -------------------------------------------------------------------------------- 1 | use tokio_io::io::{write_all, WriteAll}; 2 | use session::Session; 3 | use io::{SharedTcpStream, read_any_message, ReadAnyMessage}; 4 | use util::PeerInfo; 5 | 6 | pub struct Channel { 7 | stream: SharedTcpStream, 8 | peer_info: PeerInfo, 9 | session: Session, 10 | } 11 | 12 | impl Channel { 13 | pub fn new(stream: SharedTcpStream, peer_info: PeerInfo, session: Session) -> Self { 14 | Channel { 15 | stream: stream, 16 | peer_info: peer_info, 17 | session: session, 18 | } 19 | } 20 | 21 | pub fn write_message(&self, message: T) -> WriteAll where T: AsRef<[u8]> { 22 | write_all(self.stream.clone(), message) 23 | } 24 | 25 | pub fn read_message(&self) -> ReadAnyMessage { 26 | read_any_message(self.stream.clone(), self.peer_info.magic) 27 | } 28 | 29 | pub fn shutdown(&self) { 30 | self.stream.shutdown(); 31 | } 32 | 33 | pub fn version(&self) -> u32 { 34 | self.peer_info.version 35 | } 36 | 37 | pub fn peer_info(&self) -> PeerInfo { 38 | self.peer_info.clone() 39 | } 40 | 41 | pub fn session(&self) -> &Session { 42 | &self.session 43 | } 44 | } 45 | -------------------------------------------------------------------------------- /p2p/src/net/config.rs: -------------------------------------------------------------------------------- 1 | use 
std::net::SocketAddr; 2 | use network::Magic; 3 | use message::common::{Services, NetAddress}; 4 | use message::types::version::{Version, V0, V106, V70001}; 5 | use util::time::{Time, RealTime}; 6 | use util::nonce::{NonceGenerator, RandomNonce}; 7 | 8 | #[derive(Debug, Clone)] 9 | pub struct Config { 10 | pub protocol_version: u32, 11 | pub protocol_minimum: u32, 12 | pub magic: Magic, 13 | pub local_address: SocketAddr, 14 | pub services: Services, 15 | pub user_agent: String, 16 | pub start_height: i32, 17 | pub relay: bool, 18 | } 19 | 20 | impl Config { 21 | pub fn version(&self, to: &SocketAddr) -> Version { 22 | Version::V70001(V0 { 23 | version: self.protocol_version, 24 | services: self.services, 25 | timestamp: RealTime.get().sec, 26 | receiver: NetAddress { 27 | services: self.services, 28 | address: to.ip().into(), 29 | port: to.port().into(), 30 | }, 31 | }, V106 { 32 | from: NetAddress { 33 | services: self.services, 34 | address: self.local_address.ip().into(), 35 | port: self.local_address.port().into(), 36 | }, 37 | nonce: RandomNonce.get(), 38 | user_agent: self.user_agent.clone(), 39 | start_height: self.start_height, 40 | }, V70001 { 41 | relay: self.relay, 42 | }) 43 | } 44 | } 45 | -------------------------------------------------------------------------------- /p2p/src/net/connection.rs: -------------------------------------------------------------------------------- 1 | use std::net; 2 | use network::Magic; 3 | use message::common::Services; 4 | use message::types; 5 | use io::SharedTcpStream; 6 | 7 | pub struct Connection { 8 | pub stream: SharedTcpStream, 9 | pub version: u32, 10 | pub version_message: types::Version, 11 | pub magic: Magic, 12 | pub services: Services, 13 | pub address: net::SocketAddr, 14 | } 15 | -------------------------------------------------------------------------------- /p2p/src/net/mod.rs: -------------------------------------------------------------------------------- 1 | mod accept_connection; 2 | mod channel; 3 | mod config; 4 | mod connect; 5 | mod connection; 6 | mod connection_counter; 7 | mod connections; 8 | mod peer_context; 9 | mod stats; 10 | 11 | pub use self::accept_connection::{AcceptConnection, accept_connection}; 12 | pub use self::channel::Channel; 13 | pub use self::config::Config; 14 | pub use self::connect::{Connect, connect}; 15 | pub use self::connection::Connection; 16 | pub use self::connection_counter::ConnectionCounter; 17 | pub use self::connections::Connections; 18 | pub use self::peer_context::PeerContext; 19 | pub use self::stats::PeerStats; 20 | -------------------------------------------------------------------------------- /p2p/src/protocol/mod.rs: -------------------------------------------------------------------------------- 1 | mod addr; 2 | mod ping; 3 | mod sync; 4 | 5 | use bytes::Bytes; 6 | use message::Error; 7 | use message::common::Command; 8 | 9 | pub use self::addr::{AddrProtocol, SeednodeProtocol}; 10 | pub use self::ping::PingProtocol; 11 | pub use self::sync::{SyncProtocol, InboundSyncConnection, InboundSyncConnectionRef, OutboundSyncConnection, OutboundSyncConnectionRef, LocalSyncNode, LocalSyncNodeRef}; 12 | 13 | pub trait Protocol: Send { 14 | /// Initialize the protocol. 15 | fn initialize(&mut self) {} 16 | 17 | /// Maintain the protocol. 18 | fn maintain(&mut self) {} 19 | 20 | /// Handle the message. 21 | fn on_message(&mut self, command: &Command, payload: &Bytes) -> Result<(), Error>; 22 | 23 | /// On disconnect. 24 | fn on_close(&mut self) {} 25 | 26 | /// Boxes the protocol. 
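/// Example (editorial sketch, not part of the original source; `Echo` is a hypothetical protocol used only for illustration). Because `initialize`, `maintain` and `on_close` have default bodies, an implementor only has to provide `on_message`, and `boxed()` then erases the concrete type for storage in a session:
///
/// ```ignore
/// struct Echo;
///
/// impl Protocol for Echo {
///     fn on_message(&mut self, _command: &Command, _payload: &Bytes) -> Result<(), Error> {
///         Ok(())
///     }
/// }
///
/// let _boxed: Box<Protocol> = Echo.boxed();
/// ```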
27 | fn boxed(self) -> Box where Self: Sized + 'static { 28 | Box::new(self) 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /p2p/src/util/internet_protocol.rs: -------------------------------------------------------------------------------- 1 | use std::{str, net}; 2 | 3 | #[derive(Debug, PartialEq, Clone, Copy)] 4 | pub enum InternetProtocol { 5 | Any, 6 | IpV4, 7 | IpV6, 8 | } 9 | 10 | impl Default for InternetProtocol { 11 | fn default() -> Self { 12 | InternetProtocol::Any 13 | } 14 | } 15 | 16 | impl str::FromStr for InternetProtocol { 17 | type Err = &'static str; 18 | 19 | fn from_str(s: &str) -> Result { 20 | match s { 21 | "ipv4" => Ok(InternetProtocol::IpV4), 22 | "ipv6" => Ok(InternetProtocol::IpV6), 23 | _ => Err("Invalid internet protocol"), 24 | } 25 | } 26 | } 27 | 28 | impl InternetProtocol { 29 | pub fn is_allowed(&self, addr: &net::SocketAddr) -> bool { 30 | match *self { 31 | InternetProtocol::Any => true, 32 | InternetProtocol::IpV4 => match *addr { 33 | net::SocketAddr::V4(_) => true, 34 | _ => false, 35 | }, 36 | InternetProtocol::IpV6 => match *addr { 37 | net::SocketAddr::V6(_) => true, 38 | _ => false, 39 | } 40 | } 41 | } 42 | } 43 | 44 | #[cfg(test)] 45 | mod tests { 46 | use super::InternetProtocol; 47 | 48 | #[test] 49 | fn test_default_internet_protocol() { 50 | assert_eq!(InternetProtocol::default(), InternetProtocol::Any); 51 | } 52 | 53 | #[test] 54 | fn test_parsing_internet_protocol() { 55 | assert_eq!(InternetProtocol::IpV4, "ipv4".parse().unwrap()); 56 | assert_eq!(InternetProtocol::IpV6, "ipv6".parse().unwrap()); 57 | assert!("sa".parse::().is_err()); 58 | } 59 | } 60 | -------------------------------------------------------------------------------- /p2p/src/util/interval.rs: -------------------------------------------------------------------------------- 1 | use std::time::{Instant, Duration}; 2 | 3 | pub trait Interval : Default { 4 | fn now(&self) -> Instant { 5 | Instant::now() 6 | } 7 | 8 | fn elapsed(&self, instant: Instant) -> Duration { 9 | instant.elapsed() 10 | } 11 | } 12 | 13 | #[derive(Default)] 14 | pub struct RealInterval; 15 | 16 | impl Interval for RealInterval { } 17 | 18 | #[derive(Default)] 19 | #[cfg(test)] 20 | pub struct FixedIntervalSpawner { 21 | step_millis: u64, 22 | } 23 | 24 | #[cfg(test)] 25 | impl FixedIntervalSpawner { 26 | pub fn new(step_millis: u64) -> Self { 27 | FixedIntervalSpawner { step_millis : step_millis } 28 | } 29 | } 30 | 31 | #[cfg(test)] 32 | impl Interval for FixedIntervalSpawner { 33 | fn now(&self) -> Instant { 34 | Instant::now() 35 | } 36 | 37 | fn elapsed(&self, instant: Instant) -> Duration { 38 | (instant - Duration::from_millis(self.step_millis)).elapsed() 39 | } 40 | } 41 | -------------------------------------------------------------------------------- /p2p/src/util/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod nonce; 2 | pub mod time; 3 | pub mod interval; 4 | mod internet_protocol; 5 | mod node_table; 6 | mod peer; 7 | mod response_queue; 8 | mod synchronizer; 9 | 10 | pub use self::internet_protocol::InternetProtocol; 11 | pub use self::node_table::{NodeTable, NodeTableError, Node}; 12 | pub use self::peer::{PeerId, PeerInfo, Direction}; 13 | pub use self::response_queue::{ResponseQueue, Responses}; 14 | pub use self::synchronizer::{Synchronizer, ConfigurableSynchronizer}; 15 | -------------------------------------------------------------------------------- /p2p/src/util/nonce.rs: 
-------------------------------------------------------------------------------- 1 | use rand; 2 | 3 | pub trait NonceGenerator { 4 | fn get(&self) -> u64; 5 | } 6 | 7 | #[derive(Default)] 8 | pub struct RandomNonce; 9 | 10 | impl NonceGenerator for RandomNonce { 11 | fn get(&self) -> u64 { 12 | rand::random() 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /p2p/src/util/peer.rs: -------------------------------------------------------------------------------- 1 | use std::net::SocketAddr; 2 | use message::types; 3 | use network::Magic; 4 | 5 | pub type PeerId = usize; 6 | 7 | #[derive(Debug, PartialEq, Eq, Clone, Copy)] 8 | pub enum Direction { 9 | Inbound, 10 | Outbound, 11 | } 12 | 13 | #[derive(Debug, PartialEq, Clone)] 14 | pub struct PeerInfo { 15 | pub id: PeerId, 16 | pub address: SocketAddr, 17 | pub user_agent: String, 18 | pub direction: Direction, 19 | pub version: u32, 20 | pub version_message: types::Version, 21 | pub magic: Magic, 22 | } 23 | 24 | -------------------------------------------------------------------------------- /p2p/src/util/response_queue.rs: -------------------------------------------------------------------------------- 1 | use std::collections::{HashMap, HashSet}; 2 | use bytes::Bytes; 3 | 4 | /// Queue of out-of-order responses. Each peer has it's own queue. 5 | #[derive(Debug, Default)] 6 | pub struct ResponseQueue { 7 | unfinished: HashMap>, 8 | finished: HashMap>, 9 | ignored: HashSet, 10 | } 11 | 12 | pub enum Responses { 13 | Unfinished(Vec), 14 | Finished(Vec), 15 | Ignored, 16 | } 17 | 18 | impl ResponseQueue { 19 | pub fn push_unfinished_response(&mut self, id: u32, response: Bytes) { 20 | self.unfinished.entry(id).or_insert_with(Vec::new).push(response) 21 | } 22 | 23 | pub fn push_finished_response(&mut self, id: u32, response: Bytes) { 24 | let mut responses = self.unfinished.remove(&id).unwrap_or_default(); 25 | responses.push(response); 26 | let previous = self.finished.insert(id, responses); 27 | assert!(previous.is_none(), "logic error; same finished response should never be pushed twice"); 28 | } 29 | 30 | pub fn push_ignored_response(&mut self, id: u32) { 31 | assert!(self.ignored.insert(id), "logic error; same response should never be ignored twice"); 32 | } 33 | 34 | pub fn responses(&mut self, id: u32) -> Option { 35 | self.unfinished.remove(&id).map(Responses::Unfinished) 36 | .or_else(|| self.finished.remove(&id).map(Responses::Finished)) 37 | .or_else(|| { 38 | if self.ignored.remove(&id) { 39 | Some(Responses::Ignored) 40 | } else { 41 | None 42 | } 43 | }) 44 | } 45 | } 46 | -------------------------------------------------------------------------------- /p2p/src/util/time.rs: -------------------------------------------------------------------------------- 1 | use std::cell::Cell; 2 | use time; 3 | 4 | pub trait Time { 5 | fn get(&self) -> time::Timespec; 6 | } 7 | 8 | #[derive(Default, Debug)] 9 | pub struct RealTime; 10 | 11 | impl Time for RealTime { 12 | fn get(&self) -> time::Timespec { 13 | time::get_time() 14 | } 15 | } 16 | 17 | #[derive(Default)] 18 | pub struct IncrementalTime { 19 | counter: Cell, 20 | } 21 | 22 | impl Time for IncrementalTime { 23 | fn get(&self) -> time::Timespec { 24 | let c = self.counter.get(); 25 | let result = time::Timespec::new(c, 0); 26 | self.counter.set(c + 1); 27 | result 28 | } 29 | } 30 | 31 | #[derive(Default)] 32 | pub struct ZeroTime { 33 | } 34 | 35 | impl Time for ZeroTime { 36 | fn get(&self) -> time::Timespec { 37 | 
time::Timespec::new(0, 0) 38 | } 39 | } 40 | -------------------------------------------------------------------------------- /pbtc/commands/import.rs: -------------------------------------------------------------------------------- 1 | use clap::ArgMatches; 2 | use sync::{create_sync_blocks_writer, Error}; 3 | use config::Config; 4 | use util::init_db; 5 | 6 | pub fn import(cfg: Config, matches: &ArgMatches) -> Result<(), String> { 7 | try!(init_db(&cfg)); 8 | 9 | let blk_path = matches.value_of("PATH").expect("PATH is required in cli.yml; qed"); 10 | 11 | let mut writer = create_sync_blocks_writer(cfg.db, cfg.consensus, cfg.verification_params); 12 | 13 | let blk_dir = try!(::import::open_blk_dir(blk_path).map_err(|_| "Import directory does not exist".to_owned())); 14 | let mut counter = 0; 15 | for blk in blk_dir { 16 | // TODO: verify magic! 17 | let blk = try!(blk.map_err(|_| "Cannot read block".to_owned())); 18 | match writer.append_block(blk.block) { 19 | Ok(_) => { 20 | counter += 1; 21 | if counter % 1000 == 0 { 22 | info!(target: "sync", "Imported {} blocks", counter); 23 | } 24 | } 25 | Err(Error::TooManyOrphanBlocks) => return Err("Too many orphan (unordered) blocks".into()), 26 | Err(_) => return Err("Cannot append block".into()), 27 | } 28 | } 29 | 30 | info!("Finished import of {} blocks", counter); 31 | 32 | Ok(()) 33 | } 34 | -------------------------------------------------------------------------------- /pbtc/commands/mod.rs: -------------------------------------------------------------------------------- 1 | mod import; 2 | mod start; 3 | mod rollback; 4 | 5 | pub use self::import::import; 6 | pub use self::start::start; 7 | pub use self::rollback::rollback; -------------------------------------------------------------------------------- /pbtc/commands/rollback.rs: -------------------------------------------------------------------------------- 1 | use clap::ArgMatches; 2 | use storage::BlockRef; 3 | use config::Config; 4 | use primitives::hash::H256; 5 | use util::init_db; 6 | 7 | pub fn rollback(cfg: Config, matches: &ArgMatches) -> Result<(), String> { 8 | try!(init_db(&cfg)); 9 | 10 | let block_ref = matches.value_of("BLOCK").expect("BLOCK is required in cli.yml; qed"); 11 | let block_ref = if block_ref.len() == 64 { 12 | BlockRef::Hash({ 13 | let hash: H256 = block_ref.parse().map_err(|e| format!("Invalid block number: {}", e))?; 14 | hash.reversed() 15 | }) 16 | } else { 17 | BlockRef::Number(block_ref.parse().map_err(|e| format!("Invalid block hash: {}", e))?) 18 | }; 19 | 20 | let required_block_hash = cfg.db.block_header(block_ref.clone()).ok_or(format!("Block {:?} is unknown", block_ref))?.hash(); 21 | let genesis_hash = cfg.network.genesis_block().hash(); 22 | 23 | let mut best_block_hash = cfg.db.best_block().hash; 24 | debug_assert!(best_block_hash != H256::default()); // genesis inserted in init_db 25 | 26 | loop { 27 | if best_block_hash == required_block_hash { 28 | info!("Reverted to block {:?}", block_ref); 29 | return Ok(()); 30 | } 31 | 32 | if best_block_hash == genesis_hash { 33 | return Err(format!("Failed to revert to block {:?}. Reverted to genesis", block_ref)); 34 | } 35 | 36 | best_block_hash = cfg.db.rollback_best().map_err(|e| format!("{:?}", e))?; 37 | } 38 | } 39 | -------------------------------------------------------------------------------- /pbtc/main.rs: -------------------------------------------------------------------------------- 1 | //! Parity bitcoin client. 
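//!
//! Editorial note, not part of the original source: the subcommands wired up in `run()` below map to the modules in `pbtc/commands/`. Typical invocations look roughly like this (the paths and block values are illustrative assumptions, not taken from the repository):
//!
//!     pbtc import /path/to/bitcoind/blocks    # bulk-import blk*.dat files from a data dir
//!     pbtc rollback 1000                      # revert the database to block number 1000
//!     pbtc rollback <64-character-hash>       # revert the database to a specific block hash
//!     pbtc                                    # no subcommand: start the node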
2 | 3 | #[macro_use] 4 | extern crate clap; 5 | #[macro_use] 6 | extern crate log; 7 | extern crate env_logger; 8 | extern crate app_dirs; 9 | extern crate libc; 10 | 11 | extern crate storage; 12 | extern crate db; 13 | extern crate chain; 14 | extern crate keys; 15 | extern crate logs; 16 | extern crate script; 17 | extern crate message; 18 | extern crate network; 19 | extern crate p2p; 20 | extern crate sync; 21 | extern crate import; 22 | extern crate rpc as ethcore_rpc; 23 | extern crate primitives; 24 | extern crate verification; 25 | 26 | mod commands; 27 | mod config; 28 | mod seednodes; 29 | mod util; 30 | mod rpc; 31 | mod rpc_apis; 32 | 33 | use app_dirs::AppInfo; 34 | 35 | pub const APP_INFO: AppInfo = AppInfo { name: "pbtc", author: "Parity" }; 36 | pub const PROTOCOL_VERSION: u32 = 70_014; 37 | pub const PROTOCOL_MINIMUM: u32 = 70_001; 38 | pub const USER_AGENT: &'static str = "pbtc"; 39 | pub const REGTEST_USER_AGENT: &'static str = "/Satoshi:0.12.1/"; 40 | pub const LOG_INFO: &'static str = "sync=info"; 41 | 42 | fn main() { 43 | // Always print backtrace on panic. 44 | ::std::env::set_var("RUST_BACKTRACE", "1"); 45 | 46 | if let Err(err) = run() { 47 | println!("{}", err); 48 | } 49 | } 50 | 51 | fn run() -> Result<(), String> { 52 | let yaml = load_yaml!("cli.yml"); 53 | let matches = clap::App::from_yaml(yaml).get_matches(); 54 | let cfg = try!(config::parse(&matches)); 55 | 56 | if !cfg.quiet { 57 | if cfg!(windows) { 58 | logs::init(LOG_INFO, logs::DateLogFormatter); 59 | } else { 60 | logs::init(LOG_INFO, logs::DateAndColorLogFormatter); 61 | } 62 | } else { 63 | env_logger::init(); 64 | } 65 | 66 | match matches.subcommand() { 67 | ("import", Some(import_matches)) => commands::import(cfg, import_matches), 68 | ("rollback", Some(rollback_matches)) => commands::rollback(cfg, rollback_matches), 69 | _ => commands::start(cfg), 70 | } 71 | } 72 | -------------------------------------------------------------------------------- /pbtc/rpc.rs: -------------------------------------------------------------------------------- 1 | use std::net::SocketAddr; 2 | use std::sync::Arc; 3 | use rpc_apis::{self, ApiSet}; 4 | use ethcore_rpc::{Server, start_http, MetaIoHandler, Compatibility, Remote}; 5 | use network::Network; 6 | use std::io; 7 | use sync; 8 | use storage; 9 | use p2p; 10 | 11 | pub struct Dependencies { 12 | pub network: Network, 13 | pub local_sync_node: sync::LocalNodeRef, 14 | pub storage: storage::SharedStore, 15 | pub p2p_context: Arc, 16 | pub remote: Remote, 17 | } 18 | 19 | #[derive(Debug, PartialEq)] 20 | pub struct HttpConfiguration { 21 | pub enabled: bool, 22 | pub interface: String, 23 | pub port: u16, 24 | pub apis: ApiSet, 25 | pub cors: Option>, 26 | pub hosts: Option>, 27 | } 28 | 29 | impl HttpConfiguration { 30 | pub fn with_port(port: u16) -> Self { 31 | HttpConfiguration { 32 | enabled: true, 33 | interface: "127.0.0.1".into(), 34 | port: port, 35 | apis: ApiSet::default(), 36 | cors: None, 37 | hosts: Some(Vec::new()), 38 | } 39 | } 40 | } 41 | 42 | pub fn new_http(conf: HttpConfiguration, deps: Dependencies) -> Result, String> { 43 | if !conf.enabled { 44 | return Ok(None); 45 | } 46 | 47 | let url = format!("{}:{}", conf.interface, conf.port); 48 | let addr = try!(url.parse().map_err(|_| format!("Invalid JSONRPC listen host/port given: {}", url))); 49 | Ok(Some(try!(setup_http_rpc_server(&addr, conf.cors, conf.hosts, conf.apis, deps)))) 50 | } 51 | 52 | pub fn setup_http_rpc_server( 53 | url: &SocketAddr, 54 | cors_domains: Option>, 55 | 
allowed_hosts: Option>, 56 | apis: ApiSet, 57 | deps: Dependencies, 58 | ) -> Result { 59 | let server = setup_rpc_server(apis, deps); 60 | let start_result = start_http(url, cors_domains, allowed_hosts, server); 61 | match start_result { 62 | Err(ref err) if err.kind() == io::ErrorKind::AddrInUse => { 63 | Err(format!("RPC address {} is already in use, make sure that another instance of a Bitcoin node is not running or change the address using the --jsonrpc-port and --jsonrpc-interface options.", url)) 64 | }, 65 | Err(e) => Err(format!("RPC error: {:?}", e)), 66 | Ok(server) => Ok(server), 67 | } 68 | } 69 | 70 | fn setup_rpc_server(apis: ApiSet, deps: Dependencies) -> MetaIoHandler<()> { 71 | rpc_apis::setup_rpc(MetaIoHandler::with_compatibility(Compatibility::Both), apis, deps) 72 | } 73 | -------------------------------------------------------------------------------- /pbtc/rpc_apis.rs: -------------------------------------------------------------------------------- 1 | use std::str::FromStr; 2 | use std::collections::HashSet; 3 | use rpc::Dependencies; 4 | use ethcore_rpc::MetaIoHandler; 5 | 6 | #[derive(Debug, PartialEq, Eq, Hash, Copy, Clone)] 7 | pub enum Api { 8 | /// Raw methods 9 | Raw, 10 | /// Miner-related methods 11 | Miner, 12 | /// BlockChain-related methods 13 | BlockChain, 14 | /// Network 15 | Network, 16 | } 17 | 18 | #[derive(Debug, PartialEq, Eq)] 19 | pub enum ApiSet { 20 | List(HashSet), 21 | } 22 | 23 | impl Default for ApiSet { 24 | fn default() -> Self { 25 | ApiSet::List(vec![Api::Raw, Api::Miner, Api::BlockChain, Api::Network].into_iter().collect()) 26 | } 27 | } 28 | 29 | impl FromStr for Api { 30 | type Err = String; 31 | 32 | fn from_str(s: &str) -> Result { 33 | match s { 34 | "raw" => Ok(Api::Raw), 35 | "miner" => Ok(Api::Miner), 36 | "blockchain" => Ok(Api::BlockChain), 37 | "network" => Ok(Api::Network), 38 | api => Err(format!("Unknown api: {}", api)), 39 | } 40 | } 41 | } 42 | 43 | impl ApiSet { 44 | pub fn list_apis(&self) -> HashSet { 45 | match *self { 46 | ApiSet::List(ref apis) => apis.clone(), 47 | } 48 | } 49 | } 50 | 51 | pub fn setup_rpc(mut handler: MetaIoHandler<()>, apis: ApiSet, deps: Dependencies) -> MetaIoHandler<()> { 52 | use ethcore_rpc::v1::*; 53 | 54 | for api in apis.list_apis() { 55 | match api { 56 | Api::Raw => handler.extend_with(RawClient::new(RawClientCore::new(deps.local_sync_node.clone())).to_delegate()), 57 | Api::Miner => handler.extend_with(MinerClient::new(MinerClientCore::new(deps.local_sync_node.clone())).to_delegate()), 58 | Api::BlockChain => handler.extend_with(BlockChainClient::new(BlockChainClientCore::new(deps.network, deps.storage.clone())).to_delegate()), 59 | Api::Network => handler.extend_with(NetworkClient::new(NetworkClientCore::new(deps.p2p_context.clone())).to_delegate()), 60 | } 61 | } 62 | 63 | handler 64 | } 65 | -------------------------------------------------------------------------------- /pbtc/seednodes.rs: -------------------------------------------------------------------------------- 1 | 2 | pub fn mainnet_seednodes() -> Vec<&'static str> { 3 | vec![ 4 | // Pieter Wuille 5 | "seed.bitcoin.sipa.be:8333", 6 | // Matt Corallo 7 | "dnsseed.bluematt.me:8333", 8 | // Luke Dashjr 9 | "dnsseed.bitcoin.dashjr.org:8333", 10 | // Christian Decker 11 | "seed.bitcoinstats.com:8333", 12 | // Jonas Schnelli 13 | "seed.bitcoin.jonasschnelli.ch:8333", 14 | // Peter Todd 15 | "seed.btc.petertodd.org:8333", 16 | // 17 | "seed.voskuil.org:8333", 18 | ] 19 | } 20 | 21 | pub fn testnet_seednodes() -> Vec<&'static 
str> { 22 | vec![ 23 | "testnet-seed.bitcoin.jonasschnelli.ch:18333", 24 | "seed.tbtc.petertodd.org:18333", 25 | "testnet-seed.bluematt.me:18333", 26 | "testnet-seed.bitcoin.schildbach.de:18333", 27 | "testnet-seed.voskuil.org:18333", 28 | ] 29 | } 30 | 31 | pub fn bitcoin_cash_seednodes() -> Vec<&'static str> { 32 | vec![ 33 | "seed.bitcoinabc.org:8333", 34 | "seed-abc.bitcoinforks.org:8333", 35 | "seed.bitprim.org:8333", 36 | "seed.deadalnix.me:8333", 37 | "seeder.criptolayer.net:8333" 38 | ] 39 | } 40 | 41 | pub fn bitcoin_cash_testnet_seednodes() -> Vec<&'static str> { 42 | vec![ 43 | "testnet-seed.bitcoinabc.org:18333", 44 | "testnet-seed-abc.bitcoinforks.org:18333", 45 | "testnet-seed.bitprim.org:18333", 46 | "testnet-seed.deadalnix.me:18333", 47 | "testnet-seeder.criptolayer.net:18333" 48 | ] 49 | } 50 | -------------------------------------------------------------------------------- /pbtc/util.rs: -------------------------------------------------------------------------------- 1 | use std::sync::Arc; 2 | use std::path::PathBuf; 3 | use std::fs::create_dir_all; 4 | use app_dirs::{app_dir, AppDataType}; 5 | use {storage, APP_INFO}; 6 | use db; 7 | use config::Config; 8 | use chain::IndexedBlock; 9 | 10 | pub fn open_db(data_dir: &Option, db_cache: usize) -> storage::SharedStore { 11 | let db_path = match *data_dir { 12 | Some(ref data_dir) => custom_path(&data_dir, "db"), 13 | None => app_dir(AppDataType::UserData, &APP_INFO, "db").expect("Failed to get app dir"), 14 | }; 15 | Arc::new(db::BlockChainDatabase::open_at_path(db_path, db_cache).expect("Failed to open database")) 16 | } 17 | 18 | pub fn node_table_path(cfg: &Config) -> PathBuf { 19 | let mut node_table = match cfg.data_dir { 20 | Some(ref data_dir) => custom_path(&data_dir, "p2p"), 21 | None => app_dir(AppDataType::UserData, &APP_INFO, "p2p").expect("Failed to get app dir"), 22 | }; 23 | node_table.push("nodes.csv"); 24 | node_table 25 | } 26 | 27 | pub fn init_db(cfg: &Config) -> Result<(), String> { 28 | // insert genesis block if db is empty 29 | let genesis_block: IndexedBlock = cfg.network.genesis_block().into(); 30 | match cfg.db.block_hash(0) { 31 | Some(ref db_genesis_block_hash) if db_genesis_block_hash != genesis_block.hash() => Err("Trying to open database with incompatible genesis block".into()), 32 | Some(_) => Ok(()), 33 | None => { 34 | let hash = genesis_block.hash().clone(); 35 | cfg.db.insert(genesis_block).expect("Failed to insert genesis block to the database"); 36 | cfg.db.canonize(&hash).expect("Failed to canonize genesis block"); 37 | Ok(()) 38 | } 39 | } 40 | } 41 | 42 | fn custom_path(data_dir: &str, sub_dir: &str) -> PathBuf { 43 | let mut path = PathBuf::from(data_dir); 44 | path.push(sub_dir); 45 | create_dir_all(&path).expect("Failed to get app dir"); 46 | path 47 | } 48 | -------------------------------------------------------------------------------- /primitives/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "primitives" 3 | version = "0.1.0" 4 | authors = ["debris chainpool & "] 5 | 6 | [dependencies] 7 | crunchy = "0.2" 8 | sr-std = { git = "https://github.com/chainx-org/sr-std", default-features = false } 9 | heapsize = { version = "0.4", optional = true } 10 | rustc-hex = { version = "2.0", default-features = false } 11 | byteorder = { version = "1", default-features = false } 12 | void = { version = "1", default-features = false } 13 | parity-codec = { version = "3.0", default-features = false } 14 | uint = { version 
= "0.5", default-features = false } 15 | 16 | [features] 17 | default = ["std"] 18 | std = [ 19 | "uint/std", 20 | "rustc-hex/std", 21 | "sr-std/std", 22 | "heapsize", 23 | "byteorder/std", 24 | "parity-codec/std", 25 | "void/std", 26 | ] 27 | -------------------------------------------------------------------------------- /primitives/src/lib.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2018 Chainpool 2 | #![cfg_attr(not(feature="std"), no_std)] 3 | 4 | #[allow(unused_imports)] 5 | #[macro_use] 6 | extern crate sr_std as rstd; 7 | 8 | #[cfg(feature="std")] 9 | extern crate core; 10 | extern crate byteorder; 11 | extern crate void; 12 | extern crate rustc_hex as hex; 13 | extern crate parity_codec as codec; 14 | #[cfg(feature="std")] 15 | #[macro_use] 16 | extern crate heapsize; 17 | #[macro_use] 18 | extern crate crunchy; 19 | #[macro_use] 20 | extern crate uint as uint_crate; 21 | 22 | pub mod bytes; 23 | pub mod compact; 24 | pub mod hash; 25 | pub mod io; 26 | mod u256; 27 | pub use u256::U256; 28 | pub use rstd::{borrow, marker}; 29 | -------------------------------------------------------------------------------- /primitives/src/u256.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2018 Chainpool 2 | 3 | use rstd::prelude::Vec; 4 | #[cfg(not(feature="std"))] 5 | use rstd::alloc::prelude::ToOwned; 6 | 7 | construct_uint!(U256, 4); 8 | 9 | impl ::codec::Encode for U256 { 10 | fn using_encoded R>(&self, f: F) -> R { 11 | let mut bytes = [0u8; 4 * 8]; 12 | self.to_little_endian(&mut bytes); 13 | bytes.using_encoded(f) 14 | } 15 | } 16 | 17 | impl ::codec::Decode for U256 { 18 | fn decode(input: &mut I) -> Option { 19 | <[u8; 4 * 8] as ::codec::Decode>::decode(input) 20 | .map(|b| U256::from_little_endian(&b)) 21 | } 22 | } 23 | 24 | -------------------------------------------------------------------------------- /rpc/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "rpc" 3 | version = "0.1.0" 4 | authors = ["Ethcore "] 5 | 6 | [lib] 7 | 8 | [dependencies] 9 | log = "0.4" 10 | hex = "0.3.1" 11 | serde = "1.0" 12 | serde_json = "1.0" 13 | serde_derive = "1.0" 14 | rustc-hex = "2" 15 | tokio-core = "0.1.1" 16 | jsonrpc-core = { git = "https://github.com/ethcore/jsonrpc.git" } 17 | jsonrpc-macros = { git = "https://github.com/ethcore/jsonrpc.git" } 18 | jsonrpc-http-server = { git = "https://github.com/ethcore/jsonrpc.git" } 19 | 20 | sync = { path = "../sync" } 21 | serialization = { path = "../serialization" } 22 | chain = { path = "../chain" } 23 | primitives = { path = "../primitives" } 24 | p2p = { path = "../p2p" } 25 | db = { path = "../db" } 26 | network = { path = "../network" } 27 | storage = { path = "../storage" } 28 | miner = { path = "../miner" } 29 | verification = { path = "../verification" } 30 | script = { path = "../script" } 31 | keys = { path = "../keys" } 32 | 33 | [dev-dependencies] 34 | test-data = { path = "../test-data" } 35 | -------------------------------------------------------------------------------- /rpc/src/lib.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2018 Chainpool 2 | 3 | #[macro_use] 4 | extern crate log; 5 | extern crate rustc_hex as hex; 6 | extern crate serde; 7 | extern crate serde_json; 8 | #[macro_use] 9 | extern crate serde_derive; 10 | extern crate jsonrpc_core; 11 | #[macro_use] 12 | extern crate jsonrpc_macros; 
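// Editorial sketch, not part of the original source: the `Encode`/`Decode` impls for `U256`
// in primitives/src/u256.rs (shown above) serialize the value as a fixed 32-byte
// little-endian array via parity-codec. Assuming parity-codec 3.x (where `Decode::decode`
// returns an `Option`), a round-trip inside the primitives crate would look roughly like:
//
//     use codec::{Encode, Decode};
//
//     let x = U256::from(42u64);
//     let bytes = x.encode();                        // 32 bytes, little-endian
//     assert_eq!(U256::decode(&mut &bytes[..]), Some(x));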
13 | extern crate jsonrpc_http_server; 14 | extern crate tokio_core; 15 | extern crate sync; 16 | extern crate chain; 17 | extern crate serialization as ser; 18 | extern crate primitives; 19 | extern crate p2p; 20 | extern crate network; 21 | extern crate storage; 22 | extern crate miner; 23 | extern crate verification; 24 | extern crate script as global_script; 25 | extern crate keys; 26 | extern crate hex as other_hex; 27 | extern crate db; 28 | 29 | pub mod v1; 30 | pub mod rpc_server; 31 | 32 | pub use jsonrpc_core::{MetaIoHandler, Compatibility, Error}; 33 | pub use tokio_core::reactor::{Remote}; 34 | 35 | pub use jsonrpc_http_server::Server; 36 | pub use rpc_server::start_http; 37 | -------------------------------------------------------------------------------- /rpc/src/rpc_server.rs: -------------------------------------------------------------------------------- 1 | // TODO: panic handler 2 | use std::io; 3 | use std::net::SocketAddr; 4 | use jsonrpc_core; 5 | use jsonrpc_http_server::{self, ServerBuilder, Server, Host}; 6 | 7 | /// Start http server asynchronously and returns result with `Server` handle on success or an error. 8 | pub fn start_http>( 9 | addr: &SocketAddr, 10 | cors_domains: Option>, 11 | allowed_hosts: Option>, 12 | handler: jsonrpc_core::MetaIoHandler, 13 | ) -> Result { 14 | 15 | let cors_domains = cors_domains.map(|domains| { 16 | domains.into_iter() 17 | .map(|v| match v.as_str() { 18 | "*" => jsonrpc_http_server::AccessControlAllowOrigin::Any, 19 | "null" => jsonrpc_http_server::AccessControlAllowOrigin::Null, 20 | v => jsonrpc_http_server::AccessControlAllowOrigin::Value(v.into()), 21 | }) 22 | .collect() 23 | }); 24 | 25 | ServerBuilder::new(handler) 26 | .cors(cors_domains.into()) 27 | .allowed_hosts(allowed_hosts.map(|hosts| hosts.into_iter().map(Host::from).collect()).into()) 28 | .start_http(addr) 29 | } 30 | -------------------------------------------------------------------------------- /rpc/src/v1/helpers/mod.rs: -------------------------------------------------------------------------------- 1 | #[macro_use] 2 | pub mod errors; 3 | -------------------------------------------------------------------------------- /rpc/src/v1/impls/mod.rs: -------------------------------------------------------------------------------- 1 | mod blockchain; 2 | mod miner; 3 | mod raw; 4 | mod network; 5 | 6 | pub use self::blockchain::{BlockChainClient, BlockChainClientCore}; 7 | pub use self::miner::{MinerClient, MinerClientCore}; 8 | pub use self::raw::{RawClient, RawClientCore, SimpleClientCore}; 9 | pub use self::network::{NetworkClient, NetworkClientCore}; 10 | -------------------------------------------------------------------------------- /rpc/src/v1/mod.rs: -------------------------------------------------------------------------------- 1 | #[macro_use] 2 | pub mod helpers; 3 | pub mod impls; 4 | pub mod traits; 5 | pub mod types; 6 | 7 | pub use self::traits::Raw; 8 | pub use self::traits::Miner; 9 | pub use self::traits::BlockChain; 10 | pub use self::traits::Network; 11 | pub use self::impls::{RawClient, RawClientCore, SimpleClientCore}; 12 | pub use self::impls::{MinerClient, MinerClientCore}; 13 | pub use self::impls::{BlockChainClient, BlockChainClientCore}; 14 | pub use self::impls::{NetworkClient, NetworkClientCore}; 15 | -------------------------------------------------------------------------------- /rpc/src/v1/traits/miner.rs: -------------------------------------------------------------------------------- 1 | use jsonrpc_core::Error; 2 | 3 | use 
v1::types::{BlockTemplate, BlockTemplateRequest}; 4 | 5 | build_rpc_trait! { 6 | /// Parity-bitcoin miner data interface. 7 | pub trait Miner { 8 | /// Get block template for mining. 9 | /// @curl-example: curl --data-binary '{"jsonrpc": "2.0", "method": "getblocktemplate", "params": [{"capabilities": ["coinbasetxn", "workid", "coinbase/append"]}], "id":1 }' -H 'content-type: application/json' http://127.0.0.1:8332/ 10 | #[rpc(name = "getblocktemplate")] 11 | fn get_block_template(&self, BlockTemplateRequest) -> Result; 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /rpc/src/v1/traits/mod.rs: -------------------------------------------------------------------------------- 1 | mod blockchain; 2 | mod miner; 3 | mod raw; 4 | mod network; 5 | 6 | pub use self::blockchain::BlockChain; 7 | pub use self::miner::Miner; 8 | pub use self::raw::Raw; 9 | pub use self::network::Network; 10 | -------------------------------------------------------------------------------- /rpc/src/v1/traits/network.rs: -------------------------------------------------------------------------------- 1 | use jsonrpc_core::Error; 2 | use jsonrpc_macros::Trailing; 3 | use v1::types::{AddNodeOperation, NodeInfo}; 4 | 5 | build_rpc_trait! { 6 | /// Parity-bitcoin network interface 7 | pub trait Network { 8 | /// Add/remove/connect to the node 9 | /// @curl-example: curl --data-binary '{"jsonrpc": "2.0", "method": "addnode", "params": ["127.0.0.1:8888", "add"], "id":1 }' -H 'content-type: application/json' http://127.0.0.1:8332/ 10 | /// @curl-example: curl --data-binary '{"jsonrpc": "2.0", "method": "addnode", "params": ["127.0.0.1:8888", "remove"], "id":1 }' -H 'content-type: application/json' http://127.0.0.1:8332/ 11 | /// @curl-example: curl --data-binary '{"jsonrpc": "2.0", "method": "addnode", "params": ["127.0.0.1:8888", "onetry"], "id":1 }' -H 'content-type: application/json' http://127.0.0.1:8332/ 12 | #[rpc(name = "addnode")] 13 | fn add_node(&self, String, AddNodeOperation) -> Result<(), Error>; 14 | /// Query node(s) info 15 | /// @curl-example: curl --data-binary '{"jsonrpc": "2.0", "id":"1", "method": "getaddednodeinfo", "params": [true] }' -H 'content-type: application/json' http://127.0.0.1:8332/ 16 | /// @curl-example: curl --data-binary '{"jsonrpc": "2.0", "id":"1", "method": "getaddednodeinfo", "params": [true, "192.168.0.201"] }' -H 'content-type: application/json' http://127.0.0.1:8332/ 17 | #[rpc(name = "getaddednodeinfo")] 18 | fn node_info(&self, bool, Trailing) -> Result, Error>; 19 | /// Query node(s) info 20 | /// @curl-example: curl --data-binary '{"jsonrpc": "2.0", "id":"1", "method": "getconnectioncount", "params": [] }' -H 'content-type: application/json' http://127.0.0.1:8332/ 21 | #[rpc(name = "getconnectioncount")] 22 | fn connection_count(&self) -> Result; 23 | } 24 | } 25 | -------------------------------------------------------------------------------- /rpc/src/v1/types/address.rs: -------------------------------------------------------------------------------- 1 | use std::fmt; 2 | use serde::{Serialize, Serializer, Deserializer}; 3 | use serde::de::{Visitor, Unexpected}; 4 | use keys::Address; 5 | 6 | pub fn serialize(address: &Address, serializer: S) -> Result where S: Serializer { 7 | address.to_string().serialize(serializer) 8 | } 9 | 10 | pub fn deserialize<'a, D>(deserializer: D) -> Result where D: Deserializer<'a> { 11 | deserializer.deserialize_any(AddressVisitor) 12 | } 13 | 14 | #[derive(Default)] 15 | pub struct 
AddressVisitor; 16 | 17 | impl<'b> Visitor<'b> for AddressVisitor { 18 | type Value = Address; 19 | 20 | fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { 21 | formatter.write_str("an address") 22 | } 23 | 24 | fn visit_str(self, value: &str) -> Result where E: ::serde::de::Error { 25 | value.parse().map_err(|_| E::invalid_value(Unexpected::Str(value), &self)) 26 | } 27 | } 28 | 29 | pub mod vec { 30 | use serde::{Serialize, Serializer, Deserializer, Deserialize}; 31 | use serde::de::Visitor; 32 | use keys::Address; 33 | use super::AddressVisitor; 34 | 35 | pub fn serialize(addresses: &Vec
<Address>
, serializer: S) -> Result where S: Serializer { 36 | addresses.iter().map(|address| address.to_string()).collect::>().serialize(serializer) 37 | } 38 | 39 | pub fn deserialize<'a, D>(deserializer: D) -> Result, D::Error> where D: Deserializer<'a> { 40 | as Deserialize>::deserialize(deserializer)? 41 | .into_iter() 42 | .map(|value| AddressVisitor::default().visit_str(value)) 43 | .collect() 44 | } 45 | } 46 | 47 | #[cfg(test)] 48 | mod tests { 49 | use serde_json; 50 | use keys::Address; 51 | use v1::types; 52 | 53 | #[derive(Debug, PartialEq, Serialize, Deserialize)] 54 | struct TestStruct { 55 | #[serde(with = "types::address")] 56 | address: Address, 57 | } 58 | 59 | impl TestStruct { 60 | fn new(address: Address) -> Self { 61 | TestStruct { 62 | address: address, 63 | } 64 | } 65 | } 66 | 67 | #[test] 68 | fn address_serialize() { 69 | let test = TestStruct::new("1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa".into()); 70 | assert_eq!(serde_json::to_string(&test).unwrap(), r#"{"address":"1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa"}"#); 71 | } 72 | 73 | #[test] 74 | fn address_deserialize() { 75 | let test = TestStruct::new("1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa".into()); 76 | assert_eq!(serde_json::from_str::(r#"{"address":"1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa"}"#).unwrap(), test); 77 | } 78 | } 79 | -------------------------------------------------------------------------------- /rpc/src/v1/types/block.rs: -------------------------------------------------------------------------------- 1 | use super::bytes::Bytes; 2 | 3 | /// Hex-encoded block 4 | pub type RawBlock = Bytes; 5 | -------------------------------------------------------------------------------- /rpc/src/v1/types/get_tx_out_set_info_response.rs: -------------------------------------------------------------------------------- 1 | #[derive(Debug, Serialize, Deserialize)] 2 | pub struct GetTxOutSetInfoResponse { 3 | } 4 | -------------------------------------------------------------------------------- /rpc/src/v1/types/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod address; 2 | mod block; 3 | mod block_template; 4 | mod block_template_request; 5 | mod bytes; 6 | mod get_block_response; 7 | mod get_tx_out_response; 8 | mod get_tx_out_set_info_response; 9 | mod hash; 10 | mod script; 11 | mod transaction; 12 | mod uint; 13 | mod nodes; 14 | 15 | pub use self::block::RawBlock; 16 | pub use self::block_template::{BlockTemplate, BlockTemplateTransaction}; 17 | pub use self::block_template_request::{BlockTemplateRequest, BlockTemplateRequestMode}; 18 | pub use self::bytes::Bytes; 19 | pub use self::get_block_response::{GetBlockResponse, VerboseBlock}; 20 | pub use self::get_tx_out_response::GetTxOutResponse; 21 | pub use self::get_tx_out_set_info_response::GetTxOutSetInfoResponse; 22 | pub use self::hash::{H160, H256}; 23 | pub use self::script::ScriptType; 24 | pub use self::transaction::{RawTransaction, Transaction, TransactionInput, TransactionOutput, 25 | TransactionOutputWithAddress, TransactionOutputWithScriptData, TransactionInputScript, 26 | TransactionOutputScript, SignedTransactionInput, GetRawTransactionResponse, 27 | SignedTransactionOutput, TransactionOutputs}; 28 | pub use self::uint::U256; 29 | pub use self::nodes::{AddNodeOperation, NodeInfo}; 30 | -------------------------------------------------------------------------------- /rpc/src/v1/types/nodes.rs: -------------------------------------------------------------------------------- 1 | use std::fmt; 2 | use serde::{Serialize, 
Serializer, Deserialize, Deserializer}; 3 | use serde::de::Unexpected; 4 | use p2p::{Direction, PeerInfo}; 5 | 6 | #[derive(Debug, PartialEq)] 7 | pub enum AddNodeOperation { 8 | Add, 9 | Remove, 10 | OneTry, 11 | } 12 | 13 | impl<'a> Deserialize<'a> for AddNodeOperation { 14 | fn deserialize(deserializer: D) -> Result where D: Deserializer<'a> { 15 | use serde::de::Visitor; 16 | 17 | struct DummyVisitor; 18 | 19 | impl<'b> Visitor<'b> for DummyVisitor { 20 | type Value = AddNodeOperation; 21 | 22 | fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { 23 | formatter.write_str("a node operation string") 24 | } 25 | 26 | fn visit_str(self, value: &str) -> Result where E: ::serde::de::Error { 27 | match value { 28 | "add" => Ok(AddNodeOperation::Add), 29 | "remove" => Ok(AddNodeOperation::Remove), 30 | "onetry" => Ok(AddNodeOperation::OneTry), 31 | _ => Err(E::invalid_value(Unexpected::Str(value), &self)), 32 | } 33 | } 34 | } 35 | 36 | deserializer.deserialize_identifier(DummyVisitor) 37 | } 38 | } 39 | 40 | #[derive(Serialize)] 41 | pub struct NodeInfoAddress { 42 | address: String, 43 | connected: NodeInfoAddressConnectionType, 44 | } 45 | 46 | impl From for NodeInfoAddress { 47 | fn from(info: PeerInfo) -> Self { 48 | NodeInfoAddress { 49 | address: format!("{}", info.address), 50 | connected: match info.direction { 51 | Direction::Inbound => NodeInfoAddressConnectionType::Inbound, 52 | Direction::Outbound => NodeInfoAddressConnectionType::Outbound, 53 | }, 54 | } 55 | } 56 | } 57 | 58 | #[derive(Serialize)] 59 | pub struct NodeInfo { 60 | pub addednode: String, 61 | pub connected: bool, 62 | pub addresses: Vec, 63 | } 64 | 65 | pub enum NodeInfoAddressConnectionType { 66 | Inbound, 67 | Outbound, 68 | } 69 | 70 | impl Serialize for NodeInfoAddressConnectionType { 71 | fn serialize(&self, serializer: S) -> Result where S: Serializer { 72 | match *self { 73 | NodeInfoAddressConnectionType::Inbound => "inbound".serialize(serializer), 74 | NodeInfoAddressConnectionType::Outbound => "outbound".serialize(serializer), 75 | } 76 | } 77 | } 78 | -------------------------------------------------------------------------------- /script/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "script" 3 | version = "0.1.0" 4 | authors = ["debris "] 5 | 6 | [dependencies] 7 | sr-std = { git = "https://github.com/chainx-org/sr-std", default-features = false } 8 | bitcrypto = { path = "../crypto", default-features = false } 9 | chain = { path = "../chain", default-features = false } 10 | keys = { path = "../keys", default-features = false } 11 | primitives = { path = "../primitives", default-features = false } 12 | serialization = { path = "../serialization", default-features = false } 13 | 14 | [features] 15 | default = ["std"] 16 | std = [ 17 | "bitcrypto/std", 18 | "chain/std", 19 | "keys/std", 20 | "primitives/std", 21 | "serialization/std", 22 | ] 23 | -------------------------------------------------------------------------------- /script/src/lib.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2018 Chainpool 2 | 3 | #![cfg_attr(not(feature = "std"), no_std)] 4 | 5 | extern crate bitcrypto as crypto; 6 | extern crate chain; 7 | extern crate keys; 8 | extern crate primitives; 9 | extern crate serialization as ser; 10 | extern crate sr_std as rstd; 11 | 12 | pub mod builder; 13 | mod error; 14 | mod flags; 15 | mod interpreter; 16 | mod num; 17 | mod opcode; 18 | pub mod 
script; 19 | mod sign; 20 | mod stack; 21 | mod verify; 22 | 23 | pub use primitives::{bytes, hash}; 24 | 25 | pub use self::builder::Builder; 26 | pub use self::error::Error; 27 | pub use self::flags::VerificationFlags; 28 | pub use self::interpreter::{eval_script, verify_script}; 29 | pub use self::opcode::Opcode; 30 | pub use self::num::Num; 31 | pub use self::script::{Script, ScriptType, ScriptAddress, ScriptWitness, is_witness_commitment_script}; 32 | pub use self::sign::{TransactionInputSigner, UnsignedTransactionInput, SignatureVersion}; 33 | pub use self::stack::Stack; 34 | pub use self::verify::{SignatureChecker, NoopSignatureChecker, TransactionSignatureChecker}; 35 | 36 | -------------------------------------------------------------------------------- /serialization/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "serialization" 3 | version = "0.1.0" 4 | authors = ["debris & chainpool "] 5 | 6 | [dependencies] 7 | byteorder = { version = "1", default-features = false } 8 | primitives = { path = "../primitives", default-features = false } 9 | sr-std = { git = "https://github.com/chainx-org/sr-std", default-features = false } 10 | 11 | [features] 12 | default = ["std"] 13 | std = [ "byteorder/std", "primitives/std", "sr-std/std"] 14 | -------------------------------------------------------------------------------- /serialization/src/lib.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2018 Chainpool 2 | 3 | #![cfg_attr(not(feature = "std"), no_std)] 4 | 5 | extern crate byteorder; 6 | extern crate primitives; 7 | #[allow(unused_imports)] 8 | #[macro_use] 9 | extern crate sr_std as rstd; 10 | 11 | mod compact_integer; 12 | mod impls; 13 | mod list; 14 | mod reader; 15 | mod stream; 16 | 17 | pub use primitives::{hash, bytes, compact, io::Error}; 18 | 19 | pub use compact_integer::CompactInteger; 20 | pub use list::List; 21 | pub use reader::{Reader, Deserializable, deserialize, deserialize_iterator, ReadIterator}; 22 | pub use stream::{ 23 | Stream, Serializable, serialize, serialize_with_flags, serialize_list, serialized_list_size, 24 | serialized_list_size_with_flags, SERIALIZE_TRANSACTION_WITNESS, 25 | }; 26 | 27 | -------------------------------------------------------------------------------- /serialization/src/list.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2018 Chainpool 2 | 3 | use primitives::io; 4 | use rstd::prelude::Vec; 5 | use primitives::io::Error; 6 | use {Serializable, Deserializable, Reader, Stream}; 7 | 8 | #[derive(Debug, Clone)] 9 | pub struct List(Vec); 10 | 11 | impl List where T: Serializable + Deserializable { 12 | pub fn from(vec: Vec) -> Self { 13 | List(vec) 14 | } 15 | 16 | pub fn into(self) -> Vec { 17 | self.0 18 | } 19 | } 20 | 21 | impl Serializable for List where S: Serializable { 22 | fn serialize(&self, s: &mut Stream) { 23 | s.append_list(&self.0); 24 | } 25 | } 26 | 27 | impl Deserializable for List where D: Deserializable { 28 | fn deserialize(reader: &mut Reader) -> Result where T: io::Read { 29 | reader.read_list().map(List) 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /serialization_derive/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "serialization_derive" 3 | version = "0.1.0" 4 | authors = ["debris "] 5 | 6 | [lib] 7 | name = 
"serialization_derive" 8 | proc-macro = true 9 | 10 | [dependencies] 11 | syn = "0.11.11" 12 | quote = "0.3.15" 13 | primitives = { path = "../primitives" } 14 | 15 | [dev-dependencies] 16 | serialization = { path = "../serialization" } 17 | -------------------------------------------------------------------------------- /serialization_derive/src/de.rs: -------------------------------------------------------------------------------- 1 | use {syn, quote}; 2 | 3 | pub fn impl_deserializable(ast: &syn::DeriveInput) -> quote::Tokens { 4 | let body = match ast.body { 5 | syn::Body::Struct(ref s) => s, 6 | _ => panic!("#[derive(Deserializable)] is only defined for structs."), 7 | }; 8 | 9 | let stmts: Vec<_> = match *body { 10 | syn::VariantData::Struct(ref fields) => fields.iter().enumerate().map(deserialize_field_map).collect(), 11 | syn::VariantData::Tuple(ref fields) => fields.iter().enumerate().map(deserialize_field_map).collect(), 12 | syn::VariantData::Unit => panic!("#[derive(Deserializable)] is not defined for Unit structs."), 13 | }; 14 | 15 | let name = &ast.ident; 16 | 17 | let dummy_const = syn::Ident::new(format!("_IMPL_DESERIALIZABLE_FOR_{}", name)); 18 | let impl_block = quote! { 19 | impl serialization::Deserializable for #name { 20 | fn deserialize(reader: &mut serialization::Reader) -> Result where T: io::Read { 21 | let result = #name { 22 | #(#stmts)* 23 | }; 24 | 25 | Ok(result) 26 | } 27 | } 28 | }; 29 | 30 | quote! { 31 | #[allow(non_upper_case_globals, unused_attributes, unused_qualifications)] 32 | const #dummy_const: () = { 33 | extern crate serialization; 34 | use primitives::io; 35 | #impl_block 36 | }; 37 | } 38 | } 39 | 40 | fn deserialize_field_map(tuple: (usize, &syn::Field)) -> quote::Tokens { 41 | deserialize_field(tuple.0, tuple.1) 42 | } 43 | 44 | fn deserialize_field(index: usize, field: &syn::Field) -> quote::Tokens { 45 | let ident = match field.ident { 46 | Some(ref ident) => ident.to_string(), 47 | None => index.to_string(), 48 | }; 49 | 50 | let id = syn::Ident::new(ident.to_string()); 51 | 52 | match field.ty { 53 | syn::Ty::Path(_, ref path) => { 54 | let ident = &path.segments.first().expect("there must be at least 1 segment").ident; 55 | if &ident.to_string() == "Vec" { 56 | quote! { #id: reader.read_list()?, } 57 | } else { 58 | quote! 
{ #id: reader.read()?, } 59 | } 60 | }, 61 | _ => panic!("serialization not supported"), 62 | } 63 | } 64 | -------------------------------------------------------------------------------- /serialization_derive/src/lib.rs: -------------------------------------------------------------------------------- 1 | extern crate proc_macro; 2 | extern crate syn; 3 | #[macro_use] 4 | extern crate quote; 5 | 6 | mod ser; 7 | mod de; 8 | 9 | use proc_macro::TokenStream; 10 | use ser::impl_serializable; 11 | use de::impl_deserializable; 12 | 13 | #[proc_macro_derive(Serializable)] 14 | pub fn serializable(input: TokenStream) -> TokenStream { 15 | let s = input.to_string(); 16 | let ast = syn::parse_derive_input(&s).unwrap(); 17 | let gen = impl_serializable(&ast); 18 | gen.parse().unwrap() 19 | } 20 | 21 | #[proc_macro_derive(Deserializable)] 22 | pub fn deserializable(input: TokenStream) -> TokenStream { 23 | let s = input.to_string(); 24 | let ast = syn::parse_derive_input(&s).unwrap(); 25 | let gen = impl_deserializable(&ast); 26 | gen.parse().unwrap() 27 | } 28 | 29 | -------------------------------------------------------------------------------- /serialization_derive/tests/raw.rs: -------------------------------------------------------------------------------- 1 | extern crate serialization; 2 | #[macro_use] 3 | extern crate serialization_derive; 4 | extern crate primitives; 5 | 6 | use serialization::{serialize, deserialize}; 7 | 8 | #[derive(Debug, PartialEq, Serializable, Deserializable)] 9 | struct Foo { 10 | a: u8, 11 | b: u16, 12 | c: u32, 13 | d: u64, 14 | } 15 | 16 | #[derive(Debug, PartialEq, Serializable, Deserializable)] 17 | struct Bar { 18 | a: Vec, 19 | } 20 | 21 | #[test] 22 | fn test_foo_serialize() { 23 | let foo = Foo { 24 | a: 1, 25 | b: 2, 26 | c: 3, 27 | d: 4, 28 | }; 29 | 30 | let expected = vec![ 31 | 1u8, 32 | 2, 0, 33 | 3, 0, 0, 0, 34 | 4, 0, 0, 0, 0, 0, 0, 0, 35 | ].into(); 36 | 37 | let result = serialize(&foo); 38 | assert_eq!(result, expected); 39 | 40 | let d = deserialize(expected.as_ref()).unwrap(); 41 | assert_eq!(foo, d); 42 | } 43 | 44 | #[test] 45 | fn test_bar_serialize() { 46 | let foo = Foo { 47 | a: 1, 48 | b: 2, 49 | c: 3, 50 | d: 4, 51 | }; 52 | 53 | let foo2 = Foo { 54 | a: 5, 55 | b: 6, 56 | c: 7, 57 | d: 8, 58 | }; 59 | 60 | let expected = vec![ 61 | // number of items 62 | 2u8, 63 | // first 64 | 1, 65 | 2, 0, 66 | 3, 0, 0, 0, 67 | 4, 0, 0, 0, 0, 0, 0, 0, 68 | // second 69 | 5, 70 | 6, 0, 71 | 7, 0, 0, 0, 72 | 8, 0, 0, 0, 0, 0, 0, 0, 73 | ].into(); 74 | 75 | let bar = Bar { 76 | a: vec![foo, foo2], 77 | }; 78 | 79 | let result = serialize(&bar); 80 | assert_eq!(result, expected); 81 | 82 | let d = deserialize(expected.as_ref()).unwrap(); 83 | assert_eq!(bar, d); 84 | } 85 | -------------------------------------------------------------------------------- /snap/snapcraft.yaml: -------------------------------------------------------------------------------- 1 | name: parity-bitcoin 2 | version: git 3 | summary: The Parity Bitcoin client 4 | description: | 5 | Bitcoin client written in Rust. 6 | 7 | grade: devel # must be 'stable' to release into candidate/stable channels 8 | confinement: strict 9 | 10 | apps: 11 | parity-bitcoin: 12 | command: pbtc 13 | plugs: [home, network, network-bind] 14 | 15 | parts: 16 | parity-bitcoin: 17 | source: . 
18 | plugin: rust 19 | build-packages: [g++] 20 | -------------------------------------------------------------------------------- /storage/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "storage" 3 | version = "0.1.0" 4 | authors = ["Parity Technologies "] 5 | 6 | [dependencies] 7 | elastic-array = "0.6" 8 | parking_lot = "0.4" 9 | bit-vec = "0.4" 10 | lru-cache = "0.1" 11 | primitives = { path = "../primitives" } 12 | serialization = { path = "../serialization" } 13 | chain = { path = "../chain" } 14 | display_derive = "0.0.0" 15 | -------------------------------------------------------------------------------- /storage/src/best_block.rs: -------------------------------------------------------------------------------- 1 | use std::fmt; 2 | use hash::H256; 3 | 4 | /// Best block information 5 | #[derive(Clone, PartialEq, Default)] 6 | pub struct BestBlock { 7 | /// Height/number of the best block (genesis block has zero height) 8 | pub number: u32, 9 | /// Hash of the best block 10 | pub hash: H256, 11 | } 12 | 13 | impl fmt::Debug for BestBlock { 14 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 15 | f.debug_struct("BestBlock") 16 | .field("number", &self.number) 17 | .field("hash", &self.hash.reversed()) 18 | .finish() 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /storage/src/block_ancestors.rs: -------------------------------------------------------------------------------- 1 | use chain::BlockHeader; 2 | use {BlockRef, BlockHeaderProvider}; 3 | 4 | pub struct BlockAncestors<'a> { 5 | block: Option, 6 | headers: &'a BlockHeaderProvider, 7 | } 8 | 9 | impl<'a> BlockAncestors<'a> { 10 | pub fn new(block: BlockRef, headers: &'a BlockHeaderProvider) -> Self { 11 | BlockAncestors { 12 | block: Some(block), 13 | headers: headers, 14 | } 15 | } 16 | } 17 | 18 | impl<'a> Iterator for BlockAncestors<'a> { 19 | type Item = BlockHeader; 20 | 21 | fn next(&mut self) -> Option { 22 | let result = self.block.take().and_then(|block| self.headers.block_header(block)); 23 | self.block = match result { 24 | Some(ref header) => Some(BlockRef::Hash(header.previous_header_hash.clone())), 25 | None => None, 26 | }; 27 | result 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /storage/src/block_chain.rs: -------------------------------------------------------------------------------- 1 | use hash::H256; 2 | use chain::{IndexedBlock, IndexedBlockHeader}; 3 | use {Error, BlockOrigin, Store, SideChainOrigin}; 4 | 5 | pub trait ForkChain { 6 | /// Returns forks underlaying store. 7 | fn store(&self) -> &Store; 8 | 9 | /// Flush fork changes to canon chain. 10 | /// Should not be used directly from outside of `BlockChain`. 11 | fn flush(&self) -> Result<(), Error>; 12 | } 13 | 14 | pub trait BlockChain { 15 | /// Inserts new block into blockchain 16 | fn insert(&self, block: IndexedBlock) -> Result<(), Error>; 17 | 18 | /// Rollbacks single best block. Returns new best block hash 19 | fn rollback_best(&self) -> Result; 20 | 21 | /// Canonizes block with given hash 22 | fn canonize(&self, block_hash: &H256) -> Result<(), Error>; 23 | 24 | /// Decanonizes best block 25 | fn decanonize(&self) -> Result; 26 | 27 | /// Checks block origin 28 | fn block_origin(&self, header: &IndexedBlockHeader) -> Result; 29 | } 30 | 31 | pub trait Forkable { 32 | /// Forks current blockchain. 
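// A minimal sketch, not part of block_chain.rs: how the `BlockAncestors` iterator shown
// above might be used to walk headers backwards from a given hash. `headers` can be any
// `BlockHeaderProvider` implementation; the function name and the 11-header limit are
// assumptions for the example only.
// (uses storage::{BlockAncestors, BlockRef, BlockHeaderProvider} and primitives::hash::H256)
fn recent_ancestor_timestamps(start: hash::H256, headers: &BlockHeaderProvider) -> Vec<u32> {
    BlockAncestors::new(BlockRef::Hash(start), headers)
        .take(11)                   // visit at most 11 ancestors, newest first
        .map(|header| header.time)  // keep only the header timestamps
        .collect()
}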
33 | /// Lifetime guarantees fork relationship with canon chain. 34 | fn fork<'a>(&'a self, side_chain: SideChainOrigin) -> Result, Error>; 35 | 36 | /// Switches blockchain to given fork. 37 | /// Lifetime guarantees that fork comes from this canon chain. 38 | fn switch_to_fork<'a>(&'a self, fork: Box) -> Result<(), Error>; 39 | } 40 | -------------------------------------------------------------------------------- /storage/src/block_impls.rs: -------------------------------------------------------------------------------- 1 | use std::cmp; 2 | use chain::{OutPoint, TransactionOutput, IndexedBlock, IndexedTransaction}; 3 | use {TransactionOutputProvider}; 4 | 5 | fn transaction_output(transactions: &[IndexedTransaction], prevout: &OutPoint) -> Option { 6 | transactions.iter() 7 | .find(|tx| tx.hash == prevout.hash) 8 | .and_then(|tx| tx.raw.outputs.get(prevout.index as usize)) 9 | .cloned() 10 | } 11 | 12 | fn is_spent(transactions: &[IndexedTransaction], prevout: &OutPoint) -> bool { 13 | // the code below is valid, but has rather poor performance 14 | 15 | // if previous transaction output appears more than once than we can safely 16 | // tell that it's spent (double spent) 17 | let spends = transactions.iter() 18 | .flat_map(|tx| &tx.raw.inputs) 19 | .filter(|input| &input.previous_output == prevout) 20 | .take(2) 21 | .count(); 22 | 23 | spends == 2 24 | } 25 | 26 | impl TransactionOutputProvider for IndexedBlock { 27 | fn transaction_output(&self, outpoint: &OutPoint, transaction_index: usize) -> Option { 28 | let take = cmp::min(transaction_index, self.transactions.len()); 29 | transaction_output(&self.transactions[..take], outpoint) 30 | } 31 | 32 | fn is_spent(&self, outpoint: &OutPoint) -> bool { 33 | is_spent(&self.transactions, outpoint) 34 | } 35 | } 36 | -------------------------------------------------------------------------------- /storage/src/block_iterator.rs: -------------------------------------------------------------------------------- 1 | use chain::BlockHeader; 2 | use {BlockRef, BlockHeaderProvider}; 3 | 4 | pub struct BlockIterator<'a> { 5 | block: u32, 6 | period: u32, 7 | headers: &'a BlockHeaderProvider, 8 | } 9 | 10 | impl<'a> BlockIterator<'a> { 11 | pub fn new(block: u32, period: u32, headers: &'a BlockHeaderProvider) -> Self { 12 | BlockIterator { 13 | block: block, 14 | period: period, 15 | headers: headers, 16 | } 17 | } 18 | } 19 | 20 | impl<'a> Iterator for BlockIterator<'a> { 21 | type Item = (u32, BlockHeader); 22 | 23 | fn next(&mut self) -> Option { 24 | let result = self.headers.block_header(BlockRef::Number(self.block)); 25 | let block = self.block; 26 | self.block += self.period; 27 | result.map(|header| (block, header)) 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /storage/src/block_origin.rs: -------------------------------------------------------------------------------- 1 | use std::fmt; 2 | use hash::H256; 3 | 4 | #[derive(Clone)] 5 | pub struct SideChainOrigin { 6 | /// newest ancestor block number 7 | pub ancestor: u32, 8 | /// side chain block hashes. Ordered from oldest to newest 9 | pub canonized_route: Vec, 10 | /// canon chain block hahses. 
Ordered from oldest to newest 11 | pub decanonized_route: Vec, 12 | /// new block number 13 | pub block_number: u32, 14 | } 15 | 16 | impl fmt::Debug for SideChainOrigin { 17 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 18 | f.debug_struct("SideChainOrigin") 19 | .field("ancestor", &self.ancestor) 20 | .field("canonized_route", &self.canonized_route.iter().map(|h| h.reversed()).collect::>()) 21 | .field("decanonized_route", &self.decanonized_route.iter().map(|h| h.reversed()).collect::>()) 22 | .field("block_number", &self.block_number) 23 | .finish() 24 | } 25 | } 26 | 27 | #[derive(Debug)] 28 | pub enum BlockOrigin { 29 | KnownBlock, 30 | CanonChain { 31 | block_number: u32, 32 | }, 33 | SideChain(SideChainOrigin), 34 | SideChainBecomingCanonChain(SideChainOrigin), 35 | } 36 | -------------------------------------------------------------------------------- /storage/src/block_provider.rs: -------------------------------------------------------------------------------- 1 | use hash::H256; 2 | use bytes::Bytes; 3 | use chain::{BlockHeader, Transaction, Block, IndexedBlock, IndexedBlockHeader, IndexedTransaction}; 4 | use {BlockRef}; 5 | 6 | pub trait BlockHeaderProvider { 7 | /// resolves header bytes by block reference (number/hash) 8 | fn block_header_bytes(&self, block_ref: BlockRef) -> Option; 9 | 10 | /// resolves header bytes by block reference (number/hash) 11 | fn block_header(&self, block_ref: BlockRef) -> Option; 12 | } 13 | 14 | pub trait BlockProvider: BlockHeaderProvider { 15 | 16 | /// resolves number by block hash 17 | fn block_number(&self, hash: &H256) -> Option; 18 | 19 | /// resolves hash by block number 20 | fn block_hash(&self, number: u32) -> Option; 21 | 22 | /// resolves deserialized block body by block reference (number/hash) 23 | fn block(&self, block_ref: BlockRef) -> Option; 24 | 25 | /// returns true if store contains given block 26 | fn contains_block(&self, block_ref: BlockRef) -> bool { 27 | self.block_header_bytes(block_ref).is_some() 28 | } 29 | 30 | /// resolves list of block transactions by block reference (number/hash) 31 | fn block_transaction_hashes(&self, block_ref: BlockRef) -> Vec; 32 | 33 | /// returns all transactions in the block by block reference (number/hash) 34 | fn block_transactions(&self, block_ref: BlockRef) -> Vec; 35 | } 36 | 37 | pub trait IndexedBlockProvider: BlockProvider { 38 | fn indexed_block_header(&self, block_ref: BlockRef) -> Option; 39 | 40 | fn indexed_block(&self, block_ref: BlockRef) -> Option; 41 | 42 | fn indexed_block_transactions(&self, block_ref: BlockRef) -> Vec; 43 | } 44 | -------------------------------------------------------------------------------- /storage/src/block_ref.rs: -------------------------------------------------------------------------------- 1 | use hash::H256; 2 | 3 | #[derive(Debug, Clone)] 4 | pub enum BlockRef { 5 | Number(u32), 6 | Hash(H256), 7 | } 8 | 9 | impl From for BlockRef { 10 | fn from(u: u32) -> Self { 11 | BlockRef::Number(u) 12 | } 13 | } 14 | 15 | impl From for BlockRef { 16 | fn from(hash: H256) -> Self { 17 | BlockRef::Hash(hash) 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /storage/src/error.rs: -------------------------------------------------------------------------------- 1 | #[derive(Debug, PartialEq, Display)] 2 | pub enum Error { 3 | /// Low level database error 4 | #[display(fmt = "Database error: {}", _0)] 5 | DatabaseError(String), 6 | /// Invalid block 7 | #[display(fmt = "Cannot canonize block")] 8 | 
CannotCanonize, 9 | /// Uknown parent 10 | #[display(fmt = "Block parent is unknown")] 11 | UnknownParent, 12 | /// Ancient fork 13 | #[display(fmt = "Fork is too long to proceed")] 14 | AncientFork, 15 | } 16 | 17 | impl From for String { 18 | fn from(e: Error) -> String { 19 | format!("{}", e) 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /storage/src/lib.rs: -------------------------------------------------------------------------------- 1 | extern crate elastic_array; 2 | extern crate parking_lot; 3 | extern crate bit_vec; 4 | extern crate lru_cache; 5 | #[macro_use] 6 | extern crate display_derive; 7 | 8 | extern crate primitives; 9 | extern crate serialization as ser; 10 | extern crate chain; 11 | 12 | mod best_block; 13 | mod block_ancestors; 14 | mod block_chain; 15 | mod block_impls; 16 | mod block_iterator; 17 | mod block_origin; 18 | mod block_provider; 19 | mod block_ref; 20 | mod error; 21 | mod store; 22 | mod transaction_meta; 23 | mod transaction_provider; 24 | 25 | pub use primitives::{hash, bytes}; 26 | 27 | pub use best_block::BestBlock; 28 | pub use block_ancestors::BlockAncestors; 29 | pub use block_chain::{BlockChain, ForkChain, Forkable}; 30 | pub use block_iterator::BlockIterator; 31 | pub use block_origin::{BlockOrigin, SideChainOrigin}; 32 | pub use block_provider::{BlockHeaderProvider, BlockProvider, IndexedBlockProvider}; 33 | pub use block_ref::BlockRef; 34 | pub use error::Error; 35 | pub use store::{AsSubstore, Store, SharedStore, CanonStore, ConfigStore}; 36 | pub use transaction_meta::TransactionMeta; 37 | pub use transaction_provider::{TransactionProvider, TransactionOutputProvider, TransactionMetaProvider}; 38 | 39 | -------------------------------------------------------------------------------- /storage/src/store.rs: -------------------------------------------------------------------------------- 1 | use std::sync::Arc; 2 | use chain::BlockHeader; 3 | use { 4 | BestBlock, BlockProvider, BlockHeaderProvider, TransactionProvider, TransactionMetaProvider, 5 | TransactionOutputProvider, BlockChain, IndexedBlockProvider, Forkable, Error 6 | }; 7 | 8 | pub trait CanonStore: Store + Forkable + ConfigStore { 9 | fn as_store(&self) -> &Store; 10 | } 11 | 12 | /// Configuration storage interface 13 | pub trait ConfigStore { 14 | /// get consensus_fork this database is configured for 15 | fn consensus_fork(&self) -> Result, Error>; 16 | 17 | /// set consensus_fork this database is configured for 18 | fn set_consensus_fork(&self, consensus_fork: &str) -> Result<(), Error>; 19 | } 20 | 21 | /// Blockchain storage interface 22 | pub trait Store: AsSubstore { 23 | /// get best block 24 | fn best_block(&self) -> BestBlock; 25 | 26 | /// get best header 27 | fn best_header(&self) -> BlockHeader; 28 | 29 | /// get blockchain difficulty 30 | fn difficulty(&self) -> f64; 31 | } 32 | 33 | /// Allows casting Arc to reference to any substore type 34 | pub trait AsSubstore: BlockChain + IndexedBlockProvider + TransactionProvider + TransactionMetaProvider + TransactionOutputProvider { 35 | fn as_block_provider(&self) -> &BlockProvider; 36 | 37 | fn as_block_header_provider(&self) -> &BlockHeaderProvider; 38 | 39 | fn as_transaction_provider(&self) -> &TransactionProvider; 40 | 41 | fn as_transaction_output_provider(&self) -> &TransactionOutputProvider; 42 | 43 | fn as_transaction_meta_provider(&self) -> &TransactionMetaProvider; 44 | } 45 | 46 | impl AsSubstore for T where T: BlockChain + IndexedBlockProvider + 
TransactionProvider + TransactionMetaProvider + TransactionOutputProvider { 47 | fn as_block_provider(&self) -> &BlockProvider { 48 | &*self 49 | } 50 | 51 | fn as_block_header_provider(&self) -> &BlockHeaderProvider { 52 | &*self 53 | } 54 | 55 | fn as_transaction_provider(&self) -> &TransactionProvider { 56 | &*self 57 | } 58 | 59 | fn as_transaction_output_provider(&self) -> &TransactionOutputProvider { 60 | &*self 61 | } 62 | 63 | fn as_transaction_meta_provider(&self) -> &TransactionMetaProvider { 64 | &*self 65 | } 66 | } 67 | 68 | pub type SharedStore = Arc; 69 | -------------------------------------------------------------------------------- /storage/src/transaction_provider.rs: -------------------------------------------------------------------------------- 1 | use hash::H256; 2 | use bytes::Bytes; 3 | use chain::{Transaction, OutPoint, TransactionOutput}; 4 | use {TransactionMeta}; 5 | 6 | /// Should be used to obtain all transactions from canon chain and forks. 7 | pub trait TransactionProvider { 8 | /// Returns true if store contains given transaction. 9 | fn contains_transaction(&self, hash: &H256) -> bool { 10 | self.transaction(hash).is_some() 11 | } 12 | 13 | /// Resolves transaction body bytes by transaction hash. 14 | fn transaction_bytes(&self, hash: &H256) -> Option; 15 | 16 | /// Resolves serialized transaction info by transaction hash. 17 | fn transaction(&self, hash: &H256) -> Option; 18 | } 19 | 20 | /// Should be used to get canon chain transaction outputs. 21 | pub trait TransactionOutputProvider: Send + Sync { 22 | /// Returns transaction output. 23 | fn transaction_output(&self, outpoint: &OutPoint, transaction_index: usize) -> Option; 24 | 25 | /// Returns true if we know that output is double spent. 26 | fn is_spent(&self, outpoint: &OutPoint) -> bool; 27 | } 28 | 29 | /// Transaction meta provider stores transaction meta information 30 | pub trait TransactionMetaProvider: Send + Sync { 31 | /// Returns None if transactin with given hash does not exist 32 | /// Otherwise returns transaction meta object 33 | fn transaction_meta(&self, hash: &H256) -> Option; 34 | } 35 | -------------------------------------------------------------------------------- /sync/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "sync" 3 | version = "0.1.0" 4 | authors = ["Ethcore "] 5 | 6 | [dependencies] 7 | parking_lot = "0.4" 8 | log = "0.4" 9 | time = "0.1" 10 | futures = "0.1" 11 | linked-hash-map = "0.3" 12 | bit-vec = "0.4.3" 13 | murmur3 = "0.4" 14 | rand = "0.4" 15 | byteorder = "1.0" 16 | 17 | chain = { path = "../chain" } 18 | bitcrypto = { path = "../crypto" } 19 | storage = { path = "../storage" } 20 | db = { path = "../db" } 21 | message = { path = "../message" } 22 | miner = { path = "../miner" } 23 | p2p = { path = "../p2p" } 24 | primitives = { path = "../primitives" } 25 | script = { path = "../script" } 26 | serialization = { path = "../serialization" } 27 | verification = { path = "../verification" } 28 | network = { path = "../network" } 29 | test-data = { path = "../test-data" } 30 | 31 | [features] 32 | dev = [] 33 | -------------------------------------------------------------------------------- /sync/src/inbound_connection_factory.rs: -------------------------------------------------------------------------------- 1 | use std::sync::atomic::{AtomicUsize, Ordering}; 2 | use p2p::{LocalSyncNode, LocalSyncNodeRef, OutboundSyncConnectionRef, InboundSyncConnectionRef}; 3 | use message::Services; 4 
| use inbound_connection::InboundConnection; 5 | use types::{PeersRef, LocalNodeRef}; 6 | 7 | /// Inbound synchronization connection factory 8 | pub struct InboundConnectionFactory { 9 | /// Peers reference 10 | peers: PeersRef, 11 | /// Reference to synchronization node 12 | node: LocalNodeRef, 13 | /// Throughout counter of synchronization peers 14 | counter: AtomicUsize, 15 | } 16 | 17 | impl InboundConnectionFactory { 18 | /// Create new inbound connection factory 19 | pub fn new(peers: PeersRef, node: LocalNodeRef) -> Self { 20 | InboundConnectionFactory { 21 | peers: peers, 22 | node: node, 23 | counter: AtomicUsize::new(0), 24 | } 25 | } 26 | 27 | /// Box inbound connection factory 28 | pub fn boxed(self) -> LocalSyncNodeRef { 29 | Box::new(self) 30 | } 31 | } 32 | 33 | impl LocalSyncNode for InboundConnectionFactory { 34 | fn create_sync_session( 35 | &self, 36 | _best_block_height: i32, 37 | services: Services, 38 | outbound_connection: OutboundSyncConnectionRef, 39 | ) -> InboundSyncConnectionRef { 40 | let peer_index = self.counter.fetch_add(1, Ordering::SeqCst) + 1; 41 | trace!(target: "sync", "Creating new sync session with peer#{}", peer_index); 42 | // remember outbound connection 43 | self.peers.insert(peer_index, services, outbound_connection); 44 | // create new inbound connection 45 | InboundConnection::new(peer_index, self.peers.clone(), self.node.clone()).boxed() 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /sync/src/types.rs: -------------------------------------------------------------------------------- 1 | use std::sync::Arc; 2 | use futures::Future; 3 | use parking_lot::{Mutex, RwLock}; 4 | use storage; 5 | use local_node::LocalNode; 6 | use miner::MemoryPool; 7 | use super::SyncListener; 8 | use synchronization_client::SynchronizationClient; 9 | use synchronization_executor::LocalSynchronizationTaskExecutor; 10 | use synchronization_peers::Peers; 11 | use synchronization_server::ServerImpl; 12 | use synchronization_verifier::AsyncVerifier; 13 | use utils::SynchronizationState; 14 | 15 | pub use utils::BlockHeight; 16 | 17 | /// Network request id 18 | pub type RequestId = u32; 19 | 20 | /// Peer is indexed using this type 21 | pub type PeerIndex = usize; 22 | 23 | // No-error, no-result future 24 | pub type EmptyBoxFuture = Box + Send>; 25 | 26 | /// Reference to storage 27 | pub type StorageRef = storage::SharedStore; 28 | 29 | /// Reference to memory pool 30 | pub type MemoryPoolRef = Arc>; 31 | 32 | /// Shared synchronization state reference 33 | pub type SynchronizationStateRef = Arc; 34 | 35 | /// Reference to peers 36 | pub type PeersRef = Arc; 37 | 38 | /// Reference to synchronization tasks executor 39 | pub type ExecutorRef = Arc; 40 | 41 | /// Reference to synchronization client 42 | pub type ClientRef = Arc; 43 | 44 | /// Reference to synchronization client core 45 | pub type ClientCoreRef = Arc>; 46 | 47 | /// Reference to synchronization server 48 | pub type ServerRef = Arc; 49 | 50 | /// Reference to local node 51 | pub type LocalNodeRef = Arc< 52 | LocalNode< 53 | LocalSynchronizationTaskExecutor, 54 | ServerImpl, 55 | SynchronizationClient< 56 | LocalSynchronizationTaskExecutor, 57 | AsyncVerifier, 58 | >, 59 | >, 60 | >; 61 | 62 | /// Synchronization events listener reference 63 | pub type SyncListenerRef = Box; 64 | -------------------------------------------------------------------------------- /sync/src/utils/average_speed_meter.rs: 
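// A minimal sketch, not part of this listing: how the `InboundConnectionFactory` and the
// type aliases from sync/src/types.rs above fit together. The function name is an
// assumption; `new()` and `boxed()` are the constructors shown in the factory itself.
fn inbound_sync_node(peers: PeersRef, node: LocalNodeRef) -> LocalSyncNodeRef {
    // p2p later calls create_sync_session() on the returned value for every accepted
    // peer, which bumps the internal counter and hands out the next peer index
    InboundConnectionFactory::new(peers, node).boxed()
}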
-------------------------------------------------------------------------------- 1 | use std::collections::VecDeque; 2 | use time; 3 | 4 | /// Speed meter with given items number 5 | #[derive(Debug, Default)] 6 | pub struct AverageSpeedMeter { 7 | /// Number of items to inspect 8 | inspect_items: usize, 9 | /// Number of items currently inspected 10 | inspected_items: VecDeque, 11 | /// Current speed 12 | speed: f64, 13 | /// Last timestamp 14 | last_timestamp: Option, 15 | } 16 | 17 | impl AverageSpeedMeter { 18 | pub fn with_inspect_items(inspect_items: usize) -> Self { 19 | assert!(inspect_items > 0); 20 | AverageSpeedMeter { 21 | inspect_items: inspect_items, 22 | inspected_items: VecDeque::with_capacity(inspect_items), 23 | speed: 0_f64, 24 | last_timestamp: None, 25 | } 26 | } 27 | 28 | pub fn speed(&self) -> f64 { 29 | let items_per_second = 1_f64 / self.speed; 30 | if items_per_second.is_normal() { 31 | items_per_second 32 | } else { 33 | 0_f64 34 | } 35 | } 36 | 37 | pub fn inspected_items_len(&self) -> usize { 38 | self.inspected_items.len() 39 | } 40 | 41 | pub fn checkpoint(&mut self) { 42 | // if inspected_items is already full => remove oldest item from average 43 | if self.inspected_items.len() == self.inspect_items { 44 | let oldest = self.inspected_items.pop_front().expect( 45 | "len() is not zero; qed", 46 | ); 47 | self.speed = (self.inspect_items as f64 * self.speed - oldest) / 48 | (self.inspect_items as f64 - 1_f64); 49 | } 50 | 51 | // add new item 52 | let now = time::precise_time_s(); 53 | if let Some(last_timestamp) = self.last_timestamp { 54 | let newest = now - last_timestamp; 55 | self.speed = (self.inspected_items.len() as f64 * self.speed + newest) / 56 | (self.inspected_items.len() as f64 + 1_f64); 57 | self.inspected_items.push_back(newest); 58 | } 59 | self.last_timestamp = Some(now); 60 | } 61 | 62 | pub fn start(&mut self) { 63 | self.last_timestamp = Some(time::precise_time_s()); 64 | } 65 | 66 | pub fn stop(&mut self) { 67 | self.last_timestamp = None; 68 | } 69 | } 70 | -------------------------------------------------------------------------------- /sync/src/utils/fee_rate_filter.rs: -------------------------------------------------------------------------------- 1 | use message::types; 2 | 3 | /// Connection fee rate filter 4 | #[derive(Debug, Default)] 5 | pub struct FeeRateFilter { 6 | /// Minimal fee in satoshis per 1000 bytes 7 | fee_rate: u64, 8 | } 9 | 10 | impl FeeRateFilter { 11 | /// Set minimal fee rate, this filter accepts 12 | pub fn set_min_fee_rate(&mut self, message: types::FeeFilter) { 13 | self.fee_rate = message.fee_rate; 14 | } 15 | 16 | /// Filter transaction using its fee rate 17 | pub fn filter_transaction(&self, tx_fee_rate: Option) -> bool { 18 | tx_fee_rate 19 | .map(|tx_fee_rate| tx_fee_rate >= self.fee_rate) 20 | .unwrap_or(true) 21 | } 22 | } 23 | 24 | #[cfg(test)] 25 | mod tests { 26 | use message::types; 27 | use super::FeeRateFilter; 28 | 29 | #[test] 30 | fn fee_rate_filter_empty() { 31 | assert!(FeeRateFilter::default().filter_transaction(Some(0))); 32 | assert!(FeeRateFilter::default().filter_transaction(None)); 33 | } 34 | 35 | #[test] 36 | fn fee_rate_filter_accepts() { 37 | let mut filter = FeeRateFilter::default(); 38 | filter.set_min_fee_rate(types::FeeFilter::with_fee_rate(1000)); 39 | assert!(filter.filter_transaction(Some(1000))); 40 | assert!(filter.filter_transaction(Some(2000))); 41 | } 42 | 43 | #[test] 44 | fn fee_rate_filter_rejects() { 45 | let mut filter = FeeRateFilter::default(); 46 | 
filter.set_min_fee_rate(types::FeeFilter::with_fee_rate(1000)); 47 | assert!(!filter.filter_transaction(Some(500))); 48 | } 49 | } 50 | -------------------------------------------------------------------------------- /sync/src/utils/mod.rs: -------------------------------------------------------------------------------- 1 | mod average_speed_meter; 2 | mod best_headers_chain; 3 | mod bloom_filter; 4 | mod compact_block_builder; 5 | mod connection_filter; 6 | mod fee_rate_filter; 7 | mod hash_queue; 8 | mod known_hash_filter; 9 | mod memory_pool_transaction_provider; 10 | mod message_block_headers_provider; 11 | mod orphan_blocks_pool; 12 | mod orphan_transactions_pool; 13 | mod partial_merkle_tree; 14 | mod synchronization_state; 15 | 16 | pub use self::average_speed_meter::AverageSpeedMeter; 17 | pub use self::best_headers_chain::{BestHeadersChain, Information as BestHeadersChainInformation}; 18 | pub use self::bloom_filter::BloomFilter; 19 | pub use self::compact_block_builder::build_compact_block; 20 | pub use self::connection_filter::ConnectionFilter; 21 | pub use self::fee_rate_filter::FeeRateFilter; 22 | pub use self::hash_queue::{HashQueue, HashQueueChain, HashPosition}; 23 | pub use self::known_hash_filter::{KnownHashType, KnownHashFilter}; 24 | pub use self::memory_pool_transaction_provider::MemoryPoolTransactionOutputProvider; 25 | pub use self::message_block_headers_provider::MessageBlockHeadersProvider; 26 | pub use self::orphan_blocks_pool::OrphanBlocksPool; 27 | pub use self::orphan_transactions_pool::{OrphanTransactionsPool, OrphanTransaction}; 28 | pub use self::partial_merkle_tree::{PartialMerkleTree, build_partial_merkle_tree}; 29 | pub use self::synchronization_state::SynchronizationState; 30 | 31 | /// Block height type 32 | pub type BlockHeight = u32; 33 | -------------------------------------------------------------------------------- /sync/src/utils/synchronization_state.rs: -------------------------------------------------------------------------------- 1 | use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering}; 2 | use super::super::types::{StorageRef, BlockHeight}; 3 | 4 | // AtomicU32 is unstable => using AtomicUsize here 5 | 6 | /// Shared synchronization client state. 7 | /// It can be slightly inaccurate, but strict accuracy is not required here 8 | #[derive(Debug)] 9 | pub struct SynchronizationState { 10 | /// Is synchronization in progress?
11 | is_synchronizing: AtomicBool, 12 | /// Height of best block in the storage 13 | best_storage_block_height: AtomicUsize, 14 | } 15 | 16 | impl SynchronizationState { 17 | pub fn with_storage(storage: StorageRef) -> Self { 18 | let best_storage_block_height = storage.best_block().number; 19 | SynchronizationState { 20 | is_synchronizing: AtomicBool::new(false), 21 | best_storage_block_height: AtomicUsize::new(best_storage_block_height as usize), 22 | } 23 | } 24 | 25 | pub fn synchronizing(&self) -> bool { 26 | self.is_synchronizing.load(Ordering::SeqCst) 27 | } 28 | 29 | pub fn update_synchronizing(&self, synchronizing: bool) { 30 | self.is_synchronizing.store(synchronizing, Ordering::SeqCst); 31 | } 32 | 33 | pub fn best_storage_block_height(&self) -> BlockHeight { 34 | self.best_storage_block_height.load(Ordering::SeqCst) as BlockHeight 35 | } 36 | 37 | pub fn update_best_storage_block_height(&self, height: BlockHeight) { 38 | self.best_storage_block_height.store( 39 | height as usize, 40 | Ordering::SeqCst, 41 | ); 42 | } 43 | } 44 | -------------------------------------------------------------------------------- /test-data/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "test-data" 3 | version = "0.1.0" 4 | authors = ["Nikolay Volf "] 5 | 6 | [dependencies] 7 | time = "0.1" 8 | 9 | chain = { path = "../chain" } 10 | primitives = { path = "../primitives" } 11 | serialization = { path = "../serialization" } 12 | script = { path = "../script" } 13 | -------------------------------------------------------------------------------- /test-data/src/invoke.rs: -------------------------------------------------------------------------------- 1 | //! invoke helper 2 | 3 | pub trait Invoke { 4 | type Result; 5 | 6 | fn invoke(self, arg: A) -> Self::Result; 7 | } 8 | 9 | pub struct Identity; 10 | 11 | impl Invoke for Identity { 12 | type Result = A; 13 | 14 | fn invoke(self, arg: A) -> A { arg } 15 | } 16 | -------------------------------------------------------------------------------- /tools/bench.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | cargo run --manifest-path ./bencher/Cargo.toml --release 3 | -------------------------------------------------------------------------------- /tools/clippy.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # NOTE! 
4 | # this script requires nightly version of rustc 5 | # clippy plugin needs to be compiled with the same nigtly version as currently set one 6 | # tested with clippy 0.0.98 7 | 8 | # temporary workaround for cargo clippy issue #1330 9 | # https://github.com/Manishearth/rust-clippy/issues/1330 10 | 11 | # first, let's enter any directory in a workspace with target kind `lib` 12 | # https://github.com/Manishearth/rust-clippy/blob/6a73c8f8e3f513f6a16c6876be3d326633dbc78d/src/main.rs#L186 13 | cd primitives 14 | 15 | # now let's run clippy 16 | # clippy does not support multiple packages, so let's run them one after another 17 | cargo clippy -p bitcrypto 18 | cargo clippy -p chain 19 | cargo clippy -p db 20 | cargo clippy -p import 21 | cargo clippy -p keys 22 | cargo clippy -p message 23 | cargo clippy -p miner 24 | cargo clippy -p network 25 | cargo clippy -p p2p 26 | cargo clippy -p primitives 27 | cargo clippy -p rpc 28 | cargo clippy -p script 29 | cargo clippy -p serialization 30 | cargo clippy -p sync 31 | cargo clippy -p test-data 32 | cargo clippy -p verification 33 | 34 | -------------------------------------------------------------------------------- /tools/deb-build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e # fail on any error 4 | set -u # treat unset variables as error 5 | rm -rf deb 6 | #create DEBIAN files 7 | mkdir -p deb/usr/bin/ 8 | mkdir -p deb/DEBIAN 9 | #create copyright, docs, compat 10 | cp LICENSE deb/DEBIAN/copyright 11 | echo "https://github.com/paritytech/parity-bitcoin/" >> deb/DEBIAN/docs 12 | echo "8" >> deb/DEBIAN/compat 13 | #create control file 14 | control=deb/DEBIAN/control 15 | echo "Package: pbtc" >> $control 16 | version=`grep -m 1 version Cargo.toml | awk '{print $3}' | tr -d '"' | tr -d "\n"` 17 | echo "Version: $version" >> $control 18 | echo "Source: pbtc" >> $control 19 | echo "Section: science" >> $control 20 | echo "Priority: extra" >> $control 21 | echo "Maintainer: Parity Technologies " >> $control 22 | echo "Build-Depends: debhelper (>=9)" >> $control 23 | echo "Standards-Version: 3.9.5" >> $control 24 | echo "Homepage: https://parity.io" >> $control 25 | echo "Vcs-Git: git://github.com/paritytech/parity-bitcoin.git" >> $control 26 | echo "Vcs-Browser: https://github.com/paritytech/parity-bitcoin" >> $control 27 | echo "Architecture: $1" >> $control 28 | echo "Depends: libssl1.0.0 (>=1.0.0)" >> $control 29 | echo "Description: Bitcoin network client by Parity Technologies" >> $control 30 | #build .deb package 31 | 32 | exit 33 | -------------------------------------------------------------------------------- /tools/deb_build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e # fail on any error 4 | set -u # treat unset variables as error 5 | rm -rf deb 6 | #create DEBIAN files 7 | mkdir -p deb/usr/bin/ 8 | mkdir -p deb/DEBIAN 9 | #create copyright, docs, compat 10 | cp LICENSE deb/DEBIAN/copyright 11 | echo "https://github.com/paritytech/parity-bitcoin/" >> deb/DEBIAN/docs 12 | echo "8" >> deb/DEBIAN/compat 13 | #create control file 14 | control=deb/DEBIAN/control 15 | echo "Package: pbtc" >> $control 16 | version=`grep -m 1 version Cargo.toml | awk '{print $3}' | tr -d '"' | tr -d "\n"` 17 | echo "Version: $version" >> $control 18 | echo "Source: pbtc" >> $control 19 | echo "Section: science" >> $control 20 | echo "Priority: extra" >> $control 21 | echo "Maintainer: Parity Technologies " >> $control 22 | 
echo "Build-Depends: debhelper (>=9)" >> $control 23 | echo "Standards-Version: 3.9.5" >> $control 24 | echo "Homepage: https://parity.io" >> $control 25 | echo "Vcs-Git: git://github.com/paritytech/parity-bitcoin.git" >> $control 26 | echo "Vcs-Browser: https://github.com/paritytech/parity-bitcoin" >> $control 27 | echo "Architecture: $1" >> $control 28 | echo "Depends: libssl1.0.0 (>=1.0.0)" >> $control 29 | echo "Description: Bitcoin network client by Parity Technologies" >> $control 30 | #build .deb package 31 | 32 | exit 33 | -------------------------------------------------------------------------------- /tools/doc.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | cargo doc --no-deps\ 4 | -p bitcrypto\ 5 | -p chain\ 6 | -p db\ 7 | -p import\ 8 | -p keys\ 9 | -p message\ 10 | -p miner\ 11 | -p network\ 12 | -p pbtc\ 13 | -p p2p\ 14 | -p primitives\ 15 | -p rpc\ 16 | -p script\ 17 | -p serialization\ 18 | -p sync\ 19 | -p test-data\ 20 | -p verification 21 | -------------------------------------------------------------------------------- /tools/docker_build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | cd docker/hub 4 | if [ "$1" == "latest" ]; then DOCKER_BUILD_TAG="beta-release"; fi 5 | docker build --build-arg BUILD_TAG=$DOCKER_BUILD_TAG --no-cache=true --tag $2/pbtc-ubuntu:$1 . 6 | docker push $2/pbtc-ubuntu:$1 7 | -------------------------------------------------------------------------------- /tools/draw_graph.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Cargo graph does not work with cargo worspaces #33 4 | # https://github.com/kbknapp/cargo-graph/issues/33 5 | # so first we need to patch Cargo.toml and remove workspace 6 | patch -R Cargo.toml tools/workspace.diff 7 | 8 | # Now let's rebuild Cargo.lock by telling cargo to update local package 9 | cargo update -p pbtc 10 | 11 | # And draw dependencies graph using cargo graph 12 | cargo graph --build-shape box --build-line-style dashed > tools/graph.dot 13 | 14 | # Let's fix graph ratio 15 | patch tools/graph.dot tools/graph_ratio.diff 16 | 17 | dot -Tsvg > tools/graph.svg tools/graph.dot 18 | 19 | # Finally let's bring back old Cargo.toml file 20 | patch Cargo.toml tools/workspace.diff 21 | 22 | # Now let's revert Cargo.lock to previous state 23 | cargo update -p pbtc 24 | -------------------------------------------------------------------------------- /tools/graph_ratio.diff: -------------------------------------------------------------------------------- 1 | diff --git tools/graph.dot tools/graph.dot 2 | index f842267..1a831d2 100644 3 | --- tools/graph.dot 4 | +++ tools/graph.dot 5 | @@ -1,4 +1,6 @@ 6 | digraph dependencies { 7 | + ratio=1.0; 8 | + size="5,5"; 9 | N0[label="pbtc",shape=box]; 10 | N1[label="app_dirs",shape=box]; 11 | N2[label="bencher",shape=box]; 12 | -------------------------------------------------------------------------------- /tools/regtests.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ./target/release/pbtc --btc --regtest --db-cache=192 & 4 | ! 
java -jar ./tools/compare-tool/pull-tests-be0eef7.jar /tmp/regtest-db 2>&1 | tee regtests-full.log | grep -E --color=auto 'org.bitcoinj.store.BlockStoreException\:|BitcoindComparisonTool.main\: ERROR|bitcoind sent us a block it already had, make sure bitcoind has no blocks!|java.lang.NullPointerException' 5 | GREP_COLOR="01;32" grep 'BitcoindComparisonTool.main: Block "b1001" completed processing' regtests-full.log 6 | result=$? 7 | 8 | if [ $result -eq 1 ] 9 | then 10 | echo "Regtests failed" | grep --color=auto "failed" 11 | echo "-----------------------------" 12 | echo "Full log: " 13 | cat regtests-full.log 14 | else 15 | echo "Reg tests ok, test cases: " 16 | GREP_COLOR="01;32" grep -E "BitcoindComparisonTool.main: Block \"b[0-9]*\" completed processing" regtests-full.log 17 | fi 18 | 19 | pkill -f ./target/release/pbtc 20 | exit "$result" 21 | 22 | -------------------------------------------------------------------------------- /tools/workspace.diff: -------------------------------------------------------------------------------- 1 | diff --git Cargo.toml Cargo.toml 2 | index fca51d5..6a16dd6 100644 3 | --- Cargo.toml 4 | +++ Cargo.toml 5 | @@ -24,3 +24,6 @@ import = { path = "import" } 6 | [[bin]] 7 | path = "pbtc/main.rs" 8 | name = "pbtc" 9 | + 10 | +[workspace] 11 | +members = ["bencher"] 12 | -------------------------------------------------------------------------------- /verification/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "verification" 3 | version = "0.1.0" 4 | authors = ["Nikolay Volf "] 5 | 6 | [dependencies] 7 | time = "0.1" 8 | lazy_static = "1.0" 9 | log = "0.4" 10 | rayon = "1.0" 11 | parking_lot = "0.4" 12 | primitives = { path = "../primitives" } 13 | chain = { path = "../chain" } 14 | serialization = { path = "../serialization" } 15 | script = { path = "../script" } 16 | network = { path = "../network" } 17 | storage = { path = "../storage" } 18 | bitcrypto = { path = "../crypto" } 19 | 20 | [dev-dependencies] 21 | test-data = { path = "../test-data" } 22 | db = { path = "../db" } 23 | -------------------------------------------------------------------------------- /verification/src/accept_chain.rs: -------------------------------------------------------------------------------- 1 | use rayon::prelude::{IntoParallelRefIterator, IndexedParallelIterator, ParallelIterator}; 2 | use storage::Store; 3 | use network::ConsensusParams; 4 | use error::Error; 5 | use canon::CanonBlock; 6 | use accept_block::BlockAcceptor; 7 | use accept_header::HeaderAcceptor; 8 | use accept_transaction::TransactionAcceptor; 9 | use deployments::BlockDeployments; 10 | use duplex_store::DuplexTransactionOutputProvider; 11 | use VerificationLevel; 12 | 13 | pub struct ChainAcceptor<'a> { 14 | pub block: BlockAcceptor<'a>, 15 | pub header: HeaderAcceptor<'a>, 16 | pub transactions: Vec>, 17 | } 18 | 19 | impl<'a> ChainAcceptor<'a> { 20 | pub fn new(store: &'a Store, consensus: &'a ConsensusParams, verification_level: VerificationLevel, block: CanonBlock<'a>, height: u32, median_time_past: u32, deployments: &'a BlockDeployments) -> Self { 21 | trace!(target: "verification", "Block verification {}", block.hash().to_reversed_str()); 22 | let output_store = DuplexTransactionOutputProvider::new(store.as_transaction_output_provider(), block.raw()); 23 | let headers = store.as_block_header_provider(); 24 | 25 | ChainAcceptor { 26 | block: BlockAcceptor::new(store.as_transaction_output_provider(), consensus, block, height, 
median_time_past, deployments, headers), 27 | header: HeaderAcceptor::new(headers, consensus, block.header(), height, deployments), 28 | transactions: block.transactions() 29 | .into_iter() 30 | .enumerate() 31 | .map(|(tx_index, tx)| TransactionAcceptor::new( 32 | store.as_transaction_meta_provider(), 33 | output_store, 34 | consensus, 35 | tx, 36 | verification_level, 37 | block.hash(), 38 | height, 39 | block.header.raw.time, 40 | median_time_past, 41 | tx_index, 42 | deployments, 43 | )) 44 | .collect(), 45 | } 46 | } 47 | 48 | pub fn check(&self) -> Result<(), Error> { 49 | try!(self.block.check()); 50 | try!(self.header.check()); 51 | try!(self.check_transactions()); 52 | Ok(()) 53 | } 54 | 55 | fn check_transactions(&self) -> Result<(), Error> { 56 | self.transactions.par_iter() 57 | .enumerate() 58 | .fold(|| Ok(()), |result, (index, tx)| result.and_then(|_| tx.check().map_err(|err| Error::Transaction(index, err)))) 59 | .reduce(|| Ok(()), |acc, check| acc.and(check)) 60 | } 61 | } 62 | -------------------------------------------------------------------------------- /verification/src/canon.rs: -------------------------------------------------------------------------------- 1 | use std::ops; 2 | use primitives::hash::H256; 3 | use chain::{IndexedBlock, IndexedTransaction, IndexedBlockHeader}; 4 | 5 | /// Blocks whose parents are known to be in the chain 6 | #[derive(Clone, Copy)] 7 | pub struct CanonBlock<'a> { 8 | block: &'a IndexedBlock, 9 | } 10 | 11 | impl<'a> CanonBlock<'a> { 12 | pub fn new(block: &'a IndexedBlock) -> Self { 13 | CanonBlock { 14 | block: block, 15 | } 16 | } 17 | 18 | pub fn hash<'b>(&'b self) -> &'a H256 where 'a: 'b { 19 | &self.block.header.hash 20 | } 21 | 22 | pub fn raw<'b>(&'b self) -> &'a IndexedBlock where 'a: 'b { 23 | self.block 24 | } 25 | 26 | pub fn header<'b>(&'b self) -> CanonHeader<'a> where 'a: 'b { 27 | CanonHeader::new(&self.block.header) 28 | } 29 | 30 | pub fn transactions<'b>(&'b self) -> Vec> where 'a: 'b { 31 | self.block.transactions.iter().map(CanonTransaction::new).collect() 32 | } 33 | } 34 | 35 | impl<'a> ops::Deref for CanonBlock<'a> { 36 | type Target = IndexedBlock; 37 | 38 | fn deref(&self) -> &Self::Target { 39 | self.block 40 | } 41 | } 42 | 43 | #[derive(Clone, Copy)] 44 | pub struct CanonHeader<'a> { 45 | header: &'a IndexedBlockHeader, 46 | } 47 | 48 | impl<'a> CanonHeader<'a> { 49 | pub fn new(header: &'a IndexedBlockHeader) -> Self { 50 | CanonHeader { 51 | header: header, 52 | } 53 | } 54 | } 55 | 56 | impl<'a> ops::Deref for CanonHeader<'a> { 57 | type Target = IndexedBlockHeader; 58 | 59 | fn deref(&self) -> &Self::Target { 60 | self.header 61 | } 62 | } 63 | 64 | #[derive(Clone, Copy)] 65 | pub struct CanonTransaction<'a> { 66 | transaction: &'a IndexedTransaction, 67 | } 68 | 69 | impl<'a> CanonTransaction<'a> { 70 | pub fn new(transaction: &'a IndexedTransaction) -> Self { 71 | CanonTransaction { 72 | transaction: transaction, 73 | } 74 | } 75 | } 76 | 77 | impl<'a> ops::Deref for CanonTransaction<'a> { 78 | type Target = IndexedTransaction; 79 | 80 | fn deref(&self) -> &Self::Target { 81 | self.transaction 82 | } 83 | } 84 | -------------------------------------------------------------------------------- /verification/src/constants.rs: -------------------------------------------------------------------------------- 1 | //! 
Consensus constants 2 | 3 | pub const BLOCK_MAX_FUTURE: i64 = 2 * 60 * 60; // 2 hours 4 | //pub const COINBASE_MATURITY: u32 = 100; 5 | pub const COINBASE_MATURITY: u32 = 0; // maturity is measured in blocks; upstream uses 100 6 | pub const MIN_COINBASE_SIZE: usize = 2; 7 | pub const MAX_COINBASE_SIZE: usize = 100; 8 | 9 | pub const RETARGETING_FACTOR: u32 = 4; 10 | pub const TARGET_SPACING_SECONDS: u32 = 10 * 60; 11 | //pub const DOUBLE_SPACING_SECONDS: u32 = 2 * TARGET_SPACING_SECONDS; 12 | pub const DOUBLE_SPACING_SECONDS: u32 = TARGET_SPACING_SECONDS / 10; 13 | pub const TARGET_TIMESPAN_SECONDS: u32 = 2 * 7 * 24 * 60 * 60; 14 | 15 | // The upper and lower bounds for retargeting timespan 16 | pub const MIN_TIMESPAN: u32 = TARGET_TIMESPAN_SECONDS / RETARGETING_FACTOR; 17 | pub const MAX_TIMESPAN: u32 = TARGET_TIMESPAN_SECONDS * RETARGETING_FACTOR; 18 | 19 | // Target number of blocks per retargeting period: 2 weeks = 2016 blocks 20 | pub const RETARGETING_INTERVAL: u32 = TARGET_TIMESPAN_SECONDS / TARGET_SPACING_SECONDS; 21 | -------------------------------------------------------------------------------- /verification/src/duplex_store.rs: -------------------------------------------------------------------------------- 1 | //! Some transaction validation rules 2 | //! require sophisticated (in more than one source) previous transaction lookups 3 | 4 | use chain::{OutPoint, TransactionOutput}; 5 | use storage::TransactionOutputProvider; 6 | 7 | #[derive(Clone, Copy)] 8 | pub struct DuplexTransactionOutputProvider<'a> { 9 | first: &'a TransactionOutputProvider, 10 | second: &'a TransactionOutputProvider, 11 | } 12 | 13 | impl<'a> DuplexTransactionOutputProvider<'a> { 14 | pub fn new(first: &'a TransactionOutputProvider, second: &'a TransactionOutputProvider) -> Self { 15 | DuplexTransactionOutputProvider { 16 | first: first, 17 | second: second, 18 | } 19 | } 20 | } 21 | 22 | impl<'a> TransactionOutputProvider for DuplexTransactionOutputProvider<'a> { 23 | fn transaction_output(&self, prevout: &OutPoint, transaction_index: usize) -> Option { 24 | self.first.transaction_output(prevout, transaction_index) 25 | .or_else(|| self.second.transaction_output(prevout, transaction_index)) 26 | } 27 | 28 | fn is_spent(&self, prevout: &OutPoint) -> bool { 29 | self.first.is_spent(prevout) || self.second.is_spent(prevout) 30 | } 31 | } 32 | 33 | pub struct NoopStore; 34 | 35 | impl TransactionOutputProvider for NoopStore { 36 | fn transaction_output(&self, _prevout: &OutPoint, _transaction_index: usize) -> Option { 37 | None 38 | } 39 | 40 | fn is_spent(&self, _prevout: &OutPoint) -> bool { 41 | false 42 | } 43 | } 44 | -------------------------------------------------------------------------------- /verification/src/timestamp.rs: -------------------------------------------------------------------------------- 1 | use chain::BlockHeader; 2 | use storage::{BlockHeaderProvider, BlockAncestors}; 3 | use primitives::hash::H256; 4 | 5 | /// Returns median timestamp of given header ancestors. 6 | /// The header should be later expected to have higher timestamp 7 | /// than this median timestamp 8 | pub fn median_timestamp(header: &BlockHeader, store: &BlockHeaderProvider) -> u32 { 9 | median_timestamp_inclusive(header.previous_header_hash.clone(), store) 10 | } 11 | 12 | /// Returns median timestamp of given header + its ancestors.
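// A worked example (illustrative values only) of what median_timestamp_inclusive below
// computes: take up to 11 ancestor timestamps, sort them, and pick the middle element.
//
//   collected timestamps: [9, 8, 8, 7, 7, 6, 5, 5, 4, 3, 1]
//   sorted:               [1, 3, 4, 5, 5, 6, 7, 7, 8, 8, 9]
//   median = sorted[11 / 2] = sorted[5] = 6
//
// With fewer than 11 ancestors the same indexing applies to however many were found,
// and an empty ancestor set returns 0 (the early return in the function body).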
13 | /// A later header is expected to have a timestamp 14 | /// higher than this median 15 | pub fn median_timestamp_inclusive(previous_header_hash: H256, store: &BlockHeaderProvider) -> u32 { 16 | let mut timestamps: Vec<_> = BlockAncestors::new(previous_header_hash.clone().into(), store) 17 | .take(11) 18 | .map(|header| header.time) 19 | .collect(); 20 | 21 | if timestamps.is_empty() { 22 | return 0; 23 | } 24 | 25 | timestamps.sort(); 26 | timestamps[timestamps.len() / 2] 27 | } 28 | -------------------------------------------------------------------------------- /verification/src/verify_chain.rs: -------------------------------------------------------------------------------- 1 | use rayon::prelude::{IntoParallelRefIterator, IndexedParallelIterator, ParallelIterator}; 2 | use chain::IndexedBlock; 3 | use network::Network; 4 | use error::Error; 5 | use verify_block::BlockVerifier; 6 | use verify_header::HeaderVerifier; 7 | use verify_transaction::TransactionVerifier; 8 | 9 | pub struct ChainVerifier<'a> { 10 | pub block: BlockVerifier<'a>, 11 | pub header: HeaderVerifier<'a>, 12 | pub transactions: Vec<TransactionVerifier<'a>>, 13 | } 14 | 15 | impl<'a> ChainVerifier<'a> { 16 | pub fn new(block: &'a IndexedBlock, network: Network, current_time: u32) -> Self { 17 | trace!(target: "verification", "Block pre-verification {}", block.hash().to_reversed_str()); 18 | ChainVerifier { 19 | block: BlockVerifier::new(block), 20 | header: HeaderVerifier::new(&block.header, network, current_time), 21 | transactions: block.transactions.iter().map(TransactionVerifier::new).collect(), 22 | } 23 | } 24 | 25 | pub fn check(&self) -> Result<(), Error> { 26 | try!(self.block.check()); 27 | try!(self.header.check()); 28 | try!(self.check_transactions()); 29 | Ok(()) 30 | } 31 | 32 | fn check_transactions(&self) -> Result<(), Error> { 33 | self.transactions.par_iter() 34 | .enumerate() 35 | .fold(|| Ok(()), |result, (index, tx)| result.and_then(|_| tx.check().map_err(|err| Error::Transaction(index, err)))) 36 | .reduce(|| Ok(()), |acc, check| acc.and(check)) 37 | } 38 | } 39 | -------------------------------------------------------------------------------- /verification/src/verify_header.rs: -------------------------------------------------------------------------------- 1 | use primitives::compact::Compact; 2 | use chain::IndexedBlockHeader; 3 | use network::Network; 4 | use work::is_valid_proof_of_work; 5 | use error::Error; 6 | use constants::BLOCK_MAX_FUTURE; 7 | 8 | pub struct HeaderVerifier<'a> { 9 | pub proof_of_work: HeaderProofOfWork<'a>, 10 | pub timestamp: HeaderTimestamp<'a>, 11 | } 12 | 13 | impl<'a> HeaderVerifier<'a> { 14 | pub fn new(header: &'a IndexedBlockHeader, network: Network, current_time: u32) -> Self { 15 | HeaderVerifier { 16 | proof_of_work: HeaderProofOfWork::new(header, network), 17 | timestamp: HeaderTimestamp::new(header, current_time, BLOCK_MAX_FUTURE as u32), 18 | } 19 | } 20 | 21 | pub fn check(&self) -> Result<(), Error> { 22 | try!(self.proof_of_work.check()); 23 | try!(self.timestamp.check()); 24 | Ok(()) 25 | } 26 | } 27 | 28 | pub struct HeaderProofOfWork<'a> { 29 | header: &'a IndexedBlockHeader, 30 | max_work_bits: Compact, 31 | } 32 | 33 | impl<'a> HeaderProofOfWork<'a> { 34 | fn new(header: &'a IndexedBlockHeader, network: Network) -> Self { 35 | HeaderProofOfWork { 36 | header: header, 37 | max_work_bits: network.max_bits().into(), 38 | } 39 | } 40 | 41 | fn check(&self) -> Result<(), Error> { 42 | if is_valid_proof_of_work(self.max_work_bits, self.header.raw.bits, 
&self.header.hash) { 43 | Ok(()) 44 | } else { 45 | Err(Error::Pow) 46 | } 47 | } 48 | } 49 | 50 | pub struct HeaderTimestamp<'a> { 51 | header: &'a IndexedBlockHeader, 52 | current_time: u32, 53 | max_future: u32, 54 | } 55 | 56 | impl<'a> HeaderTimestamp<'a> { 57 | fn new(header: &'a IndexedBlockHeader, current_time: u32, max_future: u32) -> Self { 58 | HeaderTimestamp { 59 | header: header, 60 | current_time: current_time, 61 | max_future: max_future, 62 | } 63 | } 64 | 65 | fn check(&self) -> Result<(), Error> { 66 | if self.header.raw.time > self.current_time + self.max_future { 67 | Err(Error::FuturisticTimestamp) 68 | } else { 69 | Ok(()) 70 | } 71 | } 72 | } 73 | --------------------------------------------------------------------------------
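A short, self-contained sketch of the two timestamp rules used by the verification files above: the median-of-11 logic from median_timestamp_inclusive in timestamp.rs, and the BLOCK_MAX_FUTURE bound applied by HeaderTimestamp::check in verify_header.rs. The helper names and the plain u32 inputs below are illustrative assumptions; the crate's own code walks BlockAncestors over a BlockHeaderProvider rather than receiving timestamps directly.

// Median of up to the last 11 ancestor timestamps, mirroring
// median_timestamp_inclusive: collect, sort, take the middle element.
fn median_of_last_11(mut timestamps: Vec<u32>) -> u32 {
    // The real code takes 11 headers while walking ancestors;
    // here the caller passes the timestamps directly (assumption).
    timestamps.truncate(11);
    if timestamps.is_empty() {
        return 0;
    }
    timestamps.sort();
    timestamps[timestamps.len() / 2]
}

// Futuristic-timestamp rule from HeaderTimestamp::check: accept only headers
// no more than max_future seconds ahead of the node's current time.
fn is_timestamp_acceptable(header_time: u32, current_time: u32, max_future: u32) -> bool {
    header_time <= current_time + max_future
}

fn main() {
    let ancestors = vec![90, 100, 110, 95, 105];
    assert_eq!(median_of_last_11(ancestors), 100);
    // 2 * 60 * 60 matches BLOCK_MAX_FUTURE (two hours).
    assert!(is_timestamp_acceptable(1_000, 1_000, 2 * 60 * 60));
    assert!(!is_timestamp_acceptable(10_000, 1_000, 2 * 60 * 60));
}

The assertions only restate what the surrounding sources already imply: the median of five sorted timestamps is the third one, and a header more than two hours ahead of the local clock is rejected with Error::FuturisticTimestamp.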