├── clippy.toml ├── images ├── culture.jpeg └── pulsar-prompt.png ├── pulsar ├── src │ ├── commands.rs │ ├── commands │ │ ├── info.rs │ │ ├── wipe.rs │ │ └── init.rs │ ├── tests.rs │ ├── main.rs │ ├── summary.rs │ └── config.rs ├── build.rs └── Cargo.toml ├── rust-toolchain.toml ├── .gitignore ├── sdk ├── node │ ├── src │ │ ├── domains │ │ │ ├── mod.rs │ │ │ ├── domain_node.rs │ │ │ ├── utils.rs │ │ │ ├── domain.rs │ │ │ ├── domain_instance_starter.rs │ │ │ └── evm_chain_spec.rs │ │ └── builder.rs │ └── Cargo.toml ├── farmer │ ├── build.rs │ └── Cargo.toml ├── substrate │ ├── build.rs │ ├── Cargo.toml │ └── src │ │ ├── types.rs │ │ └── lib.rs ├── subspace-sdk │ ├── tests │ │ └── integration │ │ │ ├── main.rs │ │ │ ├── domains.rs │ │ │ ├── farmer.rs │ │ │ ├── common.rs │ │ │ └── node.rs │ ├── Cargo.toml │ ├── examples │ │ ├── simple.rs │ │ ├── complete.rs │ │ ├── sync.rs │ │ └── mini-farmer.rs │ └── src │ │ └── lib.rs ├── dsn │ ├── src │ │ ├── lib.rs │ │ ├── local_provider_record_utils.rs │ │ └── builder.rs │ └── Cargo.toml ├── traits │ ├── Cargo.toml │ └── src │ │ └── lib.rs ├── README.md └── utils │ └── Cargo.toml ├── rustfmt.toml ├── .cargo └── config.toml ├── .github ├── workflows │ ├── Entitlements.plist │ ├── rustdoc.yml │ ├── release-notifier.yml │ ├── ci-tests.yml │ └── release.yml └── ISSUE_TEMPLATE │ ├── feature_request.yml │ └── bug_report.yml ├── CODE_OF_CONDUCT.md ├── README.md └── Cargo.toml /clippy.toml: -------------------------------------------------------------------------------- 1 | allow-unwrap-in-tests = true 2 | -------------------------------------------------------------------------------- /images/culture.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomys/pulsar/HEAD/images/culture.jpeg -------------------------------------------------------------------------------- /images/pulsar-prompt.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomys/pulsar/HEAD/images/pulsar-prompt.png -------------------------------------------------------------------------------- /pulsar/src/commands.rs: -------------------------------------------------------------------------------- 1 | pub(crate) mod farm; 2 | pub(crate) mod info; 3 | pub(crate) mod init; 4 | pub(crate) mod wipe; 5 | -------------------------------------------------------------------------------- /rust-toolchain.toml: -------------------------------------------------------------------------------- 1 | [toolchain] 2 | channel = "nightly-2024-01-19" 3 | components = ["rust-src"] 4 | targets = ["wasm32-unknown-unknown"] 5 | profile = "default" 6 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /target/ 2 | /.idea 3 | .DS_Store 4 | .thumbs.db 5 | .vscode 6 | .subspaceFarmer 7 | 8 | # These are backup files generated by rustfmt 9 | **/*.rs.bk 10 | -------------------------------------------------------------------------------- /sdk/node/src/domains/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod builder; 2 | pub mod domain; 3 | pub mod domain_instance_starter; 4 | pub mod domain_node; 5 | pub mod evm_chain_spec; 6 | pub mod utils; 7 | -------------------------------------------------------------------------------- /pulsar/build.rs: 
-------------------------------------------------------------------------------- 1 | fn main() { 2 | let output = std::process::Command::new("git").args(["rev-parse", "HEAD"]).output().unwrap(); 3 | let git_hash = String::from_utf8(output.stdout).unwrap(); 4 | println!("cargo:rustc-env=GIT_HASH={git_hash}"); 5 | } 6 | -------------------------------------------------------------------------------- /sdk/farmer/build.rs: -------------------------------------------------------------------------------- 1 | fn main() { 2 | let output = std::process::Command::new("git").args(["rev-parse", "HEAD"]).output().unwrap(); 3 | let git_hash = String::from_utf8(output.stdout).unwrap(); 4 | println!("cargo:rustc-env=GIT_HASH={git_hash}"); 5 | } 6 | -------------------------------------------------------------------------------- /sdk/substrate/build.rs: -------------------------------------------------------------------------------- 1 | fn main() { 2 | let output = std::process::Command::new("git").args(["rev-parse", "HEAD"]).output().unwrap(); 3 | let git_hash = String::from_utf8(output.stdout).unwrap(); 4 | println!("cargo:rustc-env=GIT_HASH={git_hash}"); 5 | } 6 | -------------------------------------------------------------------------------- /rustfmt.toml: -------------------------------------------------------------------------------- 1 | edition = "2021" 2 | format_code_in_doc_comments = true 3 | format_macro_bodies = true 4 | format_macro_matchers = true 5 | format_strings = true 6 | imports_granularity = "Module" 7 | match_arm_blocks = false 8 | reorder_impl_items = true 9 | group_imports = "StdExternalCrate" 10 | use_field_init_shorthand = true 11 | use_small_heuristics = "Max" 12 | wrap_comments = true 13 | -------------------------------------------------------------------------------- /sdk/node/src/domains/domain_node.rs: -------------------------------------------------------------------------------- 1 | use derivative::Derivative; 2 | use sdk_utils::DestructorSet; 3 | use tokio::sync::oneshot; 4 | 5 | #[derive(Derivative)] 6 | #[derivative(Debug)] 7 | #[must_use = "Domain node should be closed"] 8 | pub struct DomainNode { 9 | pub domain_worker_result_receiver: oneshot::Receiver>, 10 | pub _destructors: DestructorSet, 11 | } 12 | -------------------------------------------------------------------------------- /sdk/subspace-sdk/tests/integration/main.rs: -------------------------------------------------------------------------------- 1 | pub mod common; 2 | #[cfg(all(feature = "core-payments", feature = "executor"))] 3 | mod domains; 4 | mod farmer; 5 | mod node; 6 | 7 | #[global_allocator] 8 | static GLOBAL: mimalloc::MiMalloc = mimalloc::MiMalloc; 9 | 10 | #[test] 11 | fn pubkey_parse() { 12 | "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY".parse::().unwrap(); 13 | } 14 | -------------------------------------------------------------------------------- /.cargo/config.toml: -------------------------------------------------------------------------------- 1 | [target.'cfg(target_arch = "x86_64")'] 2 | # Require AES-NI on x86-64 by default 3 | rustflags = "-C target-feature=+aes" 4 | 5 | [target.'cfg(target_arch = "aarch64")'] 6 | # TODO: Try to remove once https://github.com/paritytech/substrate/issues/11538 is resolved 7 | # TODO: AES flag is such that we have decent performance on ARMv8, remove once `aes` crate bumps MSRV to at least 8 | # 1.61: https://github.com/RustCrypto/block-ciphers/issues/373 9 | rustflags = "--cfg aes_armv8" 10 | 
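The three `build.rs` scripts above make the current git commit available to their crates at compile time via `cargo:rustc-env=GIT_HASH=...`. A minimal sketch of how a crate built with one of these scripts might surface that value; the `version_string` helper below is hypothetical and not part of the SDK:

```rust
/// Hypothetical helper for a crate whose build script exports `GIT_HASH`,
/// combining the crate version with the commit it was built from.
pub fn version_string() -> String {
    // `env!` resolves at compile time from the value set in `build.rs`;
    // `trim` drops the trailing newline that `git rev-parse HEAD` emits.
    format!("{} ({})", env!("CARGO_PKG_VERSION"), env!("GIT_HASH").trim())
}
```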
-------------------------------------------------------------------------------- /sdk/node/src/domains/utils.rs: -------------------------------------------------------------------------------- 1 | pub use evm_domain_runtime::AccountId as AccountId20; 2 | use sp_core::crypto::AccountId32; 3 | use sp_core::{ByteArray, H160}; 4 | use sp_runtime::traits::Convert; 5 | 6 | pub struct AccountId32ToAccountId20Converter; 7 | 8 | impl Convert for AccountId32ToAccountId20Converter { 9 | fn convert(acc: AccountId32) -> AccountId20 { 10 | // Using the full hex key, truncating to the first 20 bytes (the first 40 hex 11 | // chars) 12 | H160::from_slice(&acc.as_slice()[0..20]).into() 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /sdk/dsn/src/lib.rs: -------------------------------------------------------------------------------- 1 | //! Crate with DSN shared between sdk farmer and sdk node 2 | 3 | #![warn( 4 | missing_docs, 5 | clippy::dbg_macro, 6 | clippy::unwrap_used, 7 | clippy::disallowed_types, 8 | unused_features 9 | )] 10 | #![cfg_attr(not(test), warn(unused_crate_dependencies))] 11 | #![feature(concat_idents, const_option)] 12 | 13 | mod builder; 14 | mod local_provider_record_utils; 15 | 16 | pub use builder::*; 17 | use subspace_farmer::piece_cache::PieceCache as FarmerPieceCache; 18 | use tracing::warn; 19 | 20 | /// A record provider that uses farmer piece cache underneath 21 | pub type LocalRecordProvider = 22 | local_provider_record_utils::MaybeLocalRecordProvider; 23 | -------------------------------------------------------------------------------- /.github/workflows/Entitlements.plist: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | com.apple.security.network.server 7 | 8 | 9 | 10 | com.apple.security.network.client 11 | 12 | 13 | 14 | com.apple.security.files.user-selected.read-write 15 | 16 | 17 | 18 | com.apple.security.cs.allow-unsigned-executable-memory 19 | 20 | 21 | 22 | -------------------------------------------------------------------------------- /sdk/traits/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "sdk-traits" 3 | version = "0.1.0" 4 | edition = "2021" 5 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 6 | 7 | [dependencies] 8 | async-trait = "0.1" 9 | parking_lot = "0.12" 10 | sc-client-api = { version = "4.0.0-dev", git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8" } 11 | sdk-dsn = { path = "../dsn" } 12 | subspace-core-primitives = { git = "https://github.com/subspace/subspace", rev = "a2e0318a75705ab31975ac8b172c999cab378e90" } 13 | subspace-farmer = { git = "https://github.com/subspace/subspace", rev = "a2e0318a75705ab31975ac8b172c999cab378e90", default-features = false } 14 | subspace-proof-of-space = { git = "https://github.com/subspace/subspace", rev = "a2e0318a75705ab31975ac8b172c999cab378e90" } 15 | 16 | -------------------------------------------------------------------------------- /.github/workflows/rustdoc.yml: -------------------------------------------------------------------------------- 1 | name: rustdoc 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | merge_group: 8 | types: [checks_requested] 9 | 10 | env: 11 | CARGO_INCREMENTAL: 0 12 | CARGO_NET_RETRY: 10 13 | RUSTUP_MAX_RETRIES: 10 14 | 15 | jobs: 16 | rustdoc: 17 | runs-on: ubuntu-latest 18 | 19 | steps: 20 | - name: 
Checkout repository 21 | uses: actions/checkout@v2 22 | 23 | - name: Install Protoc 24 | uses: arduino/setup-protoc@v1 25 | 26 | - name: Build docs 27 | uses: actions-rs/cargo@ae10961054e4aa8b4aa7dffede299aaf087aa33b # @v1.0.1 28 | with: 29 | command: doc 30 | args: --all --no-deps 31 | 32 | - name: Deploy Docs 33 | uses: JamesIves/github-pages-deploy-action@releases/v3 34 | with: 35 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 36 | BRANCH: gh-pages 37 | FOLDER: target/doc 38 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.yml: -------------------------------------------------------------------------------- 1 | name: 🎁 Feature Request 2 | description: If you have a great thing on your mind ⚡️ 3 | title: "🎁 [Feature Request]: " 4 | labels: ["enhancement"] 5 | assignees: 6 | - ozgunozerk 7 | body: 8 | - type: markdown 9 | attributes: 10 | value: | 11 | Thanks for taking the time to fill this feature request! 12 | - type: textarea 13 | id: feature 14 | attributes: 15 | label: What is the feature you would like to see? 16 | description: please explain all the details :) 17 | validations: 18 | required: true 19 | - type: checkboxes 20 | id: terms 21 | attributes: 22 | label: Code of Conduct 23 | description: By submitting this issue, you agree to follow our [Code of Conduct](https://github.com/subspace/pulsar/blob/main/CODE_OF_CONDUCT.md) # TODO: fix this link 24 | options: 25 | - label: I agree to follow this project's Code of Conduct 26 | required: true 27 | -------------------------------------------------------------------------------- /sdk/dsn/src/local_provider_record_utils.rs: -------------------------------------------------------------------------------- 1 | use std::sync::Arc; 2 | 3 | use derivative::Derivative; 4 | use parking_lot::RwLock; 5 | use subspace_networking::libp2p::kad::{ProviderRecord, RecordKey}; 6 | use subspace_networking::LocalRecordProvider; 7 | 8 | #[derive(Derivative)] 9 | #[derivative(Debug)] 10 | pub struct MaybeLocalRecordProvider { 11 | #[derivative(Debug = "ignore")] 12 | inner: Arc>>, 13 | } 14 | 15 | impl Clone for MaybeLocalRecordProvider { 16 | fn clone(&self) -> Self { 17 | Self { inner: Arc::clone(&self.inner) } 18 | } 19 | } 20 | 21 | impl MaybeLocalRecordProvider { 22 | pub fn new(inner: Arc>>) -> Self { 23 | Self { inner } 24 | } 25 | } 26 | 27 | impl LocalRecordProvider for MaybeLocalRecordProvider { 28 | fn record(&self, key: &RecordKey) -> Option { 29 | self.inner.read().as_ref().map(|v| v.record(key)).unwrap_or(None) 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /sdk/subspace-sdk/tests/integration/domains.rs: -------------------------------------------------------------------------------- 1 | use futures::prelude::*; 2 | use sdk_utils::ByteSize; 3 | 4 | use crate::common::{Farmer, Node}; 5 | 6 | #[tokio::test(flavor = "multi_thread")] 7 | async fn core_start() { 8 | crate::common::setup(); 9 | 10 | let number_of_sectors = 10; 11 | let pieces_in_sector = 50u16; 12 | let sector_size = subspace_farmer_components::sector::sector_size(pieces_in_sector as _); 13 | let space_pledged = sector_size * number_of_sectors; 14 | 15 | let node = Node::dev().enable_core(true).build().await; 16 | let farmer = Farmer::dev() 17 | .pieces_in_sector(pieces_in_sector) 18 | .build(&node, ByteSize::b(space_pledged as u64)) 19 | .await; 20 | 21 | node.system_domain() 22 | .unwrap() 23 | .payments() 24 | .unwrap() 25 | .subscribe_new_heads() 26 | 
.await 27 | .unwrap() 28 | .next() 29 | .await 30 | .unwrap(); 31 | 32 | farmer.close().await; 33 | node.close().await; 34 | } 35 | -------------------------------------------------------------------------------- /sdk/subspace-sdk/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "subspace-sdk" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | [dependencies] 7 | sdk-dsn = { path = "../dsn" } 8 | sdk-farmer = { path = "../farmer", default-features = false } 9 | sdk-node = { path = "../node" } 10 | sdk-substrate = { path = "../substrate" } 11 | sdk-utils = { path = "../utils" } 12 | static_assertions = "1.1.0" 13 | 14 | subspace-proof-of-space = { git = "https://github.com/subspace/subspace", rev = "a2e0318a75705ab31975ac8b172c999cab378e90" } 15 | 16 | [dev-dependencies] 17 | anyhow = "1" 18 | clap = { version = "4", features = ["derive"] } 19 | derive_builder = "0.12" 20 | derive_more = "0.99" 21 | fdlimit = "0.2" 22 | futures = "0.3" 23 | mimalloc = { version = "*", default-features = false } 24 | serde_json = "1" 25 | subspace-farmer-components = { git = "https://github.com/subspace/subspace", rev = "a2e0318a75705ab31975ac8b172c999cab378e90" } 26 | tempfile = "3" 27 | tokio = { version = "1.34.0", features = ["rt-multi-thread", "macros"] } 28 | tracing = "0.1" 29 | tracing-futures = "0.2" 30 | tracing-subscriber = "0.3" 31 | 32 | 33 | [target.'cfg(tokio_unstable)'.dev-dependencies] 34 | console-subscriber = "0.1" 35 | 36 | [features] 37 | default = ["numa"] 38 | numa = [ 39 | "sdk-farmer/numa", 40 | ] 41 | -------------------------------------------------------------------------------- /sdk/dsn/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "sdk-dsn" 3 | version = "0.1.0" 4 | edition = "2021" 5 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 6 | 7 | [dependencies] 8 | anyhow = "1" 9 | derivative = "2.2.0" 10 | derive_builder = "0.12" 11 | derive_more = "0.99" 12 | futures = "0.3" 13 | hex = "0.4.3" 14 | parking_lot = "0.12" 15 | prometheus-client = "0.22.0" 16 | sc-client-api = { version = "4.0.0-dev", git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8" } 17 | sc-consensus-subspace = { git = "https://github.com/subspace/subspace", rev = "a2e0318a75705ab31975ac8b172c999cab378e90" } 18 | sdk-utils = { path = "../utils" } 19 | serde = { version = "1", features = ["derive"] } 20 | sp-blockchain = { version = "4.0.0-dev", git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8" } 21 | sp-runtime = { version = "24.0.0", git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8" } 22 | subspace-farmer = { git = "https://github.com/subspace/subspace", rev = "a2e0318a75705ab31975ac8b172c999cab378e90", default-features = false } 23 | subspace-networking = { git = "https://github.com/subspace/subspace", rev = "a2e0318a75705ab31975ac8b172c999cab378e90" } 24 | tracing = "0.1" 25 | -------------------------------------------------------------------------------- /sdk/subspace-sdk/examples/simple.rs: -------------------------------------------------------------------------------- 1 | use std::num::NonZeroU8; 2 | 3 | use futures::prelude::*; 4 | 5 | #[tokio::main] 6 | async fn main() { 7 | tracing_subscriber::fmt().init(); 8 | let plots = [subspace_sdk::FarmDescription::new("plot", 
subspace_sdk::ByteSize::mb(100))]; 9 | let node = subspace_sdk::Node::builder() 10 | .force_authoring(true) 11 | .role(subspace_sdk::node::Role::Authority) 12 | // Starting a new chain 13 | .build("node", subspace_sdk::chain_spec::dev_config()) 14 | .await 15 | .unwrap(); 16 | 17 | let farmer = subspace_sdk::Farmer::builder() 18 | .build( 19 | subspace_sdk::PublicKey::from([0; 32]), 20 | &node, 21 | &plots, 22 | NonZeroU8::new(1).expect("Static value should not fail; qed"), 23 | ) 24 | .await 25 | .expect("Failed to init a farmer"); 26 | 27 | for plot in farmer.iter_farms().await { 28 | let mut plotting_progress = plot.subscribe_initial_plotting_progress().await; 29 | while plotting_progress.next().await.is_some() {} 30 | } 31 | tracing::info!("Initial plotting completed"); 32 | 33 | node.subscribe_new_heads() 34 | .await 35 | .unwrap() 36 | // Wait 10 blocks and exit 37 | .take(10) 38 | .for_each(|header| async move { tracing::info!(?header, "New block!") }) 39 | .await; 40 | } 41 | -------------------------------------------------------------------------------- /pulsar/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "pulsar" 3 | version = "0.8.0" 4 | edition = "2021" 5 | 6 | [dependencies] 7 | anyhow = "1" 8 | async-stream = "0.3.5" 9 | bytesize = "1.1" 10 | bytesize-serde = "0.2" 11 | clap = { version = "4.1.1", features = ["derive"] } 12 | color-eyre = "0.6.2" 13 | crossterm = "0.26.1" 14 | derivative = "2.2.0" 15 | derive_more = "0.99.17" 16 | dirs = "4.0.0" 17 | fdlimit = "0.2" 18 | futures = "0.3" 19 | indicatif = { version = "0.17.1", features = ["improved_unicode"] } 20 | libp2p-core = "0.38" 21 | mimalloc = { version = "*", default-features = false } 22 | num-rational = "0.4.1" 23 | num-traits = "0.2.14" 24 | open = "4.0.2" 25 | owo-colors = "3.5.0" 26 | rand = "0.8.5" 27 | serde = "1" 28 | serde_derive = "1" 29 | single-instance = "0.3.3" 30 | sp-core = { version = "21.0.0", git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8", features = ["full_crypto"] } 31 | strum = "0.24.1" 32 | strum_macros = "0.24.3" 33 | subspace-sdk = { path = "../sdk/subspace-sdk", default-features = false } 34 | thiserror = "1" 35 | tokio = { version = "1.34.0", features = ["macros", "rt-multi-thread", "tracing", "signal"] } 36 | toml = "0.7" 37 | tracing = "0.1.37" 38 | tracing-appender = "0.2" 39 | tracing-bunyan-formatter = "0.3.4" 40 | tracing-error = "0.2.0" 41 | tracing-subscriber = { version = "0.3.16", features = ["env-filter"] } 42 | whoami = "1" 43 | zeroize = "1.6.0" 44 | 45 | [dev-dependencies] 46 | rand = "0.8.5" 47 | 48 | [features] 49 | default = [ 50 | "numa" 51 | ] 52 | numa = [ 53 | "subspace-sdk/numa" 54 | ] 55 | -------------------------------------------------------------------------------- /sdk/traits/src/lib.rs: -------------------------------------------------------------------------------- 1 | //! 
Crate with interfaces for SDK 2 | 3 | #![warn( 4 | missing_docs, 5 | clippy::dbg_macro, 6 | clippy::unwrap_used, 7 | clippy::disallowed_types, 8 | unused_features 9 | )] 10 | #![cfg_attr(not(test), warn(unused_crate_dependencies))] 11 | 12 | use subspace_farmer::piece_cache::PieceCache as FarmerPieceCache; 13 | 14 | /// Trait which abstracts farmer for node 15 | #[async_trait::async_trait] 16 | pub trait Farmer { 17 | /// Proof of space table 18 | type Table: subspace_proof_of_space::Table; 19 | 20 | /// Fetch piece by its hash 21 | async fn get_piece_by_index( 22 | piece_index: subspace_core_primitives::PieceIndex, 23 | piece_cache: &FarmerPieceCache, 24 | weak_readers_and_pieces: &std::sync::Weak< 25 | parking_lot::Mutex< 26 | Option, 27 | >, 28 | >, 29 | ) -> Option; 30 | } 31 | 32 | /// Trait which abstracts node for farmer 33 | pub trait Node { 34 | /// Client for aux store for DSN 35 | type Client: sc_client_api::AuxStore + Send + Sync + 'static; 36 | /// Proof of space table type 37 | type Table: subspace_proof_of_space::Table; 38 | /// Rpc implementation 39 | type Rpc: subspace_farmer::node_client::NodeClient + Clone; 40 | 41 | /// Node name in telemetry 42 | fn name(&self) -> &str; 43 | /// Shared dsn configuration 44 | fn dsn(&self) -> &sdk_dsn::DsnShared; 45 | /// Rpc 46 | fn rpc(&self) -> &Self::Rpc; 47 | } 48 | -------------------------------------------------------------------------------- /sdk/substrate/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "sdk-substrate" 3 | version = "0.1.0" 4 | edition = "2021" 5 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 6 | 7 | [dependencies] 8 | bytesize = "1.1" 9 | derivative = "2.2.0" 10 | derive_builder = "0.12" 11 | derive_more = "0.99" 12 | names = { version = "0.14.0", default-features = false } 13 | sc-chain-spec = { version = "4.0.0-dev", git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8" } 14 | sc-informant = { version = "0.10.0-dev", git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8" } 15 | sc-network = { version = "0.10.0-dev", git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8" } 16 | sc-service = { version = "0.10.0-dev", git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8", default-features = false } 17 | sc-state-db = { version = "0.10.0-dev", git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8" } 18 | sc-storage-monitor = { version = "0.1.0", git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8", default-features = false } 19 | sdk-utils = { path = "../utils" } 20 | serde = { version = "1", features = ["derive"] } 21 | sp-runtime = { version = "24.0.0", git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8" } 22 | subspace-service = { git = "https://github.com/subspace/subspace", rev = "a2e0318a75705ab31975ac8b172c999cab378e90" } 23 | -------------------------------------------------------------------------------- /sdk/node/src/domains/domain.rs: -------------------------------------------------------------------------------- 1 | use std::sync::Arc; 2 | 3 | use derivative::Derivative; 4 | use sc_service::RpcHandlers; 5 | use sdk_utils::{DestructorSet, TaskOutput}; 6 | 7 | /// Progress 
of Domain 8 | #[derive(Derivative)] 9 | #[derivative(Debug)] 10 | pub enum DomainBuildingProgress { 11 | Default, 12 | BuildingStarted, 13 | Bootstrapped, 14 | PreparingToStart, 15 | Starting, 16 | } 17 | 18 | /// Domain structure 19 | #[derive(Derivative)] 20 | #[derivative(Debug)] 21 | #[must_use = "Domain should be closed"] 22 | pub struct Domain { 23 | #[doc(hidden)] 24 | pub _destructors: DestructorSet, 25 | /// Rpc Handlers for Domain node 26 | #[derivative(Debug = "ignore")] 27 | pub rpc_handlers: Arc>>, 28 | /// Domain building progress tracker 29 | pub current_building_progress: Arc>, 30 | /// Oneshot channel to receive result of domain runner 31 | #[derivative(Debug = "ignore")] 32 | pub domain_runner_result_receiver: 33 | tokio::sync::oneshot::Receiver>>, 34 | } 35 | 36 | impl Domain { 37 | /// Shuts down domain node 38 | pub async fn close(self) -> anyhow::Result<()> { 39 | self._destructors.async_drop().await?; 40 | let output = self.domain_runner_result_receiver.await??; 41 | match output { 42 | TaskOutput::Value(_) => Ok(()), 43 | TaskOutput::Cancelled(reason) => { 44 | tracing::warn!("Domain runner task was cancelled due to reason: {}", reason); 45 | Ok(()) 46 | } 47 | } 48 | } 49 | } 50 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.yml: -------------------------------------------------------------------------------- 1 | name: 🐞 Bug Report 2 | description: If something is not working as expected...🔍 3 | title: "🐞 [Bug]: " 4 | labels: ["bug"] 5 | assignees: 6 | - ozgunozerk 7 | body: 8 | - type: markdown 9 | attributes: 10 | value: | 11 | Thanks for taking the time to fill out this bug report! 12 | - type: textarea 13 | id: what-happened 14 | attributes: 15 | label: What happened? 16 | description: if you want, you can include screenshots as well :) 17 | validations: 18 | required: true 19 | - type: input 20 | id: version 21 | attributes: 22 | label: Version 23 | description: What version of our software are you running? 24 | validations: 25 | required: true 26 | - type: checkboxes 27 | id: platform 28 | attributes: 29 | label: platform 30 | description: On which operating system did this bug emerge? 31 | options: 32 | - label: linux 33 | required: false 34 | - label: windows 35 | required: false 36 | - label: macos 37 | required: false 38 | - type: textarea 39 | id: expected 40 | attributes: 41 | label: Expected behavior 42 | description: What should have happened instead? What was the expected behavior? 43 | - type: checkboxes 44 | id: terms 45 | attributes: 46 | label: Code of Conduct 47 | description: By submitting this issue, you agree to follow our [Code of Conduct](https://github.com/subspace/pulsar/blob/main/CODE_OF_CONDUCT.md) # TODO: fix this link 48 | options: 49 | - label: I agree to follow this project's Code of Conduct 50 | required: true 51 | -------------------------------------------------------------------------------- /pulsar/src/commands/info.rs: -------------------------------------------------------------------------------- 1 | use color_eyre::eyre::{Context, Result}; 2 | use single_instance::SingleInstance; 3 | 4 | use crate::commands::farm::SINGLE_INSTANCE; 5 | use crate::summary::{Summary, SummaryFile}; 6 | 7 | /// implementation of the `info` command. 
8 | /// 9 | /// informs the user about the current farming instance 10 | pub(crate) async fn info() -> Result<()> { 11 | let instance = 12 | SingleInstance::new(SINGLE_INSTANCE).context("failed to initialize single instance")?; 13 | if !instance.is_single() { 14 | println!("A farmer instance is active!"); 15 | } else { 16 | println!("There is no active farmer instance..."); 17 | } 18 | 19 | let summary_file = SummaryFile::new(None).await?; 20 | let Summary { 21 | user_space_pledged, 22 | authored_count, 23 | vote_count, 24 | total_rewards, 25 | initial_plotting_finished, 26 | last_processed_block_num: last_block_parsed, 27 | } = summary_file 28 | .parse() 29 | .await 30 | .context("couldn't parse summary file, are you sure you have run the `farm` command?")?; 31 | 32 | println!("You have pledged to the network: {user_space_pledged}"); 33 | 34 | println!("Farmed {authored_count} block(s)"); 35 | 36 | println!("Voted on {vote_count} block(s)"); 37 | 38 | println!("{total_rewards} SSC(s) earned!",); 39 | 40 | println!("This data is derived from the first {last_block_parsed} blocks in the chain!",); 41 | 42 | if initial_plotting_finished { 43 | println!("Initial plotting is finished!"); 44 | } else { 45 | println!("Initial plotting is not finished..."); 46 | } 47 | 48 | Ok(()) 49 | } 50 | -------------------------------------------------------------------------------- /sdk/README.md: -------------------------------------------------------------------------------- 1 | # Subspace-SDK 2 | 3 | 4 | 5 | A library for easily running a local Subspace node and/or farmer. 6 | 7 | ## Dependencies 8 | 9 | You'll need the [Rust toolchain](https://rustup.rs/) installed, as well as a few additional packages (Ubuntu example): 10 | ```bash 11 | sudo apt-get install build-essential llvm protobuf-compiler 12 | ``` 13 | 14 | ## Simplest example 15 | 16 | Start a node and a farmer and wait for 10 blocks to be farmed. 17 | 18 | ```rust 19 | use std::num::NonZeroU8; 20 | 21 | use futures::prelude::*; 22 | 23 | let node = subspace_sdk::Node::builder() 24 | .force_authoring(true) 25 | .role(subspace_sdk::node::Role::Authority) 26 | // Starting a new chain 27 | .build("node", subspace_sdk::chain_spec::dev_config()) 28 | .await 29 | .unwrap(); 30 | 31 | let plots = [subspace_sdk::FarmDescription::new("plot", subspace_sdk::ByteSize::mb(100))]; 32 | let farmer = subspace_sdk::Farmer::builder() 33 | .build( 34 | subspace_sdk::PublicKey::from([0; 32]), 35 | &node, 36 | &plots, 37 | NonZeroU8::new(1).expect("Static value should not fail; qed"), 38 | ) 39 | .await 40 | .expect("Failed to init a farmer"); 41 | 42 | for plot in farmer.iter_farms().await { 43 | let mut plotting_progress = plot.subscribe_initial_plotting_progress().await; 44 | while plotting_progress.next().await.is_some() {} 45 | } 46 | tracing::info!("Initial plotting completed"); 47 | 48 | node.subscribe_new_heads() 49 | .await 50 | .unwrap() 51 | // Wait 10 blocks and exit 52 | .take(10) 53 | .for_each(|header| async move { tracing::info!(?header, "New block!") }) 54 | .await; 55 | ``` 56 | -------------------------------------------------------------------------------- /sdk/subspace-sdk/src/lib.rs: -------------------------------------------------------------------------------- 1 | //! 
Subspace SDK for easy running of both Subspace node and farmer 2 | 3 | #![warn( 4 | missing_docs, 5 | clippy::dbg_macro, 6 | clippy::unwrap_used, 7 | clippy::disallowed_types, 8 | unused_features 9 | )] 10 | #![cfg_attr(not(test), warn(unused_crate_dependencies))] 11 | 12 | /// Module related to the farmer 13 | pub use sdk_farmer::{Builder as FarmerBuilder, FarmDescription, Info as FarmerInfo}; 14 | pub use sdk_node::{chain_spec, Builder as NodeBuilder, Info as NodeInfo}; 15 | pub use sdk_utils::{ByteSize, Multiaddr, MultiaddrWithPeerId, PublicKey, Ss58ParsingError}; 16 | use subspace_proof_of_space::chia::ChiaTable; 17 | 18 | static_assertions::assert_impl_all!(Node: Send, Sync); 19 | static_assertions::assert_impl_all!(Farmer: Send, Sync); 20 | static_assertions::assert_impl_all!(Farm: Send, Sync); 21 | 22 | /// Subspace farmer type 23 | pub type Farmer = sdk_farmer::Farmer; 24 | /// Subspace farmer's plot 25 | pub type Farm = sdk_farmer::Farm; 26 | /// Subspace primary node 27 | pub type Node = sdk_node::Node; 28 | 29 | /// Farmer related things located here 30 | pub mod farmer { 31 | pub use sdk_farmer::FarmDescription; 32 | 33 | pub use super::{Farm, Farmer}; 34 | } 35 | 36 | /// Node related things located here 37 | pub mod node { 38 | pub use sdk_dsn::*; 39 | pub use sdk_node::chain_spec::ChainSpec; 40 | pub use sdk_node::{ 41 | chain_spec, BlockNumber, DomainConfigBuilder, Event, Hash, RewardsEvent, SubspaceEvent, 42 | SyncingProgress, 43 | }; 44 | pub use sdk_substrate::*; 45 | 46 | pub use super::Node; 47 | } 48 | 49 | /// SDK utilities, mainly used by tests 50 | pub mod utils { 51 | pub use sdk_utils::*; 52 | } 53 | -------------------------------------------------------------------------------- /sdk/farmer/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "sdk-farmer" 3 | version = "0.1.0" 4 | edition = "2021" 5 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 6 | 7 | [dependencies] 8 | anyhow = "1" 9 | async-trait = "0.1" 10 | bytesize = "1.2.0" 11 | derivative = "2.2.0" 12 | derive_builder = "0.12" 13 | derive_more = "0.99" 14 | futures = "0.3" 15 | lru = "0.12.2" 16 | libmimalloc-sys = { version = "0.1.35", features = ["extended"] } 17 | parking_lot = "0.12" 18 | pin-project = "1" 19 | rayon = "1.7.0" 20 | sdk-traits = { path = "../traits" } 21 | sdk-utils = { path = "../utils" } 22 | serde = { version = "1", features = ["derive"] } 23 | subspace-core-primitives = { git = "https://github.com/subspace/subspace", rev = "a2e0318a75705ab31975ac8b172c999cab378e90" } 24 | subspace-erasure-coding = { git = "https://github.com/subspace/subspace", rev = "a2e0318a75705ab31975ac8b172c999cab378e90" } 25 | subspace-farmer = { git = "https://github.com/subspace/subspace", rev = "a2e0318a75705ab31975ac8b172c999cab378e90", default-features = false } 26 | subspace-farmer-components = { git = "https://github.com/subspace/subspace", rev = "a2e0318a75705ab31975ac8b172c999cab378e90" } 27 | subspace-networking = { git = "https://github.com/subspace/subspace", rev = "a2e0318a75705ab31975ac8b172c999cab378e90" } 28 | subspace-proof-of-space = { git = "https://github.com/subspace/subspace", rev = "a2e0318a75705ab31975ac8b172c999cab378e90", features = ["parallel"] } 29 | subspace-rpc-primitives = { git = "https://github.com/subspace/subspace", rev = "a2e0318a75705ab31975ac8b172c999cab378e90" } 30 | thiserror = "1" 31 | tokio = { version = "1.34.0", features = ["fs", "rt", "tracing", 
"macros", "parking_lot", "rt-multi-thread", "signal"] } 32 | tokio-stream = { version = "0.1", features = ["sync", "time"] } 33 | tracing = "0.1" 34 | tracing-futures = "0.2" 35 | 36 | [features] 37 | default = ["numa"] 38 | numa = [ 39 | "subspace-farmer/numa", 40 | ] 41 | -------------------------------------------------------------------------------- /sdk/subspace-sdk/examples/complete.rs: -------------------------------------------------------------------------------- 1 | use std::num::NonZeroU8; 2 | 3 | use futures::StreamExt; 4 | use subspace_sdk::node::NetworkBuilder; 5 | use subspace_sdk::{chain_spec, node, ByteSize, FarmDescription, Farmer, Node, PublicKey}; 6 | 7 | #[tokio::main] 8 | async fn main() { 9 | let plots = [FarmDescription::new("plot", ByteSize::gb(10))]; 10 | let node: Node = Node::builder() 11 | .blocks_pruning(node::BlocksPruning::Number(1000)) 12 | .state_pruning(node::PruningMode::ArchiveCanonical) 13 | .network(NetworkBuilder::new().name("i1i1")) 14 | .build("node", chain_spec::dev_config()) 15 | .await 16 | .expect("Failed to init a node"); 17 | 18 | node.sync().await.unwrap(); 19 | 20 | let reward_address = PublicKey::from([0; 32]); 21 | let farmer: Farmer = Farmer::builder() 22 | // .ws_rpc("127.0.0.1:9955".parse().unwrap()) 23 | // .listen_on("/ip4/0.0.0.0/tcp/40333".parse().unwrap()) 24 | .build( 25 | reward_address, 26 | &node, 27 | &plots, 28 | NonZeroU8::new(1).expect("Static value should not fail; qed"), 29 | ) 30 | .await 31 | .expect("Failed to init a farmer"); 32 | 33 | tokio::spawn({ 34 | let mut solutions = 35 | farmer.iter_farms().await.next().unwrap().subscribe_new_solutions().await; 36 | async move { 37 | while let Some(solution) = solutions.next().await { 38 | eprintln!("Found solution: {solution:?}"); 39 | } 40 | } 41 | }); 42 | tokio::spawn({ 43 | let mut new_blocks = node.subscribe_new_heads().await.unwrap(); 44 | async move { 45 | while let Some(block) = new_blocks.next().await { 46 | eprintln!("New block: {block:?}"); 47 | } 48 | } 49 | }); 50 | 51 | dbg!(node.get_info().await.unwrap()); 52 | dbg!(farmer.get_info().await.unwrap()); 53 | 54 | farmer.close().await.unwrap(); 55 | node.close().await.unwrap(); 56 | 57 | // Restarting 58 | let node = Node::builder() 59 | .blocks_pruning(node::BlocksPruning::Number(1000)) 60 | .state_pruning(node::PruningMode::ArchiveCanonical) 61 | .build("node", chain_spec::dev_config()) 62 | .await 63 | .expect("Failed to init a node"); 64 | node.sync().await.unwrap(); 65 | 66 | let farmer = Farmer::builder() 67 | .build( 68 | reward_address, 69 | &node, 70 | &[FarmDescription::new("plot", ByteSize::gb(10))], 71 | NonZeroU8::new(1).expect("Static value should not fail; qed"), 72 | ) 73 | .await 74 | .expect("Failed to init a farmer"); 75 | 76 | farmer.close().await.unwrap(); 77 | node.close().await.unwrap(); 78 | 79 | // Delete everything 80 | for plot in plots { 81 | plot.wipe().await.unwrap(); 82 | } 83 | Node::wipe("node").await.unwrap(); 84 | } 85 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Contributor Code of Conduct 2 | 3 | Subspace perceives trust, respect, collaboration and transparency as core values. Our community welcomes participants from around the world with different experiences, unique perspectives, and great ideas to share. 
4 | 5 | ## Our Pledge 6 | 7 | In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, sex characteristics, gender identity and expression, level of experience, education, socio-economic status, nationality, personal appearance, race, religion, or sexual identity and orientation. 8 | 9 | ## Our Standards 10 | 11 | Examples of behavior that contributes to creating a positive environment include: 12 | 13 | - Using welcoming and inclusive language 14 | - Being respectful of differing viewpoints and experiences 15 | - Gracefully accepting constructive criticism 16 | - Attempting collaboration before conflict 17 | - Focusing on what is best for the community 18 | - Showing empathy towards other community members 19 | 20 | Examples of unacceptable behavior by participants include: 21 | 22 | - Violence, threats of violence, or inciting others to commit self-harm 23 | - The use of sexualized language or imagery and unwelcome sexual attention or advances 24 | - Trolling, intentionally spreading misinformation, insulting/derogatory comments, and personal or political attacks 25 | - Public or private harassment 26 | - Publishing others' private information, such as a physical or electronic address, without explicit permission 27 | - Abuse of the reporting process to intentionally harass or exclude others 28 | - Advocating for, or encouraging, any of the above behavior 29 | - Other conduct which could reasonably be considered inappropriate in a professional setting 30 | 31 | ## Our Responsibilities 32 | 33 | Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior. 34 | 35 | Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. 36 | 37 | ## Scope 38 | 39 | This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers. 40 | 41 | ## Attribution 42 | 43 | This Code of Conduct is adapted from the [Contributor Covenant](https://www.contributor-covenant.org/), [version 1.4](https://www.contributor-covenant.org/version/1/4/code-of-conduct.html). 
44 | -------------------------------------------------------------------------------- /sdk/utils/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "sdk-utils" 3 | version = "0.1.0" 4 | edition = "2021" 5 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 6 | 7 | [dependencies] 8 | anyhow = "1" 9 | async-trait = "0.1" 10 | base58 = "0.2" 11 | blake2 = "0.10.5" 12 | bytesize = "1" 13 | bytesize-serde = "0.2" 14 | derivative = "2.2.0" 15 | derive_more = "0.99" 16 | frame-support = { version = "4.0.0-dev", git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8" } 17 | frame-system = { version = "4.0.0-dev", git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8" } 18 | futures = "0.3" 19 | jsonrpsee-core = "0.16" 20 | libp2p-core = { git = "https://github.com/subspace/rust-libp2p", rev = "d6339da35589d86bae6ecb25a5121c02f2e5b90e" } 21 | parity-scale-codec = "3.6.3" 22 | sc-consensus-subspace-rpc = { git = "https://github.com/subspace/subspace", rev = "a2e0318a75705ab31975ac8b172c999cab378e90" } 23 | sc-network = { version = "0.10.0-dev", git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8", default-features = false } 24 | sc-rpc = { version = "4.0.0-dev", git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8", default-features = false } 25 | sc-rpc-api = { version = "0.10.0-dev", git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8", default-features = false } 26 | sc-service = { version = "0.10.0-dev", git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8", default-features = false } 27 | serde = { version = "1", features = ["derive"] } 28 | serde_json = "1.0.106" 29 | sp-core = { version = "21.0.0", git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8" } 30 | sp-core-hashing = { version = "9.0.0", git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8" } 31 | sp-runtime = { version = "24.0.0", git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8" } 32 | sp-storage = { version = "13.0.0", git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8" } 33 | ss58-registry = "1.33" 34 | # Unused for now. 
TODO: add `serde` feature to `subspace-core-primitives` in `subspace-archiver` 35 | subspace-core-primitives = { git = "https://github.com/subspace/subspace", rev = "a2e0318a75705ab31975ac8b172c999cab378e90" } 36 | subspace-farmer = { git = "https://github.com/subspace/subspace", rev = "a2e0318a75705ab31975ac8b172c999cab378e90", default-features = false } 37 | subspace-rpc-primitives = { git = "https://github.com/subspace/subspace", rev = "a2e0318a75705ab31975ac8b172c999cab378e90" } 38 | subspace-runtime = { git = "https://github.com/subspace/subspace", rev = "a2e0318a75705ab31975ac8b172c999cab378e90" } 39 | subspace-runtime-primitives = { git = "https://github.com/subspace/subspace", rev = "a2e0318a75705ab31975ac8b172c999cab378e90" } 40 | thiserror = "1" 41 | tokio = { version = "1.34.0", features = ["fs", "rt", "tracing", "macros", "parking_lot", "rt-multi-thread", "signal"] } 42 | tracing = "0.1" 43 | -------------------------------------------------------------------------------- /pulsar/src/commands/wipe.rs: -------------------------------------------------------------------------------- 1 | use color_eyre::eyre::{Context, Result}; 2 | use owo_colors::OwoColorize; 3 | use subspace_sdk::{FarmDescription, Node}; 4 | 5 | use crate::config::{delete_config, parse_config}; 6 | use crate::summary::delete_summary; 7 | use crate::utils::{ 8 | farm_directory_getter, get_user_input, node_directory_getter, yes_or_no_parser, 9 | }; 10 | 11 | /// wipe configurator 12 | /// 13 | /// sets the `farmer`, `node`, `summary`, and `config` flags for the `wipe` 14 | /// command 15 | pub(crate) async fn wipe_config(farmer: bool, node: bool) -> Result<()> { 16 | if !farmer && !node { 17 | // if user did not supply any argument, ask for everything 18 | let prompt = "Do you want to wipe farmer (delete farm)? [y/n]: "; 19 | let wipe_farmer = 20 | get_user_input(prompt, None, yes_or_no_parser).context("prompt failed")?; 21 | 22 | let prompt = "Do you want to wipe node? [y/n]: "; 23 | let wipe_node = get_user_input(prompt, None, yes_or_no_parser).context("prompt failed")?; 24 | 25 | let prompt = "Do you want to wipe summary? [y/n]: "; 26 | let wipe_summary = 27 | get_user_input(prompt, None, yes_or_no_parser).context("prompt failed")?; 28 | 29 | let prompt = "Do you want to wipe config? [y/n]: "; 30 | let wipe_config = 31 | get_user_input(prompt, None, yes_or_no_parser).context("prompt failed")?; 32 | 33 | wipe(wipe_farmer, wipe_node, wipe_summary, wipe_config).await?; 34 | } else { 35 | // don't delete summary and config if user supplied flags 36 | wipe(farmer, node, false, false).await?; 37 | } 38 | 39 | Ok(()) 40 | } 41 | 42 | /// implementation of the `wipe` command 43 | /// 44 | /// can wipe farmer, node, summary and farm 45 | async fn wipe( 46 | wipe_farmer: bool, 47 | wipe_node: bool, 48 | wipe_summary: bool, 49 | wipe_config: bool, 50 | ) -> Result<()> { 51 | if wipe_node { 52 | println!("wiping node..."); 53 | let node_directory = node_directory_getter(); 54 | let _ = Node::wipe(node_directory).await; 55 | } 56 | 57 | if wipe_farmer { 58 | println!("wiping farmer..."); 59 | let config = match parse_config() { 60 | Ok(args) => Some(args), 61 | Err(_) => { 62 | println!( 63 | "could not read your config. Wipe will still continue... \n{}", 64 | "However, if you have set a custom location for your plots, you will need to \ 65 | manually delete your plots!" 
66 | .underline() 67 | ); 68 | None 69 | } 70 | }; 71 | 72 | // TODO: modify here when supporting multi-farm 73 | // if config can be read, delete the farmer using the path in the config, else, 74 | // delete the default location 75 | if let Some(config) = config { 76 | let _ = FarmDescription::new(config.farmer.farm_directory, config.farmer.farm_size) 77 | .wipe() 78 | .await; 79 | } else { 80 | let _ = tokio::fs::remove_dir_all(farm_directory_getter()).await; 81 | } 82 | } 83 | 84 | if wipe_summary { 85 | match delete_summary() { 86 | Ok(_) => println!("deleted the summary file"), 87 | Err(_) => println!("Skipping wiping summary, could not find the file..."), 88 | } 89 | } 90 | 91 | if wipe_config { 92 | match delete_config() { 93 | Ok(_) => println!("deleted the config file"), 94 | Err(_) => println!("Skipping wiping config, could not find the file..."), 95 | } 96 | } 97 | 98 | println!("Wipe finished!"); 99 | 100 | Ok(()) 101 | } 102 | -------------------------------------------------------------------------------- /sdk/subspace-sdk/examples/sync.rs: -------------------------------------------------------------------------------- 1 | use std::num::NonZeroU8; 2 | use std::path::PathBuf; 3 | 4 | use anyhow::anyhow; 5 | use clap::Parser; 6 | use futures::stream::StreamExt; 7 | use sdk_node::ChainSpec; 8 | use subspace_sdk::node::NetworkBuilder; 9 | use subspace_sdk::{ 10 | chain_spec, ByteSize, FarmDescription, Farmer, MultiaddrWithPeerId, Node, PublicKey, 11 | }; 12 | use tempfile::TempDir; 13 | 14 | #[derive(clap::Parser, Debug)] 15 | enum Args { 16 | Farm { 17 | /// Path to the plot 18 | #[arg(short, long)] 19 | plot: PathBuf, 20 | 21 | /// Size of the plot 22 | #[arg(long)] 23 | plot_size: ByteSize, 24 | 25 | /// Path to the node directory 26 | #[arg(short, long)] 27 | node: PathBuf, 28 | 29 | /// Path to the chain spec 30 | #[arg(short, long)] 31 | spec: PathBuf, 32 | }, 33 | Sync { 34 | /// Bootstrap nodes 35 | #[arg(short, long)] 36 | boot_nodes: Vec, 37 | 38 | /// Path to the chain spec 39 | #[arg(short, long)] 40 | spec: PathBuf, 41 | }, 42 | GenerateSpec { 43 | path: PathBuf, 44 | }, 45 | } 46 | 47 | #[tokio::main] 48 | async fn main() -> anyhow::Result<()> { 49 | tracing_subscriber::fmt().init(); 50 | 51 | let args = Args::parse(); 52 | match args { 53 | Args::GenerateSpec { path } => 54 | tokio::fs::write( 55 | path, 56 | chain_spec::dev_config() 57 | .as_json(false) 58 | .map_err(|e| anyhow!("unable to write json spec, error: {}", e))?, 59 | ) 60 | .await?, 61 | Args::Farm { plot, plot_size, node, spec } => { 62 | let chain_spec = ChainSpec::from_json_file(spec) 63 | .map_err(|e| anyhow!("unable to read json spec, error: {}", e))?; 64 | let (plot_size, _cache_size) = 65 | (ByteSize::b(plot_size.as_u64() * 9 / 10), ByteSize::b(plot_size.as_u64() / 10)); 66 | let plots = [FarmDescription::new(plot.join("plot"), plot_size)]; 67 | 68 | let node = Node::builder() 69 | .network( 70 | NetworkBuilder::new() 71 | .listen_addresses(vec!["/ip4/127.0.0.1/tcp/0".parse().unwrap()]) 72 | .force_synced(true), 73 | ) 74 | .force_authoring(true) 75 | .role(subspace_sdk::node::Role::Authority) 76 | .build(node, chain_spec) 77 | .await?; 78 | 79 | let _farmer: Farmer = Farmer::builder() 80 | .build( 81 | PublicKey::from([13; 32]), 82 | &node, 83 | &plots, 84 | NonZeroU8::new(1).expect("Static value should not fail; qed"), 85 | ) 86 | .await?; 87 | 88 | let addr = node.listen_addresses().await?.into_iter().next().unwrap(); 89 | tracing::info!(%addr, "Node listening at"); 90 | 91 | 
node.subscribe_new_heads() 92 | .await? 93 | .for_each(|header| async move { tracing::info!(?header, "New block!") }) 94 | .await; 95 | } 96 | Args::Sync { boot_nodes, spec } => { 97 | let node = TempDir::new()?; 98 | let chain_spec = ChainSpec::from_json_file(spec) 99 | .map_err(|e| anyhow!("unable to read json spec, error: {}", e))?; 100 | let node = Node::builder() 101 | .force_authoring(true) 102 | .role(subspace_sdk::node::Role::Authority) 103 | .network(NetworkBuilder::new().boot_nodes(boot_nodes)) 104 | .build(node.as_ref(), chain_spec) 105 | .await?; 106 | 107 | node.sync().await.unwrap(); 108 | tracing::info!("Node was synced!"); 109 | 110 | node.subscribe_new_heads() 111 | .await? 112 | .for_each(|header| async move { tracing::info!(?header, "New block!") }) 113 | .await; 114 | } 115 | } 116 | 117 | Ok(()) 118 | } 119 | -------------------------------------------------------------------------------- /.github/workflows/release-notifier.yml: -------------------------------------------------------------------------------- 1 | # This action is to send a release message to our Discord community. 2 | # - It will only trigger when our release workflow completes successfully or be triggered manually. 3 | # - When triggering this manually you will need to select Branch > Tags > Select the release you want to notify. (Generally the release you just updated from a pre-release.) 4 | name: Send Release Messages 5 | 6 | on: 7 | workflow_run: 8 | workflows: ["release"] 9 | types: 10 | - completed 11 | workflow_dispatch: 12 | inputs: 13 | branch_name: 14 | description: "Please run this workflow from the tag of the release you updated" 15 | required: true 16 | type: boolean 17 | notify_community: 18 | description: "I understand that running this workflow will notify the community of my actions" 19 | required: true 20 | type: boolean 21 | 22 | jobs: 23 | send_message: 24 | runs-on: ubuntu-latest 25 | if: (github.event.workflow_run.conclusion == 'success' && github.event.workflow_run.event != 'workflow_dispatch' ) || github.event_name == 'workflow_dispatch' 26 | steps: 27 | - name: Get release by Tag for manual run 28 | if: ${{ github.event_name == 'workflow_dispatch' }} 29 | id: manual_release_info 30 | uses: cardinalby/git-get-release-action@v1 31 | env: 32 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 33 | with: 34 | commitSha: ${{ github.sha }} 35 | prerelease: false 36 | - name: Get release by Tag for release run 37 | if: ${{ github.event_name == 'workflow_run' }} 38 | id: release_info 39 | uses: cardinalby/git-get-release-action@v1 40 | env: 41 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 42 | with: 43 | commitSha: ${{ github.event.workflow_run.head_sha }} 44 | prerelease: false 45 | latest: true 46 | - name: Send Discord Message 47 | if: ${{ github.event_name == 'workflow_dispatch' || github.event_name == 'workflow_run' && github.event.workflow_run.head_branch == steps.release_info.outputs.tag_name}} 48 | env: 49 | DISCORD_WEBHOOK: ${{ secrets.DISCORD_WEBHOOK }} 50 | uses: Ilshidur/action-discord@master 51 | with: 52 | args: | 53 | ${{ steps.release_info.outputs.tag_name || steps.manual_release_info.outputs.tag_name }} of ${{ github.repository }} has been released! 
54 | 55 | **Release Name:** 56 | ``` 57 | ${{ steps.release_info.outputs.name || steps.manual_release_info.outputs.name }} 58 | ``` 59 | 60 | **Release Description:** 61 | ``` 62 | ${{ steps.release_info.outputs.body || steps.manual_release_info.outputs.body }} 63 | ``` 64 | Read more at: ${{ steps.release_info.outputs.html_url || steps.manual_release_info.outputs.html_url }} 65 | 66 | You can also update by visiting our docs at: 67 | https://docs.subspace.network/docs/protocol/cli 68 | - name: Create Issue in subspace-docs repository 69 | if: ${{ github.event_name == 'workflow_dispatch' || github.event_name == 'workflow_run' && github.event.workflow_run.head_branch == steps.release_info.outputs.tag_name}} 70 | uses: dacbd/create-issue-action@main 71 | env: 72 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 73 | with: 74 | token: ${{ secrets.GITHUB_TOKEN }} 75 | owner: 'subspace' 76 | repo: 'subspace-docs' 77 | title: 'Update docs for release ${{ steps.release_info.outputs.tag_name || steps.manual_release_info.outputs.tag_name }}' 78 | # TODO: Setup Github Organization Team for Product Team and add @Product tag to the body, to notify non-subscribed team members of the open issue. 79 | body: | 80 | A new release of ${{ github.repository }} was released and marked latest. 81 | 82 | Documentation requires an update; please update the prior version to ${{ steps.release_info.outputs.name || steps.manual_release_info.outputs.name }} 83 | 84 | **Release Description:** 85 | ${{ steps.release_info.outputs.body || steps.manual_release_info.outputs.body }} 86 | 87 | 88 | Read more at: ${{ steps.release_info.outputs.html_url || steps.manual_release_info.outputs.html_url }} 89 | 90 | 91 | -------------------------------------------------------------------------------- /sdk/subspace-sdk/tests/integration/farmer.rs: -------------------------------------------------------------------------------- 1 | use futures::prelude::*; 2 | use subspace_sdk::utils::ByteSize; 3 | 4 | use crate::common::{Farmer, Node}; 5 | 6 | #[tokio::test(flavor = "multi_thread")] 7 | #[ignore = "We need api from single disk plot to calculate precise target sector count"] 8 | async fn track_progress() { 9 | crate::common::setup(); 10 | 11 | let number_of_sectors = 10; 12 | let pieces_in_sector = 50u16; 13 | let sector_size = subspace_farmer_components::sector::sector_size(pieces_in_sector as _); 14 | let space_pledged = sector_size * number_of_sectors; 15 | 16 | let node = Node::dev().build(true).await; 17 | let farmer = Farmer::dev() 18 | .pieces_in_sector(pieces_in_sector) 19 | .build(&node, ByteSize::b(space_pledged as u64)) 20 | .await; 21 | 22 | let progress = farmer 23 | .iter_farms() 24 | .await 25 | .next() 26 | .unwrap() 27 | .subscribe_initial_plotting_progress() 28 | .await 29 | .collect::>() 30 | .await; 31 | assert_eq!(progress.len(), number_of_sectors); 32 | 33 | farmer.close().await; 34 | node.close().await; 35 | } 36 | 37 | #[tokio::test(flavor = "multi_thread")] 38 | async fn new_solution() { 39 | crate::common::setup(); 40 | 41 | let number_of_sectors = 10; 42 | let pieces_in_sector = 50u16; 43 | let sector_size = subspace_farmer_components::sector::sector_size(pieces_in_sector as _); 44 | let space_pledged = sector_size * number_of_sectors; 45 | 46 | let node = Node::dev().build(true).await; 47 | let farmer = Farmer::dev() 48 | .pieces_in_sector(pieces_in_sector) 49 | .build(&node, ByteSize::b(space_pledged as u64)) 50 | .await; 51 | 52 | farmer 53 | .iter_farms() 54 | .await 55 | .next() 56 | .unwrap() 57 | 
.subscribe_new_solutions() 58 | .await 59 | .next() 60 | .await 61 | .expect("Farmer should send new solutions"); 62 | 63 | farmer.close().await; 64 | node.close().await; 65 | } 66 | 67 | #[tokio::test(flavor = "multi_thread")] 68 | async fn progress_restart() { 69 | crate::common::setup(); 70 | 71 | let number_of_sectors = 10; 72 | let pieces_in_sector = 50u16; 73 | let sector_size = subspace_farmer_components::sector::sector_size(pieces_in_sector as _); 74 | let space_pledged = sector_size * number_of_sectors; 75 | 76 | let node = Node::dev().build(true).await; 77 | let farmer = Farmer::dev() 78 | .pieces_in_sector(pieces_in_sector) 79 | .build(&node, ByteSize::b(space_pledged as u64)) 80 | .await; 81 | 82 | let plot = farmer.iter_farms().await.next().unwrap(); 83 | 84 | plot.subscribe_initial_plotting_progress().await.for_each(|_| async {}).await; 85 | 86 | tokio::time::timeout( 87 | std::time::Duration::from_secs(5), 88 | plot.subscribe_initial_plotting_progress().await.for_each(|_| async {}), 89 | ) 90 | .await 91 | .unwrap(); 92 | 93 | farmer.close().await; 94 | node.close().await; 95 | } 96 | 97 | #[tokio::test(flavor = "multi_thread")] 98 | #[ignore = "Stack overflows for now"] 99 | async fn farmer_restart() { 100 | crate::common::setup(); 101 | 102 | let number_of_sectors = 10; 103 | let pieces_in_sector = 50u16; 104 | let sector_size = subspace_farmer_components::sector::sector_size(pieces_in_sector as _); 105 | let space_pledged = sector_size * number_of_sectors; 106 | 107 | let node = Node::dev().build(true).await; 108 | 109 | for _ in 0..10 { 110 | Farmer::dev() 111 | .pieces_in_sector(pieces_in_sector) 112 | .build(&node, ByteSize::b(space_pledged as u64)) 113 | .await 114 | .close() 115 | .await; 116 | } 117 | 118 | node.close().await; 119 | } 120 | 121 | #[tokio::test(flavor = "multi_thread")] 122 | async fn farmer_close() { 123 | crate::common::setup(); 124 | 125 | let number_of_sectors = 10; 126 | let pieces_in_sector = 50u16; 127 | let sector_size = subspace_farmer_components::sector::sector_size(pieces_in_sector as _); 128 | let space_pledged = sector_size * number_of_sectors; 129 | 130 | let node = Node::dev().build(true).await; 131 | let farmer = Farmer::dev() 132 | .pieces_in_sector(pieces_in_sector) 133 | .build(&node, ByteSize::b(space_pledged as u64)) 134 | .await; 135 | 136 | farmer.close().await; 137 | node.close().await; 138 | } 139 | -------------------------------------------------------------------------------- /sdk/node/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "sdk-node" 3 | version = "0.1.0" 4 | edition = "2021" 5 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 6 | 7 | [dependencies] 8 | anyhow = "1" 9 | backoff = "0.4" 10 | cross-domain-message-gossip = { git = "https://github.com/subspace/subspace", rev = "a2e0318a75705ab31975ac8b172c999cab378e90" } 11 | derivative = "2.2.0" 12 | derive_builder = "0.12" 13 | derive_more = "0.99" 14 | domain-client-message-relayer = { git = "https://github.com/subspace/subspace", rev = "a2e0318a75705ab31975ac8b172c999cab378e90" } 15 | domain-client-operator = { git = "https://github.com/subspace/subspace", rev = "a2e0318a75705ab31975ac8b172c999cab378e90" } 16 | domain-eth-service = { git = "https://github.com/subspace/subspace", rev = "a2e0318a75705ab31975ac8b172c999cab378e90" } 17 | domain-runtime-primitives = { git = "https://github.com/subspace/subspace", rev = 
"a2e0318a75705ab31975ac8b172c999cab378e90" } 18 | domain-service = { git = "https://github.com/subspace/subspace", rev = "a2e0318a75705ab31975ac8b172c999cab378e90" } 19 | evm-domain-runtime = { git = "https://github.com/subspace/subspace", rev = "a2e0318a75705ab31975ac8b172c999cab378e90" } 20 | fp-evm = { version = "3.0.0-dev", git = "https://github.com/subspace/frontier", rev = "7627e61d80275a4cf24d06f27491f6c31eadb7b7" } 21 | frame-system = { version = "4.0.0-dev", git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8" } 22 | futures = "0.3" 23 | hex-literal = "0.4" 24 | pallet-rewards = { git = "https://github.com/subspace/subspace", rev = "a2e0318a75705ab31975ac8b172c999cab378e90" } 25 | pallet-subspace = { git = "https://github.com/subspace/subspace", rev = "a2e0318a75705ab31975ac8b172c999cab378e90" } 26 | parity-scale-codec = "3.6.3" 27 | parking_lot = "0.12" 28 | pin-project = "1" 29 | sc-client-api = { version = "4.0.0-dev", git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8" } 30 | sc-consensus-slots = { version = "0.10.0-dev", git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8" } 31 | sc-consensus-subspace = { git = "https://github.com/subspace/subspace", rev = "a2e0318a75705ab31975ac8b172c999cab378e90" } 32 | sc-network = { version = "0.10.0-dev", git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8" } 33 | sc-network-sync = { version = "0.10.0-dev", git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8" } 34 | sc-rpc-api = { version = "0.10.0-dev", git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8" } 35 | sc-service = { version = "0.10.0-dev", git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8", default-features = false } 36 | sc-storage-monitor = { version = "0.1.0", git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8", default-features = false } 37 | sc-subspace-chain-specs = { git = "https://github.com/subspace/subspace", rev = "a2e0318a75705ab31975ac8b172c999cab378e90" } 38 | sc-telemetry = { version = "4.0.0-dev", git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8" } 39 | sc-transaction-pool-api = { version = "4.0.0-dev", git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8" } 40 | sc-utils = { version = "4.0.0-dev", git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8" } 41 | sdk-dsn = { path = "../dsn" } 42 | sdk-substrate = { path = "../substrate" } 43 | sdk-traits = { path = "../traits" } 44 | sdk-utils = { path = "../utils" } 45 | serde = { version = "1", features = ["derive"] } 46 | serde_json = "1" 47 | sp-blockchain = { version = "4.0.0-dev", git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8" } 48 | sp-consensus = { version = "0.10.0-dev", git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8" } 49 | sp-consensus-subspace = { git = "https://github.com/subspace/subspace", rev = "a2e0318a75705ab31975ac8b172c999cab378e90" } 50 | sp-core = { version = "21.0.0", git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8" } 51 | sp-domains = { git 
= "https://github.com/subspace/subspace", rev = "a2e0318a75705ab31975ac8b172c999cab378e90" } 52 | sp-messenger = { git = "https://github.com/subspace/subspace", rev = "a2e0318a75705ab31975ac8b172c999cab378e90" } 53 | sp-runtime = { version = "24.0.0", git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8" } 54 | sp-version = { version = "22.0.0", git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8" } 55 | subspace-core-primitives = { git = "https://github.com/subspace/subspace", rev = "a2e0318a75705ab31975ac8b172c999cab378e90" } 56 | subspace-farmer = { git = "https://github.com/subspace/subspace", rev = "a2e0318a75705ab31975ac8b172c999cab378e90", default-features = false } 57 | subspace-farmer-components = { git = "https://github.com/subspace/subspace", rev = "a2e0318a75705ab31975ac8b172c999cab378e90" } 58 | subspace-networking = { git = "https://github.com/subspace/subspace", rev = "a2e0318a75705ab31975ac8b172c999cab378e90" } 59 | subspace-rpc-primitives = { git = "https://github.com/subspace/subspace", rev = "a2e0318a75705ab31975ac8b172c999cab378e90" } 60 | subspace-runtime = { git = "https://github.com/subspace/subspace", rev = "a2e0318a75705ab31975ac8b172c999cab378e90" } 61 | subspace-runtime-primitives = { git = "https://github.com/subspace/subspace", rev = "a2e0318a75705ab31975ac8b172c999cab378e90" } 62 | subspace-service = { git = "https://github.com/subspace/subspace", rev = "a2e0318a75705ab31975ac8b172c999cab378e90" } 63 | tokio = { version = "1.34.0", features = ["fs", "rt", "tracing", "macros", "parking_lot", "rt-multi-thread", "signal"] } 64 | tokio-stream = { version = "0.1", features = ["sync", "time"] } 65 | tracing = "0.1" 66 | -------------------------------------------------------------------------------- /pulsar/src/tests.rs: -------------------------------------------------------------------------------- 1 | use std::str::FromStr; 2 | 3 | use rand::rngs::SmallRng; 4 | use rand::{Rng, SeedableRng}; 5 | use subspace_sdk::ByteSize; 6 | 7 | use crate::config::ChainConfig; 8 | use crate::summary::*; 9 | use crate::utils::{ 10 | apply_extra_options, custom_log_dir, directory_parser, farm_directory_getter, 11 | node_directory_getter, node_name_parser, reward_address_parser, size_parser, yes_or_no_parser, 12 | }; 13 | 14 | async fn update_summary_file_randomly(summary_file: SummaryFile) { 15 | let mut rng = SmallRng::from_entropy(); 16 | 17 | for _ in 0..10 { 18 | let update_fields = SummaryUpdateFields { 19 | is_plotting_finished: false, 20 | new_authored_count: rng.gen_range(1..10), 21 | new_parsed_blocks: rng.gen_range(1..100), 22 | }; 23 | let result = summary_file.update(update_fields).await; 24 | assert!(result.is_ok(), "Failed to update summary file"); 25 | } 26 | } 27 | 28 | #[tokio::test(flavor = "multi_thread")] 29 | async fn summary_file_integration() { 30 | // this test is mainly for CI, in which, summary file won't exist 31 | // if there is a summary file (user env), we don't want to modify the existing 32 | // summary file for test 33 | if SummaryFile::new(None).await.is_ok() { 34 | return; 35 | } 36 | 37 | // create summary file 38 | let farm_size = ByteSize::gb(1); 39 | let summary_file = 40 | SummaryFile::new(Some(farm_size)).await.expect("Failed to create summary file"); 41 | 42 | // sequential update trial 43 | let update_fields = SummaryUpdateFields { 44 | is_plotting_finished: true, 45 | new_authored_count: 11, 46 | new_parsed_blocks: 101, 47 | }; 48 | 
summary_file.update(update_fields).await.expect("Failed to update summary file"); 49 | 50 | // create two concurrent tasks, each tries to write to the summary file 10 times 51 | let task1 = tokio::spawn(update_summary_file_randomly(summary_file.clone())); 52 | let task2 = tokio::spawn(update_summary_file_randomly(summary_file.clone())); 53 | 54 | // Wait for both tasks to complete concurrently 55 | let (result1, result2) = tokio::join!(task1, task2); 56 | 57 | assert!(result1.is_ok(), "Task 1 encountered an error: {:?}", result1.unwrap_err()); 58 | assert!(result2.is_ok(), "Task 2 encountered an error: {:?}", result2.unwrap_err()); 59 | 60 | // parse the summary after updates 61 | summary_file.parse().await.expect("Failed to parse the summary file after updates"); 62 | 63 | // Clean up the summary file 64 | delete_summary().expect("summary deletion failed"); 65 | } 66 | 67 | #[test] 68 | fn extra_options() { 69 | let cargo_toml = toml::toml! { 70 | name = "toml" 71 | 72 | [package] 73 | version = "0.4.5" 74 | authors = ["Alex Crichton "] 75 | }; 76 | let extra = toml::toml! { 77 | name = "toml-edit" 78 | option = true 79 | 80 | [package] 81 | version = "0.4.6" 82 | badges = ["travis-ci"] 83 | }; 84 | let result = toml::toml! { 85 | name = "toml-edit" 86 | option = true 87 | 88 | [package] 89 | authors = ["Alex Crichton "] 90 | version = "0.4.6" 91 | badges = ["travis-ci"] 92 | }; 93 | 94 | assert_eq!(apply_extra_options(&cargo_toml, extra).unwrap(), result); 95 | } 96 | 97 | #[test] 98 | fn yes_no_checker() { 99 | assert!(yes_or_no_parser("yas").is_err()); 100 | assert!(yes_or_no_parser("yess").is_err()); 101 | assert!(yes_or_no_parser("y").is_ok()); 102 | } 103 | 104 | #[test] 105 | fn directory_checker() { 106 | assert!(directory_parser("./").is_ok()); 107 | } 108 | 109 | #[test] 110 | fn node_name_checker() { 111 | assert!(node_name_parser(" ").is_err()); 112 | assert!(node_name_parser("root ").is_err()); 113 | assert!(node_name_parser("ゴゴゴゴ yare yare daze").is_ok()); 114 | } 115 | 116 | #[test] 117 | fn reward_address_checker() { 118 | // below address is randomly generated via metamask and then deleted 119 | assert!(reward_address_parser("5FWr7j9DW4uy7K1JLmFN2R3eoae35PFDUfW7G42ARpBEUaN7").is_ok()); 120 | assert!(reward_address_parser("sdjhfskjfhdksjhfsfhskjskdjhfdsfjhk").is_err()); 121 | } 122 | 123 | #[test] 124 | fn size_checker() { 125 | assert!(size_parser("2GB").is_ok()); 126 | assert!(size_parser("12GB").is_ok()); 127 | assert!(size_parser("103gjie").is_err()); 128 | assert!(size_parser("1.2GB").is_err()); 129 | } 130 | 131 | #[test] 132 | fn chain_checker() { 133 | assert!(ChainConfig::from_str("gemini3h").is_ok()); 134 | assert!(ChainConfig::from_str("devv").is_err()); 135 | } 136 | 137 | #[test] 138 | fn farm_directory_tester() { 139 | let farm_path = farm_directory_getter(); 140 | 141 | #[cfg(target_os = "macos")] 142 | assert!(farm_path.ends_with("Library/Application Support/pulsar/farms")); 143 | 144 | #[cfg(target_os = "linux")] 145 | assert!(farm_path.ends_with(".local/share/pulsar/farms")); 146 | 147 | #[cfg(target_os = "windows")] 148 | assert!(farm_path.ends_with("AppData/Roaming/pulsar/farms")); 149 | } 150 | 151 | #[test] 152 | fn node_directory_tester() { 153 | let node_path = node_directory_getter(); 154 | 155 | #[cfg(target_os = "macos")] 156 | assert!(node_path.ends_with("Library/Application Support/pulsar/node")); 157 | 158 | #[cfg(target_os = "linux")] 159 | assert!(node_path.ends_with(".local/share/pulsar/node")); 160 | 161 | #[cfg(target_os = "windows")] 162 |
assert!(node_path.ends_with("AppData/Roaming/pulsar/node")); 163 | } 164 | 165 | #[test] 166 | fn custom_log_dir_test() { 167 | let log_path = custom_log_dir(); 168 | 169 | #[cfg(target_os = "macos")] 170 | assert!(log_path.ends_with("Library/Logs/pulsar")); 171 | 172 | #[cfg(target_os = "linux")] 173 | assert!(log_path.ends_with(".local/share/pulsar/logs")); 174 | 175 | #[cfg(target_os = "windows")] 176 | assert!(log_path.ends_with("AppData/Local/pulsar/logs")); 177 | } 178 | -------------------------------------------------------------------------------- /sdk/node/src/builder.rs: -------------------------------------------------------------------------------- 1 | use std::collections::HashSet; 2 | use std::num::NonZeroUsize; 3 | use std::path::Path; 4 | 5 | use derivative::Derivative; 6 | use derive_builder::Builder; 7 | use derive_more::{Deref, DerefMut, Display, From}; 8 | use sdk_dsn::{Dsn, DsnBuilder}; 9 | use sdk_substrate::{ 10 | Base, BaseBuilder, BlocksPruning, NetworkBuilder, PruningMode, Role, RpcBuilder, StorageMonitor, 11 | }; 12 | use sdk_utils::ByteSize; 13 | use serde::{Deserialize, Serialize}; 14 | 15 | use super::{ChainSpec, Farmer, Node}; 16 | use crate::domains::builder::DomainConfig; 17 | 18 | /// Wrapper with default value for piece cache size 19 | #[derive( 20 | Debug, Clone, Derivative, Deserialize, Serialize, PartialEq, Eq, From, Deref, DerefMut, Display, 21 | )] 22 | #[derivative(Default)] 23 | #[serde(transparent)] 24 | /// Size of cache of pieces that node produces 25 | /// TODO: Set it to 1 GB once DSN is fixed 26 | pub struct PieceCacheSize(#[derivative(Default(value = "ByteSize::gib(3)"))] pub(crate) ByteSize); 27 | 28 | /// Wrapper with default value for segment publish concurrent jobs 29 | #[derive( 30 | Debug, Clone, Derivative, Deserialize, Serialize, PartialEq, Eq, From, Deref, DerefMut, Display, 31 | )] 32 | #[derivative(Default)] 33 | #[serde(transparent)] 34 | pub struct SegmentPublishConcurrency( 35 | #[derivative(Default(value = "NonZeroUsize::new(10).expect(\"10 > 0\")"))] 36 | pub(crate) NonZeroUsize, 37 | ); 38 | 39 | /// Node builder 40 | #[derive(Debug, Clone, Derivative, Builder, Deserialize, Serialize, PartialEq)] 41 | #[derivative(Default(bound = ""))] 42 | #[builder(pattern = "owned", build_fn(private, name = "_build"), name = "Builder")] 43 | #[non_exhaustive] 44 | pub struct Config { 45 | /// Max number of segments that can be published concurrently, impacts 46 | /// RAM usage and network bandwidth. 47 | #[builder(setter(into), default)] 48 | #[serde(default, skip_serializing_if = "sdk_utils::is_default")] 49 | pub segment_publish_concurrency: SegmentPublishConcurrency, 50 | /// Should we sync blocks from the DSN? 
51 | #[builder(default)] 52 | #[serde(default, skip_serializing_if = "sdk_utils::is_default")] 53 | pub sync_from_dsn: bool, 54 | #[doc(hidden)] 55 | #[builder( 56 | setter(into, strip_option), 57 | field(type = "BaseBuilder", build = "self.base.build()") 58 | )] 59 | #[serde(flatten, skip_serializing_if = "sdk_utils::is_default")] 60 | pub base: Base, 61 | /// DSN settings 62 | #[builder(setter(into), default)] 63 | #[serde(default, skip_serializing_if = "sdk_utils::is_default")] 64 | pub dsn: Dsn, 65 | /// Storage monitor settings 66 | #[builder(setter(into), default)] 67 | #[serde(default, skip_serializing_if = "sdk_utils::is_default")] 68 | pub storage_monitor: Option, 69 | /// Enables subspace block relayer 70 | #[builder(default)] 71 | #[serde(default, skip_serializing_if = "sdk_utils::is_default")] 72 | pub enable_subspace_block_relay: bool, 73 | #[builder(setter(skip), default)] 74 | #[serde(skip, default)] 75 | _farmer: std::marker::PhantomData, 76 | /// Optional domain configuration 77 | #[builder(setter(into), default)] 78 | #[serde(default, skip_serializing_if = "sdk_utils::is_default")] 79 | pub domain: Option, 80 | /// Flag indicating if the node is an authority for Proof of time consensus 81 | #[builder(setter(into), default)] 82 | #[serde(default, skip_serializing_if = "sdk_utils::is_default")] 83 | pub is_timekeeper: bool, 84 | /// CPU cores that the timekeeper can use 85 | #[builder(setter(into), default)] 86 | #[serde(default, skip_serializing_if = "sdk_utils::is_default")] 87 | pub timekeeper_cpu_cores: HashSet, 88 | /// Proof of time entropy 89 | #[builder(setter(into), default)] 90 | #[serde(default, skip_serializing_if = "sdk_utils::is_default")] 91 | pub pot_external_entropy: Option, 92 | } 93 | 94 | impl Config { 95 | /// Dev configuration 96 | pub fn dev() -> Builder { 97 | Builder::dev() 98 | } 99 | 100 | /// Gemini 3h configuration 101 | pub fn gemini_3h() -> Builder { 102 | Builder::gemini_3h() 103 | } 104 | 105 | /// Devnet configuration 106 | pub fn devnet() -> Builder { 107 | Builder::devnet() 108 | } 109 | } 110 | 111 | impl Builder { 112 | /// Dev chain configuration 113 | pub fn dev() -> Self { 114 | Self::new() 115 | .is_timekeeper(true) 116 | .force_authoring(true) 117 | .network(NetworkBuilder::dev()) 118 | .dsn(DsnBuilder::dev()) 119 | .rpc(RpcBuilder::dev()) 120 | } 121 | 122 | /// Gemini 3h configuration 123 | pub fn gemini_3h() -> Self { 124 | Self::new() 125 | .network(NetworkBuilder::gemini_3h()) 126 | .dsn(DsnBuilder::gemini_3h()) 127 | .rpc(RpcBuilder::gemini_3h()) 128 | .role(Role::Authority) 129 | .state_pruning(PruningMode::ArchiveCanonical) 130 | .blocks_pruning(BlocksPruning::Number(256)) 131 | } 132 | 133 | /// Devnet chain configuration 134 | pub fn devnet() -> Self { 135 | Self::new() 136 | .network(NetworkBuilder::devnet()) 137 | .dsn(DsnBuilder::devnet()) 138 | .rpc(RpcBuilder::devnet()) 139 | .role(Role::Authority) 140 | .state_pruning(PruningMode::ArchiveCanonical) 141 | .blocks_pruning(BlocksPruning::Number(256)) 142 | } 143 | 144 | /// Get configuration for saving on disk 145 | pub fn configuration(self) -> Config { 146 | self._build().expect("Build is infallible") 147 | } 148 | 149 | /// New builder 150 | pub fn new() -> Self { 151 | Self::default() 152 | } 153 | 154 | /// Start a node with supplied parameters 155 | pub async fn build( 156 | self, 157 | directory: impl AsRef, 158 | chain_spec: ChainSpec, 159 | ) -> anyhow::Result> { 160 | self.configuration().build(directory, chain_spec).await 161 | } 162 | } 163 | 164 |
sdk_substrate::derive_base!( @ Base => Builder); 165 | -------------------------------------------------------------------------------- /sdk/subspace-sdk/tests/integration/common.rs: -------------------------------------------------------------------------------- 1 | use std::num::NonZeroU8; 2 | use std::path::PathBuf; 3 | use std::sync::Arc; 4 | 5 | use derive_builder::Builder; 6 | use derive_more::{Deref, DerefMut}; 7 | use subspace_sdk::farmer::FarmDescription; 8 | use subspace_sdk::node::{ 9 | chain_spec, ChainSpec, DomainConfigBuilder, DsnBuilder, NetworkBuilder, Role, 10 | }; 11 | use subspace_sdk::utils::ByteSize; 12 | use subspace_sdk::MultiaddrWithPeerId; 13 | use tempfile::TempDir; 14 | use tracing_subscriber::layer::SubscriberExt; 15 | use tracing_subscriber::util::SubscriberInitExt; 16 | use tracing_subscriber::Layer; 17 | 18 | pub fn setup() { 19 | #[cfg(tokio_unstable)] 20 | let registry = tracing_subscriber::registry().with(console_subscriber::spawn()); 21 | #[cfg(not(tokio_unstable))] 22 | let registry = tracing_subscriber::registry(); 23 | 24 | let _ = registry 25 | .with( 26 | tracing_subscriber::fmt::layer().with_test_writer().with_filter( 27 | "debug,parity-db=info,cranelift_codegen=info,wasmtime_cranelift=info,\ 28 | subspace_sdk=trace,subspace_farmer=trace,subspace_service=trace,\ 29 | subspace_farmer::utils::parity_db_store=debug,trie-cache=info,\ 30 | wasm_overrides=info,jsonrpsee_core=info,libp2p_gossipsub::behaviour=info,\ 31 | libp2p_core=info,libp2p_tcp=info,multistream_select=info,yamux=info,\ 32 | libp2p_swarm=info,libp2p_ping=info,subspace_networking::node_runner=info,\ 33 | subspace_networking::utils::piece_announcement=info,\ 34 | subspace_farmer::utils::farmer_provider_record_processor=debug,\ 35 | subspace_farmer::utils::farmer_piece_cache=debug,wasmtime_jit=info,\ 36 | wasm-runtime=info" 37 | .parse::() 38 | .expect("Env filter directives are correct"), 39 | ), 40 | ) 41 | .try_init(); 42 | } 43 | 44 | #[derive(Builder)] 45 | #[builder(pattern = "immutable", build_fn(private, name = "_build"), name = "NodeBuilder")] 46 | pub struct InnerNode { 47 | #[builder(default)] 48 | not_force_synced: bool, 49 | #[builder(default)] 50 | boot_nodes: Vec, 51 | #[builder(default)] 52 | dsn_boot_nodes: Vec, 53 | #[builder(default)] 54 | not_authority: bool, 55 | #[builder(default = "chain_spec::dev_config()")] 56 | chain: ChainSpec, 57 | #[builder(default = "TempDir::new().map(Arc::new).unwrap()")] 58 | path: Arc, 59 | #[cfg(feature = "core-payments")] 60 | #[builder(default)] 61 | enable_core: bool, 62 | } 63 | 64 | #[derive(Deref, DerefMut)] 65 | pub struct Node { 66 | #[deref] 67 | #[deref_mut] 68 | node: subspace_sdk::Node, 69 | pub path: Arc, 70 | pub chain: ChainSpec, 71 | } 72 | 73 | impl NodeBuilder { 74 | pub async fn build(self, enable_domains: bool) -> Node { 75 | let InnerNode { 76 | not_force_synced, 77 | boot_nodes, 78 | dsn_boot_nodes, 79 | not_authority, 80 | chain, 81 | path, 82 | #[cfg(feature = "core-payments")] 83 | enable_core, 84 | } = self._build().expect("Infallible"); 85 | let node = subspace_sdk::Node::dev() 86 | .dsn( 87 | DsnBuilder::dev() 88 | .listen_addresses(vec!["/ip4/127.0.0.1/tcp/0".parse().unwrap()]) 89 | .boot_nodes(dsn_boot_nodes), 90 | ) 91 | .network( 92 | NetworkBuilder::dev() 93 | .force_synced(!not_force_synced) 94 | .listen_addresses(vec!["/ip4/127.0.0.1/tcp/0".parse().unwrap()]) 95 | .boot_nodes(boot_nodes), 96 | ) 97 | .role(if not_authority { Role::Full } else { Role::Authority }) 98 | .is_timekeeper(!not_authority); 99 | 
100 | let node = if enable_domains { 101 | node.domain(Some(DomainConfigBuilder::dev().configuration())) 102 | } else { 103 | node 104 | }; 105 | 106 | #[cfg(all(feature = "core-payments", feature = "executor"))] 107 | let node = if enable_core { 108 | node.system_domain(subspace_sdk::node::domains::ConfigBuilder::new().core_payments( 109 | subspace_sdk::node::domains::core_payments::ConfigBuilder::new().build(), 110 | )) 111 | } else { 112 | node 113 | }; 114 | 115 | let node = node.build(path.path().join("node"), chain.clone()).await.unwrap(); 116 | 117 | Node { node, path, chain } 118 | } 119 | } 120 | 121 | impl Node { 122 | pub fn dev() -> NodeBuilder { 123 | NodeBuilder::default() 124 | } 125 | 126 | pub fn path(&self) -> Arc { 127 | Arc::clone(&self.path) 128 | } 129 | 130 | pub async fn close(self) { 131 | self.node.close().await.unwrap(); 132 | } 133 | } 134 | 135 | #[derive(Builder)] 136 | #[builder(pattern = "immutable", build_fn(private, name = "_build"), name = "FarmerBuilder")] 137 | pub struct InnerFarmer { 138 | #[builder(default)] 139 | reward_address: subspace_sdk::PublicKey, 140 | #[builder(default = "50")] 141 | pieces_in_sector: u16, 142 | } 143 | 144 | #[derive(Deref, DerefMut)] 145 | pub struct Farmer { 146 | #[deref] 147 | #[deref_mut] 148 | farmer: subspace_sdk::Farmer, 149 | pub path: Arc, 150 | } 151 | 152 | impl FarmerBuilder { 153 | pub async fn build(self, node: &Node, space_pledged: ByteSize) -> Farmer { 154 | let InnerFarmer { reward_address, pieces_in_sector } = self._build().expect("Infallible"); 155 | let farmer = subspace_sdk::Farmer::builder() 156 | .max_pieces_in_sector(Some(pieces_in_sector)) 157 | .build( 158 | reward_address, 159 | &**node, 160 | &[FarmDescription::new( 161 | node.path().path().join("plot"), 162 | // TODO: account for overhead here 163 | space_pledged, 164 | )], 165 | NonZeroU8::new(20).expect("Static value should not fail; qed"), 166 | ) 167 | .await 168 | .unwrap(); 169 | Farmer { farmer, path: node.path() } 170 | } 171 | } 172 | 173 | impl Farmer { 174 | pub fn dev() -> FarmerBuilder { 175 | FarmerBuilder::default() 176 | } 177 | 178 | pub fn plot_dir(&self) -> PathBuf { 179 | self.path.path().join("plot") 180 | } 181 | 182 | pub async fn close(self) { 183 | self.farmer.close().await.unwrap() 184 | } 185 | } 186 | -------------------------------------------------------------------------------- /sdk/subspace-sdk/examples/mini-farmer.rs: -------------------------------------------------------------------------------- 1 | use std::num::NonZeroU8; 2 | use std::path::PathBuf; 3 | 4 | use anyhow::Context; 5 | use clap::{Parser, ValueEnum}; 6 | use futures::prelude::*; 7 | use subspace_sdk::node::{self, Event, Node, RewardsEvent, SubspaceEvent}; 8 | use subspace_sdk::{ByteSize, FarmDescription, Farmer, PublicKey}; 9 | use tracing_subscriber::prelude::*; 10 | 11 | #[global_allocator] 12 | static GLOBAL: mimalloc::MiMalloc = mimalloc::MiMalloc; 13 | 14 | #[derive(ValueEnum, Debug, Clone)] 15 | enum Chain { 16 | Gemini3h, 17 | Devnet, 18 | Dev, 19 | } 20 | 21 | /// Mini farmer 22 | #[derive(Parser, Debug)] 23 | #[command(author, version, about)] 24 | pub struct Args { 25 | /// Set the chain 26 | #[arg(value_enum)] 27 | chain: Chain, 28 | #[cfg(feature = "executor")] 29 | /// Run executor with specified domain 30 | #[arg(short, long)] 31 | executor: bool, 32 | /// Address for farming rewards 33 | #[arg(short, long)] 34 | reward_address: PublicKey, 35 | /// Path for all data 36 | #[arg(short, long)] 37 | base_path: Option, 38 | /// Size of 
the plot 39 | #[arg(short, long)] 40 | plot_size: ByteSize, 41 | /// Cache size 42 | #[arg(short, long, default_value_t = ByteSize::gib(1))] 43 | cache_size: ByteSize, 44 | } 45 | 46 | #[tokio::main] 47 | async fn main() -> anyhow::Result<()> { 48 | fdlimit::raise_fd_limit(); 49 | 50 | #[cfg(tokio_unstable)] 51 | let registry = tracing_subscriber::registry().with(console_subscriber::spawn()); 52 | #[cfg(not(tokio_unstable))] 53 | let registry = tracing_subscriber::registry(); 54 | 55 | registry 56 | .with(tracing_subscriber::fmt::layer()) 57 | .with( 58 | tracing_subscriber::EnvFilter::from_default_env() 59 | .add_directive("info".parse().unwrap()), 60 | ) 61 | .init(); 62 | 63 | let Args { 64 | chain, 65 | #[cfg(feature = "executor")] 66 | executor, 67 | reward_address, 68 | base_path, 69 | plot_size, 70 | cache_size: _, 71 | } = Args::parse(); 72 | let (base_path, _tmp_dir) = base_path.map(|x| (x, None)).unwrap_or_else(|| { 73 | let tmp = tempfile::tempdir().expect("Failed to create temporary directory"); 74 | (tmp.as_ref().to_owned(), Some(tmp)) 75 | }); 76 | 77 | let node_dir = base_path.join("node"); 78 | let node = match chain { 79 | Chain::Gemini3h => Node::gemini_3h().dsn(subspace_sdk::node::DsnBuilder::gemini_3h()), 80 | Chain::Devnet => Node::devnet().dsn(subspace_sdk::node::DsnBuilder::devnet()), 81 | Chain::Dev => Node::dev().dsn(subspace_sdk::node::DsnBuilder::dev()), 82 | } 83 | .role(node::Role::Authority); 84 | 85 | #[cfg(feature = "executor")] 86 | let node = if executor { 87 | node.system_domain( 88 | node::domains::ConfigBuilder::new() 89 | .rpc(subspace_sdk::node::RpcBuilder::new().addr("127.0.0.1:9990".parse().unwrap())) 90 | .role(node::Role::Authority), 91 | ) 92 | } else { 93 | node 94 | }; 95 | 96 | let node = node 97 | .build( 98 | &node_dir, 99 | match chain { 100 | Chain::Gemini3h => node::chain_spec::gemini_3h(), 101 | Chain::Devnet => node::chain_spec::devnet_config(), 102 | Chain::Dev => node::chain_spec::dev_config(), 103 | }, 104 | ) 105 | .await?; 106 | 107 | let sync = if !matches!(chain, Chain::Dev) { 108 | futures::future::Either::Left(node.sync()) 109 | } else { 110 | futures::future::Either::Right(futures::future::ok(())) 111 | }; 112 | 113 | tokio::select! 
{ 114 | result = sync => result?, 115 | _ = tokio::signal::ctrl_c() => { 116 | tracing::error!("Exitting..."); 117 | return node.close().await.context("Failed to close node") 118 | } 119 | } 120 | tracing::error!("Node was synced!"); 121 | 122 | let farmer = Farmer::builder() 123 | .build( 124 | reward_address, 125 | &node, 126 | &[FarmDescription::new(base_path.join("plot"), plot_size)], 127 | NonZeroU8::new(1).expect("static value should not fail; qed"), 128 | ) 129 | .await?; 130 | 131 | tokio::spawn({ 132 | let initial_plotting = 133 | farmer.iter_farms().await.next().unwrap().subscribe_initial_plotting_progress().await; 134 | async move { 135 | initial_plotting 136 | .for_each(|progress| async move { 137 | tracing::error!(?progress, "Plotting!"); 138 | }) 139 | .await; 140 | tracing::error!("Finished initial plotting!"); 141 | } 142 | }); 143 | 144 | let rewards_sub = { 145 | let node = &node; 146 | 147 | async move { 148 | let mut new_blocks = node.subscribe_finalized_heads().await?; 149 | while let Some(header) = new_blocks.next().await { 150 | let events = node.get_events(Some(header.hash)).await?; 151 | 152 | for event in events { 153 | match event { 154 | Event::Rewards( 155 | RewardsEvent::VoteReward { reward, voter: author } 156 | | RewardsEvent::BlockReward { reward, block_author: author }, 157 | ) if author == reward_address.into() => 158 | tracing::error!(%reward, "Received a reward!"), 159 | Event::Subspace(SubspaceEvent::FarmerVote { 160 | reward_address: author, 161 | height: block_number, 162 | .. 163 | }) if author == reward_address.into() => 164 | tracing::error!(block_number, "Vote counted for block"), 165 | _ => (), 166 | }; 167 | } 168 | 169 | if let Some(pre_digest) = header.pre_digest { 170 | if pre_digest.solution().reward_address == reward_address { 171 | tracing::error!("We authored a block"); 172 | } 173 | } 174 | } 175 | 176 | anyhow::Ok(()) 177 | } 178 | }; 179 | 180 | tokio::select! 
{ 181 | _ = rewards_sub => {}, 182 | _ = tokio::signal::ctrl_c() => { 183 | tracing::error!("Exitting..."); 184 | } 185 | } 186 | 187 | node.close().await.context("Failed to close node")?; 188 | farmer.close().await.context("Failed to close farmer")?; 189 | 190 | Ok(()) 191 | } 192 | -------------------------------------------------------------------------------- /sdk/node/src/domains/domain_instance_starter.rs: -------------------------------------------------------------------------------- 1 | use std::sync::Arc; 2 | 3 | use cross_domain_message_gossip::ChainTxPoolMsg; 4 | use domain_client_operator::OperatorStreams; 5 | use domain_eth_service::provider::EthProvider; 6 | use domain_eth_service::DefaultEthConfig; 7 | use domain_runtime_primitives::opaque::Block as DomainBlock; 8 | use domain_service::{FullBackend, FullClient}; 9 | use futures::StreamExt; 10 | use sc_client_api::ImportNotifications; 11 | use sc_consensus_subspace::block_import::BlockImportingNotification; 12 | use sc_consensus_subspace::notification::SubspaceNotificationStream; 13 | use sc_consensus_subspace::slot_worker::NewSlotNotification; 14 | use sc_network::NetworkService; 15 | use sc_service::{Configuration, RpcHandlers}; 16 | use sc_transaction_pool_api::OffchainTransactionPoolFactory; 17 | use sc_utils::mpsc::{TracingUnboundedReceiver, TracingUnboundedSender}; 18 | use sp_core::H256; 19 | use sp_domains::{DomainId, OperatorId, RuntimeType}; 20 | use sp_runtime::traits::NumberFor; 21 | use subspace_runtime::RuntimeApi as CRuntimeApi; 22 | use subspace_runtime_primitives::opaque::Block as CBlock; 23 | use subspace_service::FullClient as CFullClient; 24 | use tokio::task::JoinHandle; 25 | 26 | use crate::domains::utils::AccountId20; 27 | 28 | /// `DomainInstanceStarter` used to start a domain instance node based on the 29 | /// given bootstrap result 30 | pub struct DomainInstanceStarter { 31 | pub service_config: Configuration, 32 | pub maybe_operator_id: Option, 33 | pub domain_id: DomainId, 34 | pub runtime_type: RuntimeType, 35 | pub additional_arguments: Vec, 36 | pub consensus_client: Arc>, 37 | pub consensus_network: Arc>, 38 | pub block_importing_notification_stream: 39 | SubspaceNotificationStream>, 40 | pub new_slot_notification_stream: SubspaceNotificationStream, 41 | pub consensus_sync_service: Arc>, 42 | pub consensus_offchain_tx_pool_factory: OffchainTransactionPoolFactory, 43 | pub domain_message_receiver: TracingUnboundedReceiver, 44 | pub gossip_message_sink: TracingUnboundedSender, 45 | } 46 | 47 | impl DomainInstanceStarter { 48 | pub async fn prepare_for_start( 49 | self, 50 | domain_created_at: NumberFor, 51 | imported_block_notification_stream: ImportNotifications, 52 | ) -> anyhow::Result<(RpcHandlers, JoinHandle>)> { 53 | let DomainInstanceStarter { 54 | domain_id, 55 | consensus_network, 56 | maybe_operator_id, 57 | runtime_type, 58 | mut additional_arguments, 59 | service_config, 60 | consensus_client, 61 | block_importing_notification_stream, 62 | new_slot_notification_stream, 63 | consensus_sync_service, 64 | consensus_offchain_tx_pool_factory, 65 | domain_message_receiver, 66 | gossip_message_sink, 67 | } = self; 68 | 69 | let block_importing_notification_stream = || { 70 | block_importing_notification_stream.subscribe().then( 71 | |block_importing_notification| async move { 72 | ( 73 | block_importing_notification.block_number, 74 | block_importing_notification.acknowledgement_sender, 75 | ) 76 | }, 77 | ) 78 | }; 79 | 80 | let new_slot_notification_stream = || { 81 | 
new_slot_notification_stream.subscribe().then(|slot_notification| async move { 82 | ( 83 | slot_notification.new_slot_info.slot, 84 | slot_notification.new_slot_info.global_randomness, 85 | ) 86 | }) 87 | }; 88 | 89 | let operator_streams = OperatorStreams { 90 | // TODO: proper value 91 | consensus_block_import_throttling_buffer_size: 10, 92 | block_importing_notification_stream: block_importing_notification_stream(), 93 | imported_block_notification_stream, 94 | new_slot_notification_stream: new_slot_notification_stream(), 95 | _phantom: Default::default(), 96 | acknowledgement_sender_stream: futures::stream::empty(), 97 | }; 98 | 99 | match runtime_type { 100 | RuntimeType::Evm => { 101 | let eth_provider = EthProvider::< 102 | evm_domain_runtime::TransactionConverter, 103 | DefaultEthConfig< 104 | FullClient, 105 | FullBackend, 106 | >, 107 | >::new( 108 | Some(service_config.base_path.path()), 109 | additional_arguments.drain(..), 110 | ); 111 | 112 | let domain_params = domain_service::DomainParams { 113 | domain_id, 114 | domain_config: service_config, 115 | domain_created_at, 116 | maybe_operator_id, 117 | consensus_client, 118 | consensus_network, 119 | consensus_offchain_tx_pool_factory, 120 | consensus_network_sync_oracle: consensus_sync_service.clone(), 121 | operator_streams, 122 | gossip_message_sink, 123 | domain_message_receiver, 124 | provider: eth_provider, 125 | skip_empty_bundle_production: true, 126 | }; 127 | 128 | let mut domain_node = domain_service::new_full::< 129 | _, 130 | _, 131 | _, 132 | _, 133 | _, 134 | _, 135 | evm_domain_runtime::RuntimeApi, 136 | AccountId20, 137 | _, 138 | _, 139 | >(domain_params) 140 | .await 141 | .map_err(anyhow::Error::new)?; 142 | 143 | let domain_start_join_handle = sdk_utils::task_spawn( 144 | format!("domain-{}/start-domain", >::into(domain_id)), 145 | async move { 146 | domain_node.network_starter.start_network(); 147 | domain_node.task_manager.future().await.map_err(anyhow::Error::new) 148 | }, 149 | ); 150 | 151 | Ok((domain_node.rpc_handlers.clone(), domain_start_join_handle)) 152 | } 153 | } 154 | } 155 | } 156 | -------------------------------------------------------------------------------- /sdk/substrate/src/types.rs: -------------------------------------------------------------------------------- 1 | use derivative::Derivative; 2 | use derive_builder::Builder; 3 | use derive_more::{Deref, DerefMut, Display, From}; 4 | use sdk_utils::ByteSize; 5 | use serde::{Deserialize, Serialize}; 6 | 7 | /// Block pruning settings. 8 | #[derive(Debug, Clone, Copy, PartialEq, Default, Serialize, Deserialize, Eq, PartialOrd, Ord)] 9 | pub enum BlocksPruning { 10 | #[default] 11 | /// Keep full finalized block history. 12 | ArchiveCanonical, 13 | /// Keep full block history, of every block that was ever imported. 14 | Archive, 15 | /// Keep N recent finalized blocks. 16 | Number(u32), 17 | } 18 | 19 | impl From for BlocksPruning { 20 | fn from(value: sc_service::BlocksPruning) -> Self { 21 | match value { 22 | sc_service::BlocksPruning::KeepAll => Self::Archive, 23 | sc_service::BlocksPruning::KeepFinalized => Self::ArchiveCanonical, 24 | sc_service::BlocksPruning::Some(n) => Self::Number(n), 25 | } 26 | } 27 | } 28 | 29 | impl From for sc_service::BlocksPruning { 30 | fn from(value: BlocksPruning) -> Self { 31 | match value { 32 | BlocksPruning::Archive => Self::KeepAll, 33 | BlocksPruning::ArchiveCanonical => Self::KeepFinalized, 34 | BlocksPruning::Number(n) => Self::Some(n), 35 | } 36 | } 37 | } 38 | 39 | /// Pruning constraints. 
If none are specified pruning is 40 | #[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] 41 | pub struct Constraints { 42 | /// Maximum blocks. Defaults to 0 when unspecified, effectively keeping 43 | /// only non-canonical states. 44 | pub max_blocks: Option, 45 | } 46 | 47 | impl From for sc_state_db::Constraints { 48 | fn from(Constraints { max_blocks }: Constraints) -> Self { 49 | Self { max_blocks } 50 | } 51 | } 52 | 53 | impl From for Constraints { 54 | fn from(sc_state_db::Constraints { max_blocks }: sc_state_db::Constraints) -> Self { 55 | Self { max_blocks } 56 | } 57 | } 58 | 59 | /// Pruning mode. 60 | #[derive(Debug, Clone, Eq, PartialEq, Default, Serialize, Deserialize)] 61 | pub enum PruningMode { 62 | /// Canonicalization discards non-canonical nodes. All the canonical 63 | /// nodes are kept in the DB. 64 | #[default] 65 | ArchiveCanonical, 66 | /// No pruning. Canonicalization is a no-op. 67 | ArchiveAll, 68 | } 69 | 70 | impl From for sc_service::PruningMode { 71 | fn from(value: PruningMode) -> Self { 72 | match value { 73 | PruningMode::ArchiveAll => Self::ArchiveAll, 74 | PruningMode::ArchiveCanonical => Self::ArchiveCanonical, 75 | } 76 | } 77 | } 78 | 79 | /// Type wrapper with default value for implementation name 80 | #[derive( 81 | Debug, Clone, Derivative, Deserialize, Serialize, PartialEq, Eq, From, Deref, DerefMut, Display, 82 | )] 83 | #[derivative(Default)] 84 | #[serde(transparent)] 85 | pub struct ImplName( 86 | #[derivative(Default(value = "env!(\"CARGO_PKG_NAME\").to_owned()"))] pub String, 87 | ); 88 | 89 | /// Type wrapper with default value for implementation version 90 | #[derive( 91 | Debug, Clone, Derivative, Deserialize, Serialize, PartialEq, Eq, From, Deref, DerefMut, Display, 92 | )] 93 | #[derivative(Default)] 94 | #[serde(transparent)] 95 | pub struct ImplVersion( 96 | #[derivative(Default( 97 | value = "format!(\"{}-{}\", env!(\"CARGO_PKG_VERSION\"), env!(\"GIT_HASH\"))" 98 | ))] 99 | pub String, 100 | ); 101 | 102 | /// Storage monitor 103 | #[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)] 104 | pub struct StorageMonitor { 105 | /// How much space do we want to reserve 106 | pub threshold: ByteSize, 107 | /// Polling period for threshold 108 | pub polling_period: std::time::Duration, 109 | } 110 | 111 | impl From for sc_storage_monitor::StorageMonitorParams { 112 | fn from(StorageMonitor { threshold, polling_period }: StorageMonitor) -> Self { 113 | Self { 114 | threshold: (threshold.as_u64() / bytesize::MIB).max(1), 115 | polling_period: polling_period.as_secs().max(1) as u32, 116 | } 117 | } 118 | } 119 | 120 | /// Wrapper with default value for max subscriptions per connection 121 | #[derive( 122 | Debug, Clone, Derivative, Deserialize, Serialize, PartialEq, Eq, From, Deref, DerefMut, Display, 123 | )] 124 | #[derivative(Default)] 125 | #[serde(transparent)] 126 | pub struct MaxSubsPerConn(#[derivative(Default(value = "1024"))] pub usize); 127 | 128 | /// Offchain worker config 129 | #[derive(Debug, Clone, Derivative, Builder, Deserialize, Serialize, PartialEq, Eq)] 130 | #[derivative(Default)] 131 | #[builder(pattern = "owned", build_fn(name = "_build"), name = "OffchainWorkerBuilder")] 132 | #[non_exhaustive] 133 | pub struct OffchainWorker { 134 | /// Is enabled 135 | #[builder(default)] 136 | #[serde(default, skip_serializing_if = "sdk_utils::is_default")] 137 | pub enabled: bool, 138 | /// Is indexing enabled 139 | #[builder(default)] 140 | #[serde(default, skip_serializing_if = "sdk_utils::is_default")] 141 
| pub indexing_enabled: bool, 142 | } 143 | 144 | impl OffchainWorkerBuilder { 145 | /// Dev chain configuration 146 | pub fn dev() -> Self { 147 | Self::default() 148 | } 149 | 150 | /// Gemini 3g configuration 151 | pub fn gemini_3h() -> Self { 152 | Self::default().enabled(true) 153 | } 154 | 155 | /// Devnet configuration 156 | pub fn devnet() -> Self { 157 | Self::default().enabled(true) 158 | } 159 | } 160 | 161 | impl From for sc_service::config::OffchainWorkerConfig { 162 | fn from(OffchainWorker { enabled, indexing_enabled }: OffchainWorker) -> Self { 163 | Self { enabled, indexing_enabled } 164 | } 165 | } 166 | 167 | sdk_utils::generate_builder!(OffchainWorker); 168 | 169 | /// Role of the local node. 170 | #[derive(Debug, Clone, Copy, Default, Serialize, Deserialize, PartialEq, Eq)] 171 | pub enum Role { 172 | #[default] 173 | /// Regular full node. 174 | Full, 175 | /// Actual authority. 176 | Authority, 177 | } 178 | 179 | impl From for sc_service::Role { 180 | fn from(value: Role) -> Self { 181 | match value { 182 | Role::Full => sc_service::Role::Full, 183 | Role::Authority => sc_service::Role::Authority, 184 | } 185 | } 186 | } 187 | 188 | /// Available RPC methods. 189 | #[derive(Debug, Copy, Clone, Default, Serialize, Deserialize, PartialEq, Eq)] 190 | pub enum RpcMethods { 191 | /// Expose every RPC method only when RPC is listening on `localhost`, 192 | /// otherwise serve only safe RPC methods. 193 | #[default] 194 | Auto, 195 | /// Allow only a safe subset of RPC methods. 196 | Safe, 197 | /// Expose every RPC method (even potentially unsafe ones). 198 | Unsafe, 199 | } 200 | 201 | impl From for sc_service::RpcMethods { 202 | fn from(value: RpcMethods) -> Self { 203 | match value { 204 | RpcMethods::Auto => Self::Auto, 205 | RpcMethods::Safe => Self::Safe, 206 | RpcMethods::Unsafe => Self::Unsafe, 207 | } 208 | } 209 | } 210 | -------------------------------------------------------------------------------- /sdk/subspace-sdk/tests/integration/node.rs: -------------------------------------------------------------------------------- 1 | use std::sync::Arc; 2 | 3 | use futures::prelude::*; 4 | use subspace_sdk::utils::ByteSize; 5 | use tempfile::TempDir; 6 | use tracing_futures::Instrument; 7 | 8 | use crate::common::{Farmer, Node}; 9 | 10 | async fn sync_block_inner() { 11 | crate::common::setup(); 12 | 13 | let number_of_sectors = 10; 14 | let pieces_in_sector = 50u16; 15 | let sector_size = subspace_farmer_components::sector::sector_size(pieces_in_sector as _); 16 | let space_pledged = sector_size * number_of_sectors; 17 | 18 | let node = Node::dev().build(true).await; 19 | let farmer = Farmer::dev() 20 | .pieces_in_sector(pieces_in_sector) 21 | .build(&node, ByteSize::b(space_pledged as u64)) 22 | .await; 23 | 24 | let farm_blocks = 5; 25 | 26 | node.subscribe_new_heads() 27 | .await 28 | .unwrap() 29 | .skip_while(|notification| futures::future::ready(notification.number < farm_blocks)) 30 | .next() 31 | .await 32 | .unwrap(); 33 | 34 | farmer.close().await; 35 | 36 | let other_node = Node::dev() 37 | .chain(node.chain.clone()) 38 | .boot_nodes(node.listen_addresses().await.unwrap()) 39 | .not_force_synced(true) 40 | .not_authority(true) 41 | .build(false) 42 | .await; 43 | 44 | other_node.subscribe_syncing_progress().await.unwrap().for_each(|_| async {}).await; 45 | assert_eq!(other_node.get_info().await.unwrap().best_block.1, farm_blocks); 46 | 47 | node.close().await; 48 | other_node.close().await; 49 | } 50 | 51 | #[tokio::test(flavor = "multi_thread")] 52 | 
#[cfg_attr(any(tarpaulin, not(target_os = "linux")), ignore = "Slow tests are run only on linux")] 53 | async fn sync_block() { 54 | tokio::time::timeout(std::time::Duration::from_secs(60 * 60), sync_block_inner()).await.unwrap() 55 | } 56 | 57 | async fn sync_farm_inner() { 58 | crate::common::setup(); 59 | 60 | let number_of_sectors = 10; 61 | let pieces_in_sector = 50u16; 62 | let sector_size = subspace_farmer_components::sector::sector_size(pieces_in_sector as _); 63 | let space_pledged = sector_size * number_of_sectors; 64 | 65 | let node_span = tracing::trace_span!("node 1"); 66 | let node = Node::dev().build(true).instrument(node_span.clone()).await; 67 | 68 | let farmer = Farmer::dev() 69 | .pieces_in_sector(pieces_in_sector) 70 | .build(&node, ByteSize::b(space_pledged as u64)) 71 | .instrument(node_span.clone()) 72 | .await; 73 | 74 | let farm_blocks = 4; 75 | 76 | node.subscribe_new_heads() 77 | .await 78 | .unwrap() 79 | .skip_while(|notification| futures::future::ready(notification.number < farm_blocks)) 80 | .next() 81 | .await 82 | .unwrap(); 83 | 84 | let other_node_span = tracing::trace_span!("node 2"); 85 | let other_node = Node::dev() 86 | .dsn_boot_nodes(node.dsn_listen_addresses().await.unwrap()) 87 | .boot_nodes(node.listen_addresses().await.unwrap()) 88 | .not_force_synced(true) 89 | .chain(node.chain.clone()) 90 | .build(false) 91 | .instrument(other_node_span.clone()) 92 | .await; 93 | 94 | while other_node.get_info().await.unwrap().best_block.1 95 | < node.get_info().await.unwrap().best_block.1 96 | { 97 | tokio::time::sleep(std::time::Duration::from_secs(1)).await; 98 | } 99 | 100 | let other_farmer = Farmer::dev() 101 | .pieces_in_sector(pieces_in_sector) 102 | .build(&other_node, ByteSize::b(space_pledged as u64)) 103 | .instrument(other_node_span.clone()) 104 | .await; 105 | 106 | let farm = other_farmer.iter_farms().await.next().unwrap(); 107 | farm.subscribe_initial_plotting_progress().await.for_each(|_| async {}).await; 108 | farmer.close().await; 109 | 110 | farm.subscribe_new_solutions().await.next().await.expect("Solution stream never ends"); 111 | 112 | node.close().await; 113 | other_node.close().await; 114 | other_farmer.close().await; 115 | } 116 | 117 | #[tokio::test(flavor = "multi_thread")] 118 | #[cfg_attr(any(tarpaulin, not(target_os = "linux")), ignore = "Slow tests are run only on linux")] 119 | async fn sync_farm() { 120 | tokio::time::timeout(std::time::Duration::from_secs(60 * 60), sync_farm_inner()).await.unwrap() 121 | } 122 | 123 | #[tokio::test(flavor = "multi_thread")] 124 | #[ignore = "Substrate rpc server doesn't let node to properly exit"] 125 | async fn node_restart() { 126 | crate::common::setup(); 127 | let dir = Arc::new(TempDir::new().unwrap()); 128 | 129 | for i in 0..4 { 130 | tracing::error!(i, "Running new node"); 131 | Node::dev().path(dir.clone()).build(true).await.close().await; 132 | } 133 | } 134 | 135 | #[tokio::test(flavor = "multi_thread")] 136 | #[cfg_attr(any(tarpaulin, not(target_os = "linux")), ignore = "Slow tests are run only on linux")] 137 | async fn node_events() { 138 | crate::common::setup(); 139 | 140 | tokio::time::timeout(std::time::Duration::from_secs(30 * 60), async { 141 | let number_of_sectors = 10; 142 | let pieces_in_sector = 50u16; 143 | let sector_size = subspace_farmer_components::sector::sector_size(pieces_in_sector as _); 144 | let space_pledged = sector_size * number_of_sectors; 145 | 146 | let node = Node::dev().build(true).await; 147 | let farmer = Farmer::dev() 148 | 
.pieces_in_sector(pieces_in_sector) 149 | .build(&node, ByteSize::b(space_pledged as u64)) 150 | .await; 151 | 152 | let events = node 153 | .subscribe_new_heads() 154 | .await 155 | .unwrap() 156 | // Skip genesis 157 | .skip(1) 158 | .then(|_| node.get_events(None).boxed()) 159 | .take(1) 160 | .next() 161 | .await 162 | .unwrap() 163 | .unwrap(); 164 | 165 | assert!(!events.is_empty()); 166 | 167 | farmer.close().await; 168 | node.close().await; 169 | }) 170 | .await 171 | .unwrap(); 172 | } 173 | 174 | #[tokio::test(flavor = "multi_thread")] 175 | #[cfg_attr(any(tarpaulin, not(target_os = "linux")), ignore = "Slow tests are run only on linux")] 176 | async fn fetch_block_author() { 177 | crate::common::setup(); 178 | 179 | tokio::time::timeout(std::time::Duration::from_secs(30 * 60), async { 180 | let number_of_sectors = 10; 181 | let pieces_in_sector = 50u16; 182 | let sector_size = subspace_farmer_components::sector::sector_size(pieces_in_sector as _); 183 | let space_pledged = sector_size * number_of_sectors; 184 | 185 | let node = Node::dev().build(false).await; 186 | let reward_address = Default::default(); 187 | let farmer = Farmer::dev() 188 | .reward_address(reward_address) 189 | .pieces_in_sector(pieces_in_sector) 190 | .build(&node, ByteSize::b(space_pledged as u64)) 191 | .await; 192 | 193 | let block = node.subscribe_new_heads().await.unwrap().skip(1).take(1).next().await.unwrap(); 194 | assert_eq!(block.pre_digest.unwrap().solution().reward_address, reward_address); 195 | 196 | farmer.close().await; 197 | node.close().await; 198 | }) 199 | .await 200 | .unwrap(); 201 | } 202 | -------------------------------------------------------------------------------- /pulsar/src/commands/init.rs: -------------------------------------------------------------------------------- 1 | use std::io::{BufRead, Write}; 2 | use std::str::FromStr; 3 | 4 | use color_eyre::eyre::{eyre, Context, Error, Result}; 5 | use crossterm::terminal::{Clear, ClearType}; 6 | use crossterm::{cursor, execute}; 7 | use rand::prelude::IteratorRandom; 8 | use sp_core::Pair; 9 | use strum::IntoEnumIterator; 10 | use subspace_sdk::PublicKey; 11 | use zeroize::Zeroizing; 12 | 13 | use crate::config::{ 14 | create_config, AdvancedFarmerSettings, AdvancedNodeSettings, ChainConfig, Config, FarmerConfig, 15 | NodeConfig, DEFAULT_FARM_SIZE, 16 | }; 17 | use crate::utils::{ 18 | directory_parser, farm_directory_getter, get_user_input, node_directory_getter, 19 | node_name_parser, print_ascii_art, print_run_executable_command, print_version, 20 | reward_address_parser, size_parser, yes_or_no_parser, 21 | }; 22 | 23 | /// implementation of the `init` command 24 | /// 25 | /// prints a very cool ascii art, 26 | /// creates a config file from the user inputs 27 | pub(crate) fn init() -> Result<()> { 28 | let (mut config_file, config_path) = create_config()?; 29 | print_ascii_art(); 30 | print_version(); 31 | println!(); 32 | println!("Configuration creation process has started..."); 33 | let config = get_config_from_user_inputs()?; 34 | config_file 35 | .write_all(toml::to_string_pretty(&config).wrap_err("Failed to write config")?.as_ref()) 36 | .wrap_err("Failed to write config")?; 37 | 38 | println!("Configuration has been generated at {}", config_path.display()); 39 | 40 | println!("Ready for lift off! 
Run the following command to begin:"); 41 | print_run_executable_command(); 42 | 43 | Ok(()) 44 | } 45 | 46 | /// gets the necessary information from the user, and writes it to the given 47 | /// configuration file 48 | fn get_config_from_user_inputs() -> Result { 49 | // check if user has an existing reward address 50 | let reward_address_exist = get_user_input( 51 | "Do you have an existing farmer/reward address? [y/n]: ", 52 | None, 53 | yes_or_no_parser, 54 | )?; 55 | 56 | let reward_address = generate_or_get_reward_address(reward_address_exist) 57 | .context("reward address creation failed")?; 58 | 59 | // get node name 60 | let default_node_name = whoami::username(); 61 | let node_name = get_user_input( 62 | &format!( 63 | "Enter your node name to be identified on the network (defaults to \ 64 | `{default_node_name}`, press enter to use the default): " 65 | ), 66 | (default_node_name != "root").then_some(default_node_name), 67 | node_name_parser, 68 | )?; 69 | 70 | // get farm directory 71 | let default_farm_loc = farm_directory_getter(); 72 | let farm_directory = get_user_input( 73 | &format!( 74 | "Specify a path for storing farm files (press enter to use the default: \ 75 | `{default_farm_loc:?}`): ", 76 | ), 77 | Some(default_farm_loc), 78 | directory_parser, 79 | )?; 80 | 81 | let default_node_loc = node_directory_getter(); 82 | let node_directory = get_user_input( 83 | &format!( 84 | "Specify a path for storing node files (press enter to use the default: \ 85 | `{default_node_loc:?}`): ", 86 | ), 87 | Some(default_node_loc), 88 | directory_parser, 89 | )?; 90 | 91 | // get farm size 92 | let farm_size = get_user_input( 93 | &format!( 94 | "Specify a farm size (defaults to `{DEFAULT_FARM_SIZE}`, press enter to use the \ 95 | default): " 96 | ), 97 | Some(DEFAULT_FARM_SIZE), 98 | size_parser, 99 | )?; 100 | 101 | // get chain 102 | let default_chain = ChainConfig::Gemini3h; 103 | let chain = get_user_input( 104 | &format!( 105 | "Specify the chain to farm. Available options are: {:?}. \n Defaults to \ 106 | `{default_chain:?}`, press enter to use the default:", 107 | ChainConfig::iter().collect::>() 108 | ), 109 | Some(default_chain), 110 | ChainConfig::from_str, 111 | )?; 112 | 113 | let farmer_config = FarmerConfig { 114 | farm_size, 115 | farm_directory, 116 | reward_address, 117 | advanced: AdvancedFarmerSettings::default(), 118 | }; 119 | let node_config = NodeConfig { 120 | name: node_name, 121 | directory: node_directory, 122 | advanced: AdvancedNodeSettings::default(), 123 | }; 124 | 125 | Ok(Config { farmer: farmer_config, node: node_config, chain }) 126 | } 127 | 128 | fn generate_or_get_reward_address(reward_address_exist: bool) -> Result { 129 | if reward_address_exist { 130 | return get_user_input("Enter your farmer/reward address: ", None, reward_address_parser); 131 | } 132 | 133 | let wants_new_key = get_user_input( 134 | "Do you want to create a new farmer/reward key?
[y/n]: ", 135 | None, 136 | yes_or_no_parser, 137 | )?; 138 | 139 | if !wants_new_key { 140 | return Err(eyre!("New key creation was not confirmed")); 141 | } 142 | 143 | // generate new mnemonic and key pair 144 | let (pair, phrase, seed): ( 145 | sp_core::sr25519::Pair, 146 | String, 147 | ::Seed, 148 | ) = Pair::generate_with_phrase(None); 149 | let _seed = Zeroizing::new(seed); 150 | let phrase = Zeroizing::new(phrase); 151 | let words: Vec<&str> = phrase.split_whitespace().collect(); 152 | 153 | println!( 154 | "IMPORTANT NOTICE: The mnemonic displayed below is crucial to regain access to your \ 155 | account in case you forget your credentials. It's highly recommended to store it in a \ 156 | secure and retrievable location. Failure to do so may result in permanent loss of access \ 157 | to your account.\n" 158 | ); 159 | println!( 160 | "Please press 'Enter' after you've securely stored the mnemonic. Once you press 'Enter', \ 161 | the mnemonic will no longer be visible in this interface for security reasons.\n" 162 | ); 163 | // saving position, since we will later clear the mnemonic 164 | println!("Here is your mnemonic:"); 165 | execute!(std::io::stdout(), cursor::SavePosition).context("save position failed")?; 166 | println!("{}", phrase.as_str()); 167 | std::io::stdin().lock().lines().next(); 168 | 169 | // clear the mnemonic 170 | execute!(std::io::stdout(), cursor::RestorePosition).context("restore cursor failed")?; 171 | execute!(std::io::stdout(), Clear(ClearType::FromCursorDown)) 172 | .context("clear mnemonic failed")?; 173 | 174 | println!("...redacted..."); 175 | 176 | // User has to provide 3 randomly selected words from the mnemonic 177 | let mut rng = rand::thread_rng(); 178 | let word_indexes: Vec = (0..words.len()).choose_multiple(&mut rng, 3); 179 | 180 | for index in &word_indexes { 181 | loop { 182 | let word = get_user_input( 183 | &format!("Enter the {}th word in the mnemonic: ", index + 1), 184 | None, 185 | |input| Ok::(input.to_owned()), 186 | )?; 187 | 188 | if word == words[*index] { 189 | break; 190 | } else { 191 | println!("incorrect word, please try again.") 192 | } 193 | } 194 | } 195 | 196 | // print the public key and return it 197 | println!("Your new public key is: {}", pair.public()); 198 | let public_key_array = pair.public().0; 199 | Ok(public_key_array.into()) 200 | } 201 | -------------------------------------------------------------------------------- /sdk/node/src/domains/evm_chain_spec.rs: -------------------------------------------------------------------------------- 1 | //! 
System domain chain specs 2 | 3 | use std::str::FromStr; 4 | 5 | use evm_domain_runtime::{ 6 | AccountId, BalancesConfig, EVMChainIdConfig, EVMConfig, Precompiles, RuntimeGenesisConfig, 7 | SudoConfig, SystemConfig, WASM_BINARY, 8 | }; 9 | use hex_literal::hex; 10 | use sc_service::{ChainSpec as _, ChainType, GenericChainSpec}; 11 | use sdk_utils::chain_spec::chain_spec_properties; 12 | use sp_domains::storage::RawGenesis; 13 | use subspace_runtime_primitives::SSC; 14 | 15 | /// Chain spec type for the system domain 16 | pub type ChainSpec = GenericChainSpec; 17 | 18 | #[derive(Copy, Clone)] 19 | pub enum SpecId { 20 | Dev, 21 | Gemini, 22 | DevNet, 23 | } 24 | 25 | pub fn create_domain_spec(chain_id: &str, raw_genesis: RawGenesis) -> Result { 26 | // The value of the `RuntimeGenesisConfig` doesn't matter since it will be 27 | // overwritten later 28 | let constructor = RuntimeGenesisConfig::default; 29 | let mut chain_spec = match chain_id { 30 | "dev" => development_config(constructor), 31 | "gemini-3g" => gemini_3h_config(constructor), 32 | "devnet" => devnet_config(constructor), 33 | path => ChainSpec::from_json_file(std::path::PathBuf::from(path))?, 34 | }; 35 | 36 | chain_spec.set_storage(raw_genesis.into_storage()); 37 | 38 | Ok(chain_spec) 39 | } 40 | 41 | /// Development keys that will be injected automatically on polkadotjs apps 42 | fn get_dev_accounts() -> Vec { 43 | vec![ 44 | // Alith key 45 | AccountId::from(hex!("f24FF3a9CF04c71Dbc94D0b566f7A27B94566cac")), 46 | // Baltathar key 47 | AccountId::from(hex!("3Cd0A705a2DC65e5b1E1205896BaA2be8A07c6e0")), 48 | // Charleth key 49 | AccountId::from(hex!("798d4Ba9baf0064Ec19eB4F0a1a45785ae9D6DFc")), 50 | // Dorothy 51 | AccountId::from(hex!("773539d4Ac0e786233D90A233654ccEE26a613D9")), 52 | ] 53 | } 54 | 55 | pub fn get_testnet_genesis_by_spec_id(spec_id: SpecId) -> RuntimeGenesisConfig { 56 | match spec_id { 57 | SpecId::Dev => { 58 | let accounts = get_dev_accounts(); 59 | testnet_genesis( 60 | accounts.clone(), 61 | // Alith is Sudo 62 | Some(accounts[0]), 63 | ) 64 | } 65 | SpecId::Gemini => { 66 | let sudo_account = AccountId::from_str("f31e60022e290708c17d6997c34de6a30d09438f") 67 | .expect("Invalid Sudo account"); 68 | testnet_genesis( 69 | vec![ 70 | // Sudo account 71 | sudo_account, 72 | ], 73 | Some(sudo_account), 74 | ) 75 | } 76 | SpecId::DevNet => { 77 | let sudo_account = AccountId::from_str("b66a91845249464309fad766fd0ece8144547736") 78 | .expect("Invalid Sudo account"); 79 | testnet_genesis( 80 | vec![ 81 | // Sudo account 82 | sudo_account, 83 | ], 84 | Some(sudo_account), 85 | ) 86 | } 87 | } 88 | } 89 | 90 | /// Development config 91 | pub fn development_config RuntimeGenesisConfig + 'static + Send + Sync>( 92 | constructor: F, 93 | ) -> ChainSpec { 94 | // TODO: Migrate once https://github.com/paritytech/polkadot-sdk/issues/2963 is un-broken 95 | #[allow(deprecated)] 96 | ChainSpec::from_genesis( 97 | // Name 98 | "Development", 99 | // ID 100 | "evm_domain_dev", 101 | ChainType::Development, 102 | constructor, 103 | vec![], 104 | None, 105 | None, 106 | None, 107 | Some(chain_spec_properties()), 108 | None, 109 | // Code 110 | WASM_BINARY.expect("WASM binary was not build, please build it!"), 111 | ) 112 | } 113 | 114 | /// Gemini 3g config 115 | pub fn gemini_3h_config RuntimeGenesisConfig + 'static + Send + Sync>( 116 | constructor: F, 117 | ) -> ChainSpec { 118 | // TODO: Migrate once https://github.com/paritytech/polkadot-sdk/issues/2963 is un-broken 119 | #[allow(deprecated)] 120 | ChainSpec::from_genesis( 
121 | // Name 122 | "Subspace Gemini 3g EVM Domain", 123 | // ID 124 | "subspace_gemini_3h_evm_domain", 125 | ChainType::Live, 126 | constructor, 127 | // Bootnodes 128 | vec![], 129 | // Telemetry 130 | None, 131 | // Protocol ID 132 | Some("subspace-gemini-3g-evm-domain"), 133 | None, 134 | // Properties 135 | Some(chain_spec_properties()), 136 | // Extensions 137 | None, 138 | // Code 139 | WASM_BINARY.expect("WASM binary was not build, please build it!"), 140 | ) 141 | } 142 | 143 | pub fn devnet_config RuntimeGenesisConfig + 'static + Send + Sync>( 144 | constructor: F, 145 | ) -> ChainSpec { 146 | // TODO: Migrate once https://github.com/paritytech/polkadot-sdk/issues/2963 is un-broken 147 | #[allow(deprecated)] 148 | ChainSpec::from_genesis( 149 | // Name 150 | "Subspace Devnet EVM Domain", 151 | // ID 152 | "subspace_devnet_evm_domain", 153 | ChainType::Custom("Testnet".to_string()), 154 | constructor, 155 | // Bootnodes 156 | vec![], 157 | // Telemetry 158 | None, 159 | // Protocol ID 160 | Some("subspace-devnet-evm-domain"), 161 | None, 162 | // Properties 163 | Some(chain_spec_properties()), 164 | // Extensions 165 | None, 166 | // Code 167 | WASM_BINARY.expect("WASM binary was not build, please build it!"), 168 | ) 169 | } 170 | 171 | fn testnet_genesis( 172 | endowed_accounts: Vec, 173 | maybe_sudo_account: Option, 174 | ) -> RuntimeGenesisConfig { 175 | // This is the simplest bytecode to revert without returning any data. 176 | // We will pre-deploy it under all of our precompiles to ensure they can be 177 | // called from within contracts. 178 | // (PUSH1 0x00 PUSH1 0x00 REVERT) 179 | let revert_bytecode = vec![0x60, 0x00, 0x60, 0x00, 0xFD]; 180 | 181 | RuntimeGenesisConfig { 182 | system: SystemConfig::default(), 183 | sudo: SudoConfig { key: maybe_sudo_account }, 184 | transaction_payment: Default::default(), 185 | balances: BalancesConfig { 186 | balances: endowed_accounts.iter().cloned().map(|k| (k, 1_000_000 * SSC)).collect(), 187 | }, 188 | // this is set to default and chain_id will be set into genesis during the domain 189 | // instantiation on Consensus runtime. 190 | evm_chain_id: EVMChainIdConfig::default(), 191 | evm: EVMConfig { 192 | // We need _some_ code inserted at the precompile address so that 193 | // the evm will actually call the address. 194 | accounts: Precompiles::used_addresses() 195 | .into_iter() 196 | .map(|addr| { 197 | ( 198 | addr, 199 | fp_evm::GenesisAccount { 200 | nonce: Default::default(), 201 | balance: Default::default(), 202 | storage: Default::default(), 203 | code: revert_bytecode.clone(), 204 | }, 205 | ) 206 | }) 207 | .collect(), 208 | ..Default::default() 209 | }, 210 | ..Default::default() 211 | } 212 | } 213 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Archive Notice :warning: 2 | As of 2024-03-01, this project has been archived and is no longer actively maintained. 3 | 4 | # What does this mean 5 | 6 | - **No Updates:** The repository will not be receiving any updates or accepting pull requests. The code is provided as-is. 7 | - **Read-Only:** The repository is now read-only. You can still fork, download, or star the repository. 8 | - **No Support:** We will no longer be responding to issues or questions regarding this project. However, you may still find community support through existing issues or outside forums. 9 | 10 | # Why is this project archived? 
11 | 12 | This project is being archived due to the lack of a sustainable user community and our decision to concentrate our resources on more widely-used projects that are critical to our roadmap towards mainnet. 13 | 14 | We believe that focusing our efforts on projects with a broader user base and strategic importance will allow us to make a more significant impact and deliver better value to our community. 15 | 16 | # Looking Forward 17 | 18 | While this project is being archived, we encourage our vibrant community to take the reins! If you've found value in this project and have ideas for its evolution, we wholeheartedly support and encourage you to fork and develop your own versions. This is an opportunity for innovation and creativity – your contributions could lead to something even more impactful. 19 | 20 | For those who are looking for alternatives to this project, we recommend exploring [Space Acres](https://github.com/subspace/space-acres), an opinionated GUI application for farming on [Subspace Network](https://subspace.network/). For those who prefer a CLI experience see the [Advanced CLI](https://docs.subspace.network/docs/farming-&-staking/farming/advanced-cli/cli-install) instructions. 21 | 22 | We extend our deepest gratitude to everyone who has contributed to and supported this project. Your engagement and feedback have been invaluable, and we look forward to seeing how the community takes these ideas forward in new and exciting directions. 23 | 24 |
25 | 26 | # Pulsar 27 | 28 | Pulsar simplifies the farming process on Subspace Network. 29 | 30 | [![ci-tests](https://img.shields.io/github/actions/workflow/status/subspace/pulsar/ci-tests.yml?branch=main&label=CI&logo=github&style=for-the-badge)](https://github.com/subspace/pulsar/actions/workflows/ci-tests.yml) 31 | [![Rust Docs](https://img.shields.io/github/actions/workflow/status/subspace/pulsar/rustdoc.yml?branch=main&label=RUST-DOCS&logo=github&style=for-the-badge)](https://github.com/subspace/pulsar/actions/workflows/rustdoc.yml) 32 | [![Latest Release](https://img.shields.io/github/v/release/subspace/pulsar?include_prereleases&logo=github&style=for-the-badge)](https://github.com/subspace/pulsar/releases) 33 | 34 | ![prompt](images/pulsar-prompt.png) 35 | 36 |
37 | 38 | --- 39 | 40 | Instead of running one terminal instance for the farmer and another terminal instance for the node, you can now run a SINGLE terminal instance to farm! 41 | 42 | ## How to Use (commands) 43 | 44 | 1. Download the executable from [releases](https://github.com/subspace/pulsar/releases) 45 | 2. In your terminal, change your directory to where you downloaded the file. For example: if you downloaded the file to your `Downloads` folder, run `cd Downloads`. 46 | 3. We will refer to your executable as `pulsar`; adjust the commands below to match your executable's full name. 47 | 4. Run `./pulsar init` -> this will initialize your config file, which will store the necessary information for you to farm. 48 | 5. Run `./pulsar farm` -> this will start farming. Yes, it is that simple! Enjoy! 🎉 49 | 50 | ## Other commands 51 | 52 | - `wipe` -> This is a dangerous one. If you want to delete everything and start over, this will permanently delete your plots and your node data (this will not erase any rewards you have gained, don't worry). 53 | - `info` -> This will show information about your farming. 54 | 55 | ## Daemonizing the Process (Moving it to the Background) 56 | 57 | In some instances, you may want to move the farming process to the background. Tools like [`screen`](https://www.gnu.org/software/screen/manual/screen.html) and [`tmux`](https://github.com/tmux/tmux) can help manage this. 58 | 59 | ![Alt text](images/culture.jpeg) 60 | 61 | ### Example with `tmux` 62 | 63 | ```sh 64 | $ tmux -S farming 65 | ``` 66 | 67 | This will create a new `tmux` session using a socket file named `farming`. 68 | 69 | Once the tmux session is created, you can go ahead and run the farming process. 70 | 71 | ```sh 72 | $ ./pulsar farm 73 | ``` 74 | 75 | Once it's running, you can detach the process by pressing `CTRL+b d` (read more about [detaching a session](https://linuxhint.com/detach-session-tmux/)) 76 | 77 | That's it, you should be back to your terminal, with _subspace farming_ running in the background as a session. 78 | 79 | To re-attach to your session, use tmux: 80 | 81 | ```sh 82 | $ tmux -S farming attach 83 | ``` 84 | 85 | If you ever want to delete/kill your farming session, enter the command: 86 | 87 | ```sh 88 | tmux kill-session -t farming 89 | ``` 90 | 91 | ### Example with `screen` 92 | 93 | ```sh 94 | screen -S farming 95 | ``` 96 | 97 | This will create a new `screen` session. 98 | 99 | ```sh 100 | ./pulsar farm 101 | ``` 102 | 103 | Once it's running, you can detach the process by pressing `CTRL+a d`. 104 | 105 | To re-attach it to your current session: 106 | 107 | ```sh 108 | screen -r farming 109 | ``` 110 | 111 | If you ever want to delete/kill your farming session, enter the command: 112 | 113 | ```sh 114 | screen -S farming -X quit 115 | ``` 116 | 117 | ## Binary 118 | 119 | ### macOS 120 | 121 | Install using the [homebrew](https://brew.sh/) package manager: 122 | 123 | ```sh 124 | brew tap subspace/homebrew-pulsar 125 | brew install pulsar 126 | ``` 127 | 128 | ## Developer 129 | 130 | ### Pre-requisites 131 | 132 | You'll need the [Rust toolchain](https://rustup.rs/) installed, as well as LLVM, Clang and CMake, in addition to the usual developer tooling. 133 | 134 | Below are some examples of how to install these dependencies on different operating systems. 135 | 136 | #### Ubuntu 137 | 138 | ```bash 139 | sudo apt-get install llvm clang cmake 140 | ``` 141 | 142 | #### macOS 143 | 144 | 1.
Install via Homebrew: 145 | 146 | ```bash 147 | brew install llvm@15 clang cmake 148 | ``` 149 | 150 | 2. Add `llvm` to your `~/.zshrc` or `~/.bashrc`: 151 | 152 | ```bash 153 | export PATH="/opt/homebrew/opt/llvm@15/bin:$PATH" 154 | ``` 155 | 156 | 3. Activate the changes: 157 | 158 | ```bash 159 | source ~/.zshrc 160 | # or 161 | source ~/.bashrc 162 | ``` 163 | 164 | 4. Verify that `llvm` is installed: 165 | 166 | ```bash 167 | llvm-config --version 168 | ``` 169 | 170 | ### Build from Source 171 | 172 | Ensure the [pre-requisites](#pre-requisites) are installed. 173 | 174 | Then run: 175 | 176 | ```sh 177 | $ cargo build 178 | ``` 179 | 180 | > Use the `--release` flag for an optimized release build - `./target/release/pulsar` 181 | 182 | ### Install CLI 183 | 184 | #### Using cargo 185 | 186 | After ensuring the [pre-requisites](#pre-requisites), build using cargo: 187 | 188 | ```sh 189 | $ cargo build --release 190 | ``` 191 | 192 | This will generate an optimized binary. 193 | 194 | You can then install the optimized binary to your system: 195 | 196 | ```sh 197 | $ cargo install --path . 198 | ``` 199 | 200 | The binary gets added to `~/.cargo/bin`, which is included in the PATH environment variable by default when installing Rust tooling, so you can run it immediately from the shell. 201 | 202 | This way, you don't need to download the executable from the [releases](https://github.com/subspace/pulsar/releases) page each time there is a new release; you can simply pull the latest code from the repository and build it locally. 203 | -------------------------------------------------------------------------------- /pulsar/src/main.rs: -------------------------------------------------------------------------------- 1 | //! CLI application for farming 2 | //!
brings `farmer` and `node` together 3 | 4 | #![deny(missing_docs, clippy::unwrap_used)] 5 | #![feature(concat_idents)] 6 | 7 | mod commands; 8 | mod config; 9 | mod summary; 10 | mod utils; 11 | 12 | #[cfg(test)] 13 | mod tests; 14 | 15 | use std::io::{self, Write}; 16 | 17 | use clap::{Parser, Subcommand}; 18 | use color_eyre::eyre::{Context, Report}; 19 | use color_eyre::Help; 20 | use crossterm::event::{Event, KeyCode}; 21 | use crossterm::terminal::{disable_raw_mode, enable_raw_mode}; 22 | use crossterm::{cursor, execute}; 23 | use owo_colors::OwoColorize; 24 | use strum::IntoEnumIterator; 25 | use strum_macros::EnumIter; 26 | use tracing::instrument; 27 | 28 | use crate::commands::farm::farm; 29 | use crate::commands::info::info; 30 | use crate::commands::init::init; 31 | use crate::commands::wipe::wipe_config; 32 | use crate::utils::{get_user_input, open_log_dir, support_message, yes_or_no_parser}; 33 | 34 | #[global_allocator] 35 | static GLOBAL: mimalloc::MiMalloc = mimalloc::MiMalloc; 36 | 37 | #[derive(Debug, Parser)] 38 | #[command(subcommand_required = false)] 39 | #[command(name = "pulsar")] 40 | #[command(about = "pulsar", long_about = None)] 41 | #[command(version)] 42 | struct Cli { 43 | #[command(subcommand)] 44 | command: Option, 45 | } 46 | 47 | /// Available commands for the CLI 48 | #[derive(Debug, Subcommand, EnumIter)] 49 | enum Commands { 50 | #[command(about = "initializes the config file required for the farming")] 51 | Init, 52 | #[command(about = "starting the farming process (along with node in the background)")] 53 | Farm { 54 | #[arg(short, long, action)] 55 | verbose: bool, 56 | #[arg(short, long, action)] 57 | enable_domains: bool, 58 | #[arg(long, action)] 59 | no_rotation: bool, 60 | }, 61 | #[command(about = "wipes the node and farm instance (along with your farms)")] 62 | Wipe { 63 | #[arg(long, action)] 64 | farmer: bool, 65 | #[arg(long, action)] 66 | node: bool, 67 | }, 68 | #[command(about = "displays info about the farmer instance (i.e. 
total amount of rewards, \ 69 | and status of initial plotting)")] 70 | Info, 71 | OpenLogs, 72 | } 73 | 74 | #[tokio::main] 75 | #[instrument] 76 | async fn main() -> Result<(), Report> { 77 | let args = Cli::parse(); 78 | match args.command { 79 | Some(Commands::Info) => { 80 | info().await.suggestion(support_message())?; 81 | } 82 | Some(Commands::Init) => { 83 | init().suggestion(support_message())?; 84 | } 85 | Some(Commands::Farm { verbose, enable_domains, no_rotation }) => { 86 | farm(verbose, enable_domains, no_rotation).await.suggestion(support_message())?; 87 | } 88 | Some(Commands::Wipe { farmer, node }) => { 89 | wipe_config(farmer, node).await.suggestion(support_message())?; 90 | } 91 | Some(Commands::OpenLogs) => { 92 | open_log_dir().suggestion(support_message())?; 93 | } 94 | None => arrow_key_mode().await.suggestion(support_message())?, 95 | } 96 | 97 | Ok(()) 98 | } 99 | 100 | #[instrument] 101 | async fn arrow_key_mode() -> Result<(), Report> { 102 | let mut stdout = io::stdout(); 103 | 104 | // Options to be displayed 105 | let options = Commands::iter().map(|command| command.to_string()).collect::>(); 106 | 107 | // Selected option index 108 | let mut selected = 0; 109 | 110 | // get the current location of the cursor 111 | let position = cursor::position()?.1; 112 | 113 | enable_raw_mode()?; 114 | 115 | // Print options to the terminal 116 | print_options(&mut stdout, &options, selected, position)?; 117 | 118 | // Process input events 119 | loop { 120 | if let Event::Key(event) = crossterm::event::read()? { 121 | match event.code { 122 | KeyCode::Up | KeyCode::Char('k') => { 123 | // Move selection up 124 | if selected > 0 { 125 | selected -= 1; 126 | print_options(&mut stdout, &options, selected, position)?; 127 | } 128 | } 129 | KeyCode::Down | KeyCode::Char('j') => { 130 | // Move selection down 131 | if selected < options.len() - 1 { 132 | selected += 1; 133 | print_options(&mut stdout, &options, selected, position)?; 134 | } 135 | } 136 | KeyCode::Enter => { 137 | break; 138 | } 139 | KeyCode::Char('c') 140 | if event.modifiers.contains(crossterm::event::KeyModifiers::CONTROL) => 141 | { 142 | return Ok(()); 143 | } 144 | _ => {} 145 | } 146 | } 147 | } 148 | 149 | disable_raw_mode()?; 150 | 151 | // Move the cursor two lines below the options 152 | execute!(stdout, cursor::MoveTo(0, position + options.len() as u16 + 6))?; 153 | 154 | match selected { 155 | 0 => { 156 | init().suggestion(support_message())?; 157 | } 158 | 1 => { 159 | let prompt = "Do you want to initialize farmer in verbose mode? [y/n]: "; 160 | let verbose = 161 | get_user_input(prompt, None, yes_or_no_parser).context("prompt failed")?; 162 | 163 | let prompt = "Do you want to run a domain node? [y/n]: "; 164 | let enable_domains = 165 | get_user_input(prompt, None, yes_or_no_parser).context("prompt failed")?; 166 | 167 | let prompt = "Do you want to disable rotation for logs? 
[y/n]: "; 168 | let no_rotation = 169 | get_user_input(prompt, None, yes_or_no_parser).context("prompt failed")?; 170 | 171 | farm(verbose, enable_domains, no_rotation).await.suggestion(support_message())?; 172 | } 173 | 2 => { 174 | wipe_config(false, false).await.suggestion(support_message())?; 175 | } 176 | 3 => { 177 | info().await.suggestion(support_message())?; 178 | } 179 | 4 => { 180 | open_log_dir().suggestion(support_message())?; 181 | } 182 | _ => { 183 | unreachable!("this number must stay in [0-4]") 184 | } 185 | } 186 | 187 | Ok(()) 188 | } 189 | 190 | // Helper function to print options to the terminal 191 | fn print_options( 192 | stdout: &mut io::Stdout, 193 | options: &[String], 194 | selected: usize, 195 | position: u16, 196 | ) -> io::Result<()> { 197 | execute!(stdout, cursor::MoveTo(1, position + 2), cursor::SavePosition)?; 198 | writeln!(stdout, "Please select an option below using arrow keys (or `j` and `k`):\n",)?; 199 | 200 | // Print options to the terminal 201 | for (i, option) in options.iter().enumerate() { 202 | if i == selected { 203 | let output = format!(" > {} ", option); 204 | writeln!(stdout, "{} {}", cursor::MoveTo(1, i as u16 + position + 4), output.green())?; 205 | } else { 206 | let output = format!(" {} ", option); 207 | writeln!(stdout, "{} {}", cursor::MoveTo(1, i as u16 + position + 4), output)?; 208 | } 209 | } 210 | writeln!(stdout, "\n\r")?; 211 | stdout.flush()?; 212 | 213 | execute!(stdout, cursor::RestorePosition)?; 214 | 215 | Ok(()) 216 | } 217 | 218 | impl std::fmt::Display for Commands { 219 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 220 | match *self { 221 | Commands::Farm { verbose: _, enable_domains: _, no_rotation: _ } => write!(f, "farm"), 222 | Commands::Wipe { farmer: _, node: _ } => write!(f, "wipe"), 223 | Commands::Info => write!(f, "info"), 224 | Commands::Init => write!(f, "init"), 225 | Commands::OpenLogs => write!(f, "open logs directory"), 226 | } 227 | } 228 | } 229 | -------------------------------------------------------------------------------- /.github/workflows/ci-tests.yml: -------------------------------------------------------------------------------- 1 | name: "ci tests" 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | paths-ignore: 8 | - "**.md" 9 | pull_request: 10 | paths-ignore: 11 | - "**.md" 12 | workflow_dispatch: 13 | inputs: 14 | test-macos-and-windows: 15 | description: "run macOS and Windows tests" 16 | required: true 17 | default: false 18 | type: boolean 19 | 20 | concurrency: 21 | group: push-${{ github.workflow }}-${{ github.ref }} 22 | cancel-in-progress: true 23 | 24 | env: 25 | # Not needed in CI, should make things a bit faster 26 | CARGO_INCREMENTAL: 0 27 | CARGO_TERM_COLOR: always 28 | MAX_TARGET_SIZE: 1024 # MB 29 | # TODO: AES flag is such that we have decent performance on ARMv8, remove once `aes` crate bumps MSRV to at least 30 | # 1.61: https://github.com/RustCrypto/block-ciphers/issues/373 31 | RUSTFLAGS: -C strip=symbols -C opt-level=s --cfg aes_armv8 32 | 33 | jobs: 34 | fmt: 35 | runs-on: ${{ fromJson(github.repository_owner == 'subspace' && '["self-hosted", "ubuntu-20.04-x86-64"]' || '"ubuntu-22.04"') }} 36 | steps: 37 | - name: git checkout 38 | uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8 # v3.1.0 39 | 40 | - name: cargo fmt 41 | uses: actions-rs/cargo@ae10961054e4aa8b4aa7dffede299aaf087aa33b # @v1.0.1 42 | with: 43 | command: fmt 44 | args: --all -- --check 45 | 46 | clippy: 47 | strategy: 48 | fail-fast: false 49 | matrix: 50 | 
os: ${{ fromJson(github.repository_owner == 'subspace' && '[["self-hosted", "ubuntu-20.04-x86-64"], ["self-hosted", "macos-14-arm64"], ["self-hosted", "windows-server-2022-x86-64"]]' || '["ubuntu-20.04", "macos-14", "windows-2022"]') }} 51 | run-all: 52 | - ${{ inputs.test-macos-and-windows == true || github.ref == 'refs/heads/main' }} 53 | exclude: # exclude macos-14 and windows-2022 when the condition is false 54 | - run-all: false 55 | os: macos-14 56 | - run-all: false 57 | os: windows-2022 58 | 59 | runs-on: ${{ matrix.os }} 60 | steps: 61 | - name: git checkout 62 | uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8 # v3.1.0 63 | 64 | # On macOS, we need a proper Clang version, not Apple's custom version without wasm32 support 65 | - name: Install LLVM and Clang 66 | uses: KyleMayes/install-llvm-action@8852e4d5c58653ed05135c0a5d949d9c2febcb00 # v1.6.1 67 | with: 68 | version: "15.0" 69 | 70 | - name: Install Protoc 71 | uses: arduino/setup-protoc@9b1ee5b22b0a3f1feb8c2ff99b32c89b3c3191e9 # v2.0.0 72 | with: 73 | repo-token: ${{ secrets.GITHUB_TOKEN }} 74 | 75 | # Needed for hwloc 76 | - name: Install automake (macOS) 77 | run: brew install automake 78 | if: runner.os == 'macOS' 79 | 80 | # Workaround to resolve link error with C:\msys64\mingw64\bin\libclang.dll 81 | - name: Remove msys64 82 | run: Remove-Item -LiteralPath "C:\msys64\" -Force -Recurse 83 | if: runner.os == 'Windows' 84 | continue-on-error: true 85 | 86 | - name: Configure cache 87 | uses: actions/cache@704facf57e6136b1bc63b828d79edcd491f0ee84 # @v3.3.2 88 | with: 89 | path: | 90 | ~/.cargo/registry 91 | ~/.cargo/bin 92 | ~/.cargo/git 93 | key: ${{ runner.os }}-${{ runner.arch }}-cargo-${{ hashFiles('**/Cargo.toml') }} 94 | restore-keys: | 95 | ${{ runner.os }}-${{ runner.arch }}-cargo- 96 | 97 | - name: Clean unused crate source checkouts and git repo checkouts 98 | run: cargo cache 99 | 100 | - name: cargo clippy (Linux & Windows) 101 | uses: actions-rs/clippy-check@b5b5f21f4797c02da247df37026fcd0a5024aa4d # v1.0.7 102 | with: 103 | token: ${{ secrets.GITHUB_TOKEN }} 104 | args: --locked --all-targets -- -D warnings 105 | if: runner.os != 'macOS' 106 | 107 | - name: cargo clippy (MacOS) 108 | uses: actions-rs/clippy-check@b5b5f21f4797c02da247df37026fcd0a5024aa4d # @v1.0.7 109 | with: 110 | token: ${{ secrets.GITHUB_TOKEN }} 111 | args: --locked --no-default-features -- -D warnings 112 | if: runner.os == 'macOS' 113 | 114 | - name: Clean unused artifacts 115 | run: cargo sweep --maxsize ${{ env.MAX_TARGET_SIZE }} 116 | 117 | test: 118 | strategy: 119 | matrix: 120 | os: ${{ fromJson(github.repository_owner == 'subspace' && '[["self-hosted", "ubuntu-20.04-x86-64"], ["self-hosted", "macos-14-arm64"], ["self-hosted", "windows-server-2022-x86-64"]]' || '["ubuntu-20.04", "macos-14", "windows-2022"]') }} 121 | run-all: 122 | - ${{ inputs.test-macos-and-windows || github.ref == 'refs/heads/master' }} 123 | exclude: # exclude macos-14 and window-2022 when the condition is false 124 | - run-all: false 125 | os: macos-14 126 | - run-all: false 127 | os: windows-2022 128 | 129 | runs-on: ${{ matrix.os }} 130 | 131 | steps: 132 | - name: git checkout 133 | uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8 # v3.1.0 134 | 135 | # On macOS, we need a proper Clang version, not Apple's custom version without wasm32 support 136 | - name: Install LLVM and Clang 137 | uses: KyleMayes/install-llvm-action@8852e4d5c58653ed05135c0a5d949d9c2febcb00 # v1.6.1 138 | with: 139 | version: "15.0" 140 | if: runner.os == 
'macOS' 141 | 142 | - name: Install Protoc 143 | uses: arduino/setup-protoc@9b1ee5b22b0a3f1feb8c2ff99b32c89b3c3191e9 # v2.0.0 144 | with: 145 | repo-token: ${{ secrets.GITHUB_TOKEN }} 146 | 147 | # Needed for hwloc 148 | - name: Install automake (macOS) 149 | run: brew install automake 150 | if: runner.os == 'macOS' 151 | 152 | # Workaround to resolve link error with C:\msys64\mingw64\bin\libclang.dll 153 | - name: Remove msys64 154 | run: Remove-Item -LiteralPath "C:\msys64\" -Force -Recurse 155 | if: runner.os == 'Windows' 156 | continue-on-error: true 157 | 158 | - name: Add cache 159 | uses: actions/cache@704facf57e6136b1bc63b828d79edcd491f0ee84 # @v3.3.2 160 | if: runner.os != 'Windows' 161 | with: 162 | path: | 163 | ~/.cargo/registry 164 | ~/.cargo/bin 165 | ~/.cargo/git 166 | key: ${{ runner.os }}-${{ runner.arch }}-cargo-${{ hashFiles('**/Cargo.toml') }} 167 | restore-keys: | 168 | ${{ runner.os }}-${{ runner.arch }}-cargo- 169 | 170 | - name: Install utils for caching 171 | if: runner.os != 'Windows' 172 | run: | 173 | test -x ~/.cargo/bin/cargo-sweep || cargo install cargo-sweep 174 | test -x ~/.cargo/bin/cargo-cache || cargo install cargo-cache --no-default-features --features ci-autoclean cargo-cache 175 | 176 | - name: Clean unused crate source checkouts and git repo checkouts 177 | if: runner.os != 'Windows' 178 | run: cargo cache 179 | 180 | - name: Build and run tests (linux & windows) 181 | run: cargo test --locked -- --test-threads 1 182 | if: runner.os != 'macOS' 183 | 184 | - name: Build and run tests (macOS) 185 | run: cargo test --locked --no-default-features -- --test-threads 1 186 | if: runner.os == 'macOS' 187 | 188 | - name: Clean unused artifacts 189 | if: runner.os != 'Windows' 190 | run: cargo sweep --maxsize ${{ env.MAX_TARGET_SIZE }} 191 | 192 | docs: 193 | runs-on: ubuntu-22.04 194 | steps: 195 | - name: git checkout 196 | uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8 # v3.1.0 197 | 198 | - name: Install Protoc 199 | uses: arduino/setup-protoc@9b1ee5b22b0a3f1feb8c2ff99b32c89b3c3191e9 # v2.0.0 200 | with: 201 | repo-token: ${{ secrets.GITHUB_TOKEN }} 202 | 203 | - name: Configure cache 204 | uses: Swatinem/rust-cache@359a70e43a0bb8a13953b04a90f76428b4959bb6 # v2.2.0 205 | 206 | - name: Check Documentation 207 | run: cargo doc --locked --no-deps 208 | env: 209 | RUSTDOCFLAGS: "-D rustdoc::broken-intra-doc-links -D rustdoc::private_intra_doc_links" 210 | -------------------------------------------------------------------------------- /pulsar/src/summary.rs: -------------------------------------------------------------------------------- 1 | /// Stores the summary of the farming process into a file. 2 | /// This allows to retrieve farming information with `info` command, 3 | /// and also store the amount of potentially farmed blocks during the initial 4 | /// plotting progress, so that progress bar won't be affected with `println!`, 5 | /// and user will still know about them when initial plotting is finished. 
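// For orientation, a `summary.toml` written by this module looks roughly like the
// sketch below (the values are hypothetical, and the exact representation of the
// `user_space_pledged` field depends on how `subspace_sdk::ByteSize` serializes):
//
//   initial_plotting_finished = true
//   authored_count = 3
//   user_space_pledged = "2.0 GB"
//   last_processed_block_num = 123456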
6 | use std::fs::remove_file; 7 | use std::path::PathBuf; 8 | use std::sync::Arc; 9 | 10 | use color_eyre::eyre::{Context, Result}; 11 | use derive_more::{AddAssign, Display, From, FromStr}; 12 | use serde::{Deserialize, Serialize}; 13 | use subspace_sdk::node::BlockNumber; 14 | use subspace_sdk::ByteSize; 15 | use tokio::fs::{create_dir_all, File, OpenOptions}; 16 | use tokio::io::{AsyncReadExt, AsyncSeekExt, AsyncWriteExt}; 17 | use tokio::sync::{Mutex, MutexGuard}; 18 | use tracing::instrument; 19 | 20 | // TODO: delete this when https://github.com/toml-rs/toml/issues/540 is solved 21 | #[derive(Debug, Clone, Copy, Default, Display, AddAssign, FromStr, From)] 22 | pub(crate) struct Rewards(pub(crate) u128); 23 | 24 | /// struct for updating the fields of the summary 25 | #[derive(Default, Debug)] 26 | pub(crate) struct SummaryUpdateFields { 27 | pub(crate) is_plotting_finished: bool, 28 | pub(crate) new_authored_count: u64, 29 | pub(crate) new_parsed_blocks: BlockNumber, 30 | } 31 | 32 | /// Struct for holding the info of what to be displayed with the `info` command, 33 | /// and printing rewards to user in `farm` command 34 | #[derive(Deserialize, Serialize, Default, Debug, Clone, Copy)] 35 | pub(crate) struct Summary { 36 | pub(crate) initial_plotting_finished: bool, 37 | pub(crate) authored_count: u64, 38 | // Deprecated: Will be removed in next version 39 | #[serde(skip)] 40 | pub(crate) vote_count: u64, 41 | // Deprecated: Will be removed in next version 42 | #[serde(skip)] 43 | pub(crate) total_rewards: Rewards, 44 | pub(crate) user_space_pledged: ByteSize, 45 | pub(crate) last_processed_block_num: BlockNumber, 46 | } 47 | 48 | /// utilizing persistent storage for the information to be displayed for the 49 | /// `info` command 50 | #[derive(Debug, Clone)] 51 | pub(crate) struct SummaryFile { 52 | inner: Arc>, 53 | } 54 | 55 | impl SummaryFile { 56 | /// creates a new summary file Mutex 57 | /// 58 | /// if user_space_pledged is provided, it creates a new summary file 59 | /// else, it tries to open the existing summary file 60 | #[instrument] 61 | pub(crate) async fn new(user_space_pledged: Option) -> Result { 62 | let summary_path = summary_path(); 63 | let summary_dir = summary_dir(); 64 | 65 | let mut summary_file; 66 | // providing `Some` value for `user_space_pledged` means, we are creating a new 67 | // file, so, first check if the file exists to not erase its content 68 | if let Some(user_space_pledged) = user_space_pledged { 69 | // File::create will truncate the existing file, so first 70 | // check if the file exists, if not, `open` will return an error 71 | // in this case, create the file and necessary directories 72 | if File::open(&summary_path).await.is_err() { 73 | let _ = create_dir_all(&summary_dir).await; 74 | let _ = File::create(&summary_path).await; 75 | let initialization = Summary { 76 | initial_plotting_finished: false, 77 | authored_count: 0, 78 | vote_count: 0, 79 | total_rewards: Rewards(0), 80 | user_space_pledged, 81 | last_processed_block_num: 0, 82 | }; 83 | let summary_text = 84 | toml::to_string(&initialization).context("Failed to serialize Summary")?; 85 | summary_file = OpenOptions::new() 86 | .read(true) 87 | .write(true) 88 | .truncate(true) 89 | .open(&summary_path) 90 | .await 91 | .context("couldn't open new summary file")?; 92 | summary_file 93 | .write_all(summary_text.as_bytes()) 94 | .await 95 | .context("write to summary failed")?; 96 | summary_file.flush().await.context("flush at creation failed")?; 97 | summary_file 98 | 
.seek(std::io::SeekFrom::Start(0)) 99 | .await 100 | .context("couldn't seek to the beginning of the summary file")?; 101 | 102 | return Ok(SummaryFile { inner: Arc::new(Mutex::new(summary_file)) }); 103 | } 104 | } 105 | // for all the other cases, the SummaryFile should be there 106 | summary_file = OpenOptions::new() 107 | .read(true) 108 | .write(true) 109 | .open(&summary_path) 110 | .await 111 | .context("couldn't open existing summary file")?; 112 | Ok(SummaryFile { inner: Arc::new(Mutex::new(summary_file)) }) 113 | } 114 | 115 | /// Parses the summary file and returns [`Summary`] 116 | #[instrument] 117 | pub(crate) async fn parse(&self) -> Result { 118 | let (summary, _) = self.read_and_deserialize().await?; 119 | Ok(summary) 120 | } 121 | 122 | /// updates the summary file, and returns the content of the new summary 123 | /// 124 | /// this function will be called by the farmer when 125 | /// the status of the `plotting_finished` 126 | /// or value of `farmed_block_count` changes 127 | #[instrument] 128 | pub(crate) async fn update( 129 | &self, 130 | SummaryUpdateFields { 131 | is_plotting_finished, 132 | new_authored_count, 133 | new_parsed_blocks, 134 | }: SummaryUpdateFields, 135 | ) -> Result { 136 | let (mut summary, mut guard) = self.read_and_deserialize().await?; 137 | 138 | if is_plotting_finished { 139 | summary.initial_plotting_finished = true; 140 | } 141 | 142 | summary.authored_count += new_authored_count; 143 | 144 | summary.last_processed_block_num += new_parsed_blocks; 145 | 146 | let serialized_summary = 147 | toml::to_string(&summary).context("Failed to serialize Summary")?; 148 | 149 | guard.set_len(0).await.context("couldn't truncate the summary file")?; 150 | guard 151 | .write_all(serialized_summary.as_bytes()) 152 | .await 153 | .context("couldn't write to summary file")?; 154 | guard.flush().await.context("flushing failed for summary file")?; 155 | guard 156 | .seek(std::io::SeekFrom::Start(0)) 157 | .await 158 | .context("couldn't seek to the beginning of the summary file")?; 159 | 160 | Ok(summary) 161 | } 162 | 163 | /// Reads the file, serializes it into `Summary` and seeks to the beginning 164 | /// of the file 165 | #[instrument] 166 | async fn read_and_deserialize(&self) -> Result<(Summary, MutexGuard<'_, File>)> { 167 | let mut guard = self.inner.lock().await; 168 | let mut contents = String::new(); 169 | 170 | guard 171 | .read_to_string(&mut contents) 172 | .await 173 | .context("couldn't read the contents of the summary file")?; 174 | let summary: Summary = 175 | toml::from_str(&contents).context("couldn't serialize the summary content")?; 176 | 177 | guard 178 | .seek(std::io::SeekFrom::Start(0)) 179 | .await 180 | .context("couldn't seek to the beginning of the summary file")?; 181 | 182 | Ok((summary, guard)) 183 | } 184 | } 185 | 186 | /// deletes the summary file 187 | #[instrument] 188 | pub(crate) fn delete_summary() -> Result<()> { 189 | remove_file(summary_path()).context("couldn't delete summary file") 190 | } 191 | 192 | /// returns the path for the summary file 193 | #[instrument] 194 | pub(crate) fn summary_path() -> PathBuf { 195 | summary_dir().join("summary.toml") 196 | } 197 | 198 | #[instrument] 199 | fn summary_dir() -> PathBuf { 200 | dirs::cache_dir().expect("couldn't get the directory!").join("pulsar") 201 | } 202 | -------------------------------------------------------------------------------- /pulsar/src/config.rs: -------------------------------------------------------------------------------- 1 | use 
std::fs::{create_dir_all, remove_file, File}; 2 | use std::num::NonZeroU8; 3 | use std::path::PathBuf; 4 | 5 | use color_eyre::eyre::{eyre, Report, Result, WrapErr}; 6 | use derivative::Derivative; 7 | use serde::{Deserialize, Serialize}; 8 | use strum_macros::EnumIter; 9 | use subspace_sdk::farmer::Farmer; 10 | use subspace_sdk::node::{DomainConfigBuilder, DsnBuilder, NetworkBuilder, Node, Role}; 11 | use subspace_sdk::{chain_spec, ByteSize, FarmDescription, PublicKey}; 12 | use tracing::instrument; 13 | 14 | use crate::utils::IntoEyre; 15 | 16 | /// defaults for the user config file 17 | pub(crate) const DEFAULT_FARM_SIZE: ByteSize = ByteSize::gb(2); 18 | pub(crate) const MIN_FARM_SIZE: ByteSize = ByteSize::gb(2); 19 | 20 | /// structure of the config toml file 21 | #[derive(Deserialize, Serialize, Debug)] 22 | pub(crate) struct Config { 23 | pub(crate) chain: ChainConfig, 24 | pub(crate) farmer: FarmerConfig, 25 | pub(crate) node: NodeConfig, 26 | } 27 | 28 | /// Advanced Node Settings Wrapper for CLI 29 | #[derive(Deserialize, Serialize, Clone, Debug, Default, PartialEq)] 30 | pub(crate) struct AdvancedNodeSettings { 31 | #[serde(default, skip_serializing_if = "crate::utils::is_default")] 32 | pub(crate) enable_domains: bool, 33 | #[serde(default, flatten)] 34 | pub(crate) extra: toml::Table, 35 | } 36 | 37 | /// Node Options Wrapper for CLI 38 | #[derive(Deserialize, Serialize, Clone, Debug)] 39 | pub(crate) struct NodeConfig { 40 | pub(crate) directory: PathBuf, 41 | pub(crate) name: String, 42 | #[serde(default, skip_serializing_if = "crate::utils::is_default")] 43 | pub(crate) advanced: AdvancedNodeSettings, 44 | } 45 | 46 | impl NodeConfig { 47 | pub async fn build(self, chain: ChainConfig, is_verbose: bool) -> Result { 48 | let Self { directory, name, advanced: AdvancedNodeSettings { enable_domains, extra } } = 49 | self; 50 | 51 | let (mut node, chain_spec) = match chain { 52 | ChainConfig::Gemini3h => { 53 | let mut node = Node::gemini_3h() 54 | .network(NetworkBuilder::gemini_3h().name(name)) 55 | .dsn(DsnBuilder::gemini_3h()) 56 | .sync_from_dsn(true) 57 | .enable_subspace_block_relay(true); 58 | if enable_domains { 59 | node = node.domain(Some(DomainConfigBuilder::gemini_3h().configuration())); 60 | } 61 | let chain_spec = chain_spec::gemini_3h(); 62 | (node, chain_spec) 63 | } 64 | ChainConfig::Dev => { 65 | let mut node = Node::dev(); 66 | if enable_domains { 67 | node = node.domain(Some( 68 | DomainConfigBuilder::dev().role(Role::Authority).configuration(), 69 | )); 70 | } 71 | let chain_spec = chain_spec::dev_config(); 72 | (node, chain_spec) 73 | } 74 | ChainConfig::DevNet => { 75 | let mut node = Node::devnet() 76 | .network(NetworkBuilder::devnet().name(name)) 77 | .dsn(DsnBuilder::devnet()) 78 | .sync_from_dsn(true) 79 | .enable_subspace_block_relay(true); 80 | if enable_domains { 81 | node = node.domain(Some(DomainConfigBuilder::devnet().configuration())); 82 | } 83 | let chain_spec = chain_spec::devnet_config(); 84 | (node, chain_spec) 85 | } 86 | }; 87 | 88 | if is_verbose { 89 | node = node.informant_enable_color(true); 90 | } 91 | 92 | node = node 93 | .role(Role::Authority) 94 | .impl_version(format!("{}-{}", env!("CARGO_PKG_VERSION"), env!("GIT_HASH"))) 95 | .impl_name("pulsar".to_string()); 96 | 97 | crate::utils::apply_extra_options(&node.configuration(), extra) 98 | .context("Failed to deserialize node config")? 
99 | .build(directory, chain_spec) 100 | .await 101 | .into_eyre() 102 | .wrap_err("Failed to build subspace node") 103 | } 104 | } 105 | 106 | /// Advanced Farmer Settings Wrapper for CLI 107 | #[derive(Deserialize, Serialize, Clone, Derivative, Debug, PartialEq)] 108 | #[derivative(Default)] 109 | pub(crate) struct AdvancedFarmerSettings { 110 | #[serde(default, skip_serializing_if = "crate::utils::is_default")] 111 | //TODO: change this back to 1GB when DSN is working properly 112 | #[derivative(Default(value = "subspace_sdk::ByteSize::gb(3)"))] 113 | pub(crate) cache_size: ByteSize, 114 | #[serde(default, flatten)] 115 | pub(crate) extra: toml::Table, 116 | } 117 | 118 | /// Farmer Options Wrapper for CLI 119 | #[derive(Deserialize, Serialize, Clone, Debug)] 120 | pub(crate) struct FarmerConfig { 121 | pub(crate) reward_address: PublicKey, 122 | pub(crate) farm_directory: PathBuf, 123 | pub(crate) farm_size: ByteSize, 124 | #[serde(default, skip_serializing_if = "crate::utils::is_default")] 125 | pub(crate) advanced: AdvancedFarmerSettings, 126 | } 127 | 128 | impl FarmerConfig { 129 | pub async fn build(self, node: &Node) -> Result { 130 | let farm_description = &[FarmDescription::new(self.farm_directory, self.farm_size)]; 131 | 132 | // currently we do not have different configuration for the farmer w.r.t 133 | // different chains, but we may in the future 134 | let farmer = Farmer::builder(); 135 | crate::utils::apply_extra_options(&farmer.configuration(), self.advanced.extra) 136 | .context("Failed to deserialize node config")? 137 | .build( 138 | self.reward_address, 139 | node, 140 | farm_description, 141 | // TODO: Make this configurable via user input 142 | NonZeroU8::new(1).expect("static value should not fail; qed"), 143 | ) 144 | .await 145 | .context("Failed to build a farmer") 146 | } 147 | } 148 | 149 | /// Enum for Chain 150 | #[derive(Deserialize, Serialize, Default, Clone, Debug, EnumIter)] 151 | pub(crate) enum ChainConfig { 152 | #[default] 153 | Gemini3h, 154 | Dev, 155 | DevNet, 156 | } 157 | 158 | impl std::str::FromStr for ChainConfig { 159 | type Err = Report; 160 | 161 | fn from_str(s: &str) -> Result { 162 | match s.to_lowercase().as_str() { 163 | "gemini3h" => Ok(ChainConfig::Gemini3h), 164 | "dev" => Ok(ChainConfig::Dev), 165 | "devnet" => Ok(ChainConfig::DevNet), 166 | _ => Err(eyre!("given chain: `{s}` is not recognized!")), 167 | } 168 | } 169 | } 170 | 171 | /// Creates a config file at the location 172 | /// - **Linux:** `$HOME/.config/pulsar/settings.toml`. 173 | /// - **macOS:** `$HOME/Library/Application Support/pulsar/settings.toml`. 174 | /// - **Windows:** `{FOLDERID_RoamingAppData}/pulsar/settings.toml`. 
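// For illustration, a generated `settings.toml` looks roughly like the sketch below
// (the paths, node name and reward address are hypothetical; `advanced` sections are
// omitted while they hold default values, and the exact string forms of `ByteSize`
// and `PublicKey` depend on their serde implementations):
//
//   chain = "Gemini3h"
//
//   [farmer]
//   reward_address = "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY"
//   farm_directory = "/home/alice/.local/share/pulsar/farms"
//   farm_size = "2.0 GB"
//
//   [node]
//   directory = "/home/alice/.local/share/pulsar/node"
//   name = "alice"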
175 | pub(crate) fn create_config() -> Result<(File, PathBuf)> { 176 | let config_path = 177 | dirs::config_dir().expect("couldn't get the default config directory!").join("pulsar"); 178 | 179 | if let Err(err) = create_dir_all(&config_path) { 180 | let config_path = config_path.to_str().expect("couldn't get pulsar config path!"); 181 | return Err(err).wrap_err(format!("could not create the directory: `{config_path}`")); 182 | } 183 | 184 | let file = File::create(config_path.join("settings.toml"))?; 185 | 186 | Ok((file, config_path)) 187 | } 188 | 189 | /// parses the config, and returns [`Config`] 190 | #[instrument] 191 | pub(crate) fn parse_config() -> Result<Config> { 192 | let config_path = dirs::config_dir().expect("couldn't get the default config directory!"); 193 | let config_path = config_path.join("pulsar").join("settings.toml"); 194 | 195 | let config: Config = toml::from_str(&std::fs::read_to_string(config_path)?)?; 196 | Ok(config) 197 | } 198 | 199 | /// validates the config for farming 200 | #[instrument] 201 | pub(crate) fn validate_config() -> Result<Config> { 202 | let config = parse_config()?; 203 | 204 | // validity checks 205 | if config.farmer.farm_size < MIN_FARM_SIZE { 206 | return Err(eyre!("farm size should be bigger than {MIN_FARM_SIZE}!")); 207 | } 208 | 209 | Ok(config) 210 | } 211 | 212 | /// deletes the config file 213 | #[instrument] 214 | pub(crate) fn delete_config() -> Result<()> { 215 | let config_path = dirs::config_dir().expect("couldn't get the default config directory!").join("pulsar").join("settings.toml"); 216 | remove_file(config_path).context("couldn't delete config file") 217 | } 218 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [workspace] 2 | resolver = "2" 3 | members = [ 4 | "pulsar", 5 | "sdk/*" 6 | ] 7 | 8 | # The list of dependencies below (which can be both direct and indirect dependencies) are crates 9 | # that are suspected to be CPU-intensive, and that are unlikely to require debugging (as some of 10 | # their debug info might be missing) or to require to be frequently recompiled. We compile these 11 | # dependencies with `opt-level=3` even in "dev" mode in order to make "dev" mode more usable. 12 | # The majority of these crates are cryptographic libraries. 13 | # 14 | # This list is ordered alphabetically.
15 | [profile.dev.package] 16 | bitvec = { opt-level = 3 } 17 | blake2 = { opt-level = 3 } 18 | blake3 = { opt-level = 3 } 19 | blake2b_simd = { opt-level = 3 } 20 | blst = { opt-level = 3 } 21 | rust-kzg-blst = { opt-level = 3 } 22 | chacha20 = { opt-level = 3 } 23 | chacha20poly1305 = { opt-level = 3 } 24 | cranelift-codegen = { opt-level = 3 } 25 | cranelift-wasm = { opt-level = 3 } 26 | crc32fast = { opt-level = 3 } 27 | crossbeam-deque = { opt-level = 3 } 28 | crypto-mac = { opt-level = 3 } 29 | curve25519-dalek = { opt-level = 3 } 30 | ed25519-zebra = { opt-level = 3 } 31 | flate2 = { opt-level = 3 } 32 | futures-channel = { opt-level = 3 } 33 | hashbrown = { opt-level = 3 } 34 | hash-db = { opt-level = 3 } 35 | hmac = { opt-level = 3 } 36 | httparse = { opt-level = 3 } 37 | integer-sqrt = { opt-level = 3 } 38 | k256 = { opt-level = 3 } 39 | keccak = { opt-level = 3 } 40 | kzg = { opt-level = 3 } 41 | libsecp256k1 = { opt-level = 3 } 42 | libz-sys = { opt-level = 3 } 43 | mio = { opt-level = 3 } 44 | nalgebra = { opt-level = 3 } 45 | num-bigint = { opt-level = 3 } 46 | parking_lot = { opt-level = 3 } 47 | parking_lot_core = { opt-level = 3 } 48 | percent-encoding = { opt-level = 3 } 49 | primitive-types = { opt-level = 3 } 50 | ring = { opt-level = 3 } 51 | rustls = { opt-level = 3 } 52 | secp256k1 = { opt-level = 3 } 53 | sha2 = { opt-level = 3 } 54 | sha3 = { opt-level = 3 } 55 | smallvec = { opt-level = 3 } 56 | snow = { opt-level = 3 } 57 | subspace-archiving = { opt-level = 3 } 58 | subspace-chiapos = { opt-level = 3 } 59 | subspace-core-primitives = { opt-level = 3 } 60 | subspace-erasure-coding = { opt-level = 3 } 61 | subspace-farmer-components = { opt-level = 3 } 62 | subspace-proof-of-space = { opt-level = 3 } 63 | subspace-proof-of-time = { opt-level = 3 } 64 | twox-hash = { opt-level = 3 } 65 | uint = { opt-level = 3 } 66 | x25519-dalek = { opt-level = 3 } 67 | yamux = { opt-level = 3 } 68 | zeroize = { opt-level = 3 } 69 | 70 | [profile.release] 71 | # Substrate runtime requires unwinding. 
72 | panic = "unwind" 73 | 74 | [profile.production] 75 | inherits = "release" 76 | lto = "fat" 77 | codegen-units = 1 78 | 79 | # TODO: get rid of this when we have bigger RAM for aarch64 linux 80 | [profile.aarch64linux] 81 | inherits = "release" 82 | codegen-units = 1 83 | 84 | [patch.crates-io] 85 | # TODO: Switch to release once v1.0.0-alpha.2 or newer is out 86 | hwlocality = { git = "https://github.com/HadrienG2/hwlocality", rev = "0f248573bcad584960fe20293c826203a265b833" } 87 | # TODO: Switch to stable release once https://github.com/paritytech/substrate-bip39/pull/20 is published 88 | substrate-bip39 = { git = "https://github.com/paritytech/substrate-bip39", rev = "03f02a7225d9bc5add92b7657790ee1ac8ab90a4" } 89 | # TODO: remove once tracing-appender has a new release 90 | tracing = { git = "https://github.com/tokio-rs/tracing", branch = "v0.1.x" } 91 | tracing-appender = { git = "https://github.com/tokio-rs/tracing", branch = "v0.1.x" } 92 | tracing-core = { git = "https://github.com/tokio-rs/tracing", branch = "v0.1.x" } 93 | tracing-error = { git = "https://github.com/tokio-rs/tracing", branch = "v0.1.x" } 94 | tracing-subscriber = { git = "https://github.com/tokio-rs/tracing", branch = "v0.1.x" } 95 | 96 | # Reason: We need to patch substrate dependency of snowfork and frontier libraries to our fork 97 | # TODO: Remove when we are using upstream substrate instead of fork 98 | [patch."https://github.com/paritytech/polkadot-sdk.git"] 99 | frame-benchmarking = { version = "4.0.0-dev", git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8" } 100 | frame-support = { version = "4.0.0-dev", git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8" } 101 | frame-system = { version = "4.0.0-dev", git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8" } 102 | sc-block-builder = { version = "0.10.0-dev", git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8" } 103 | sc-client-db = { version = "0.10.0-dev", git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8" } 104 | sc-consensus = { version = "0.10.0-dev", git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8" } 105 | sc-consensus-aura = { version = "0.10.0-dev", git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8" } 106 | sc-consensus-slots = { version = "0.10.0-dev", git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8" } 107 | sc-client-api = { version = "4.0.0-dev", git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8" } 108 | sc-network = { version = "0.10.0-dev", git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8" } 109 | sc-network-common = { version = "0.10.0-dev", git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8" } 110 | sc-network-sync = { version = "0.10.0-dev", git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8" } 111 | sc-rpc = { version = "4.0.0-dev", git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8" } 112 | sc-service = { version = "0.10.0-dev", git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8" 
} 113 | sc-telemetry = { version = "4.0.0-dev", git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8" } 114 | sc-transaction-pool = { version = "4.0.0-dev", git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8" } 115 | sc-transaction-pool-api = { version = "4.0.0-dev", git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8" } 116 | sc-utils = { version = "4.0.0-dev", git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8" } 117 | sp-api = { version = "4.0.0-dev", git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8" } 118 | sp-application-crypto = { version = "23.0.0", git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8" } 119 | sp-block-builder = { version = "4.0.0-dev", git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8" } 120 | sp-blockchain = { version = "4.0.0-dev", git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8" } 121 | sp-consensus = { version = "0.10.0-dev", git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8" } 122 | sp-consensus-aura = { version = "0.10.0-dev", git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8" } 123 | sp-consensus-slots = { version = "0.10.0-dev", git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8" } 124 | sp-core = { version = "21.0.0", git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8" } 125 | sp-database = { version = "4.0.0-dev", git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8" } 126 | sp-externalities = { version = "0.19.0", git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8" } 127 | sp-keystore = { version = "0.27.0", git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8" } 128 | sp-runtime-interface = { version = "17.0.0", git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8" } 129 | sp-state-machine = { version = "0.28.0", git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8" } 130 | sp-std = { version = "8.0.0", git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8" } 131 | sp-storage = { version = "13.0.0", git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8" } 132 | sp-timestamp = { version = "4.0.0-dev", git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8" } 133 | sp-trie = { version = "22.0.0", git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8" } 134 | sp-inherents = { version = "4.0.0-dev", git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8" } 135 | sp-io = { version = "23.0.0", git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8" } 136 | sp-runtime = { version = "24.0.0", git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8" } 137 | substrate-prometheus-endpoint 
= { version = "0.10.0-dev", git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8" } 138 | -------------------------------------------------------------------------------- /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | # This action enables building the executables for cli, can be triggered manually or by release creation. 2 | name: release 3 | 4 | on: 5 | workflow_dispatch: 6 | push: 7 | tags: 8 | - "*" 9 | 10 | # Incremental compilation here isn't helpful 11 | env: 12 | CARGO_INCREMENTAL: 0 13 | 14 | # TODO: get rid of `production_target` variable when we have bigger RAM for aarch64 linux 15 | jobs: 16 | executables: 17 | strategy: 18 | matrix: 19 | build: 20 | - os: ${{ fromJson(github.repository_owner == 'subspace' && '["self-hosted", "ubuntu-20.04-x86-64"]' || '"ubuntu-20.04"') }} 21 | target: x86_64-unknown-linux-gnu 22 | production_target: target/x86_64-unknown-linux-gnu/production 23 | suffix: ubuntu-x86_64-skylake-${{ github.ref_name }} 24 | rustflags: "-C target-cpu=skylake" 25 | - os: ${{ fromJson(github.repository_owner == 'subspace' && '["self-hosted", "ubuntu-20.04-x86-64"]' || '"ubuntu-20.04"') }} 26 | target: x86_64-unknown-linux-gnu 27 | production_target: target/x86_64-unknown-linux-gnu/production 28 | suffix: ubuntu-x86_64-v2-${{ github.ref_name }} 29 | rustflags: "-C target-cpu=x86-64-v2 -C target-feature=+aes" 30 | - os: ${{ fromJson(github.repository_owner == 'subspace' && '["self-hosted", "ubuntu-20.04-x86-64"]' || '"ubuntu-20.04"') }} 31 | target: aarch64-unknown-linux-gnu 32 | production_target: target/aarch64-unknown-linux-gnu/aarch64linux 33 | suffix: ubuntu-aarch64-${{ github.ref_name }} 34 | # TODO: AES flag is such that we have decent performance on ARMv8, remove once `aes` crate bumps MSRV to 35 | # at least 1.61: https://github.com/RustCrypto/block-ciphers/issues/373 36 | rustflags: "-C linker=aarch64-linux-gnu-gcc --cfg aes_armv8" 37 | - os: ${{ fromJson(github.repository_owner == 'subspace' && '["self-hosted", "macos-14-arm64"]' || '"macos-14"') }} 38 | target: aarch64-apple-darwin 39 | production_target: target/aarch64-apple-darwin/production 40 | suffix: macos-aarch64-${{ github.ref_name }} 41 | # TODO: AES flag is such that we have decent performance on ARMv8, remove once `aes` crate bumps MSRV to 42 | # at least 1.61: https://github.com/RustCrypto/block-ciphers/issues/373 43 | rustflags: "--cfg aes_armv8" 44 | - os: ${{ fromJson(github.repository_owner == 'subspace' && '["self-hosted", "macos-14-arm64"]' || '"macos-14"') }} 45 | target: x86_64-apple-darwin 46 | production_target: target/x86_64-apple-darwin/production 47 | suffix: macos-x86_64-${{ github.ref_name }} 48 | rustflags: "" 49 | - os: ${{ fromJson(github.repository_owner == 'subspace' && '["self-hosted", "windows-server-2022-x86-64"]' || '"windows-2022"') }} 50 | target: x86_64-pc-windows-msvc 51 | production_target: target/x86_64-pc-windows-msvc/production 52 | suffix: windows-x86_64-skylake-${{ github.ref_name }} 53 | rustflags: "-C target-cpu=skylake" 54 | - os: ${{ fromJson(github.repository_owner == 'subspace' && '["self-hosted", "windows-server-2022-x86-64"]' || '"windows-2022"') }} 55 | target: x86_64-pc-windows-msvc 56 | production_target: target/x86_64-pc-windows-msvc/production 57 | suffix: windows-x86_64-v2-${{ github.ref_name }} 58 | rustflags: "-C target-cpu=x86-64-v2 -C target-feature=+aes" 59 | runs-on: ${{ matrix.build.os }} 60 | 61 | env: 62 | RUSTFLAGS: ${{ 
matrix.build.rustflags }} 63 | # TODO: use the commented out one when this issue is resolved: https://github.com/tokio-rs/console/issues/299 64 | # RUSTFLAGS: ${{ matrix.build.rustflags }} --cfg tokio_unstable 65 | 66 | steps: 67 | - name: Checkout 68 | uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8 # @v3.1.0 69 | 70 | # On macOS, we need a proper Clang version, not Apple's custom version without wasm32 support 71 | - name: Install LLVM and Clang 72 | uses: KyleMayes/install-llvm-action@8852e4d5c58653ed05135c0a5d949d9c2febcb00 # v1.6.1 73 | with: 74 | version: "15.0" 75 | if: runner.os == 'macOS' 76 | 77 | - name: Install Protoc 78 | uses: arduino/setup-protoc@9b1ee5b22b0a3f1feb8c2ff99b32c89b3c3191e9 # v2.0.0 79 | with: 80 | repo-token: ${{ secrets.GITHUB_TOKEN }} 81 | 82 | # Needed for hwloc 83 | - name: Install automake (macOS) 84 | run: brew install automake 85 | if: runner.os == 'macOS' 86 | 87 | # Workaround to resolve link error with C:\msys64\mingw64\bin\libclang.dll 88 | - name: Remove msys64 89 | run: Remove-Item -LiteralPath "C:\msys64\" -Force -Recurse 90 | if: runner.os == 'Windows' 91 | continue-on-error: true 92 | 93 | - name: Linux AArch64 cross-compile packages 94 | run: | 95 | FLAVOR="$(lsb_release -sc)" 96 | 97 | sudo tee /etc/apt/sources.list.d/arm64.list <> $GITHUB_ENV 121 | if: matrix.build.target == 'aarch64-unknown-linux-gnu' 122 | 123 | - name: Build the executable (other than aarch64 linux) 124 | uses: actions-rs/cargo@ae10961054e4aa8b4aa7dffede299aaf087aa33b # @v1.0.1 125 | if: matrix.build.target != 'aarch64-unknown-linux-gnu' && runner.os != 'macOS' 126 | with: 127 | command: build 128 | args: --locked -Z build-std --target ${{ matrix.build.target }} --profile production --bin pulsar 129 | 130 | # TODO: get rid of this when we have bigger RAM for aarch64 linux 131 | - name: Build the executable for aarch64 linux separately 132 | uses: actions-rs/cargo@ae10961054e4aa8b4aa7dffede299aaf087aa33b # @v1.0.1 133 | if: matrix.build.target == 'aarch64-unknown-linux-gnu' && runner.os == 'Linux' 134 | with: 135 | command: build 136 | args: --locked -Z build-std --target ${{ matrix.build.target }} --profile aarch64linux --bin pulsar 137 | 138 | # We build macOS without `numa` feature, primarily because of https://github.com/HadrienG2/hwlocality/issues/31 139 | - name: Build the executable (macOS) 140 | uses: actions-rs/cargo@ae10961054e4aa8b4aa7dffede299aaf087aa33b # @v1.0.1 141 | with: 142 | command: build 143 | args: --locked -Z build-std --target ${{ matrix.build.target }} --profile production --no-default-features --bin pulsar 144 | if: runner.os == 'macOS' 145 | 146 | - name: Sign and Notarize Application (macOS) 147 | run: | 148 | echo "Importing certificate" 149 | echo "${{ secrets.MACOS_CERTIFICATE }}" | base64 --decode > certificate.p12 150 | security create-keychain -p "${{ secrets.MACOS_CERTIFICATE_PASSWORD }}" build.keychain 151 | security default-keychain -s build.keychain 152 | security unlock-keychain -p "${{ secrets.MACOS_CERTIFICATE_PASSWORD }}" build.keychain 153 | security import certificate.p12 -k build.keychain -P "${{ secrets.MACOS_CERTIFICATE_PASSWORD }}" -T /usr/bin/codesign 154 | security set-key-partition-list -S apple-tool:,apple:,codesign: -s -k "${{ secrets.MACOS_CERTIFICATE_PASSWORD }}" build.keychain 155 | echo "Signing farmer" 156 | codesign --force --options=runtime --entitlements .github/workflows/Entitlements.plist -s "${{ secrets.MACOS_IDENTITY }}" --timestamp ${{ matrix.build.production_target }}/pulsar 157 | echo 
"Creating an archive" 158 | mkdir ${{ matrix.build.production_target }}/macos-binaries 159 | cp ${{ matrix.build.production_target }}/pulsar ${{ matrix.build.production_target }}/macos-binaries 160 | ditto -c -k --rsrc ${{ matrix.build.production_target }}/macos-binaries subspace-binaries.zip 161 | echo "Notarizing" 162 | brew update 163 | brew install mitchellh/gon/gon 164 | cat << EOF > gon.hcl 165 | source = ["subspace-binaries.zip"] 166 | bundle_id = "${{ secrets.MACOS_BUNDLE_ID }}" 167 | sign { 168 | application_identity = "${{ secrets.MACOS_IDENTITY }}" 169 | } 170 | apple_id { 171 | username = "${{ secrets.MACOS_APPLE_ID }}" 172 | password = "${{ secrets.MACOS_APP_PASSWORD }}" 173 | } 174 | EOF 175 | gon -log-level=info -log-json gon.hcl 176 | 177 | # Notarize the ZIP using notarytool 178 | xcrun notarytool submit subspace-binaries.zip --apple-id "${{ secrets.MACOS_APPLE_ID }}" --password "${{ secrets.MACOS_APP_PASSWORD }}" --team-id "${{ secrets.MACOS_TEAM_ID }}" --wait 179 | 180 | # // todo stapling for macOS artifacts 181 | # Staple the zip package 182 | # xcrun stapler staple subspace-binaries.zip 183 | 184 | echo "Done!" 185 | # Allow code signing to fail on non-release builds and in non-subspace repos (forks) 186 | continue-on-error: ${{ github.repository_owner != 'subspace' || github.event_name != 'push' || github.ref_type != 'tag' }} 187 | if: runner.os == 'macOS' 188 | 189 | - name: Sign Application (Windows) 190 | run: | 191 | AzureSignTool sign --azure-key-vault-url "${{ secrets.AZURE_KEY_VAULT_URI }}" --azure-key-vault-client-id "${{ secrets.AZURE_CLIENT_ID }}" --azure-key-vault-client-secret "${{ secrets.AZURE_CLIENT_SECRET }}" --azure-key-vault-tenant-id "${{ secrets.AZURE_TENANT_ID }}" --azure-key-vault-certificate "${{ secrets.AZURE_CERT_NAME }}" --file-digest sha512 --timestamp-rfc3161 http://timestamp.digicert.com -v "${{ matrix.build.production_target }}/pulsar.exe" 192 | continue-on-error: ${{ github.repository_owner != 'subspace' || github.event_name != 'push' || github.ref_type != 'tag' }} 193 | if: runner.os == 'Windows' 194 | 195 | - name: Prepare executables for uploading (Ubuntu) 196 | run: | 197 | mkdir executables 198 | mv ${{ matrix.build.production_target }}/pulsar executables/pulsar-${{ matrix.build.suffix }} 199 | if: runner.os == 'Linux' 200 | 201 | - name: Prepare executables for uploading (macOS) 202 | run: | 203 | mkdir executables 204 | mv ${{ matrix.build.production_target }}/pulsar executables/pulsar-${{ matrix.build.suffix }} 205 | # Zip it so that signature is not lost 206 | ditto -c -k --rsrc executables/pulsar-${{ matrix.build.suffix }} executables/pulsar-${{ matrix.build.suffix }}.zip 207 | rm executables/pulsar-${{ matrix.build.suffix }} 208 | if: runner.os == 'macOS' 209 | 210 | - name: Prepare executables for uploading (Windows) 211 | run: | 212 | mkdir executables 213 | move ${{ matrix.build.production_target }}/pulsar.exe executables/pulsar-${{ matrix.build.suffix }}.exe 214 | if: runner.os == 'Windows' 215 | 216 | - name: Upload executable to artifacts 217 | uses: actions/upload-artifact@83fd05a356d7e2593de66fc9913b3002723633cb # @v3.1.1 218 | with: 219 | name: executables-${{ matrix.build.suffix }} 220 | path: | 221 | executables/* 222 | if-no-files-found: error 223 | 224 | - name: Upload executable to assets 225 | uses: alexellis/upload-assets@259de5111cb56966d046ced998941e93f91d2c93 # @0.4.0 226 | env: 227 | GITHUB_TOKEN: ${{ github.token }} 228 | with: 229 | asset_paths: '["executables/*"]' 230 | if: github.event_name == 'push' 
&& github.ref_type == 'tag' 231 | -------------------------------------------------------------------------------- /sdk/dsn/src/builder.rs: -------------------------------------------------------------------------------- 1 | use std::collections::HashSet; 2 | use std::path::PathBuf; 3 | use std::sync::{Arc, Weak}; 4 | 5 | use anyhow::Context; 6 | use derivative::Derivative; 7 | use derive_builder::Builder; 8 | use derive_more::{Deref, DerefMut, Display, From}; 9 | use futures::prelude::*; 10 | use prometheus_client::registry::Registry; 11 | use sc_consensus_subspace::archiver::SegmentHeadersStore; 12 | use sdk_utils::{self, DestructorSet, Multiaddr, MultiaddrWithPeerId}; 13 | use serde::{Deserialize, Serialize}; 14 | use subspace_farmer::piece_cache::PieceCache as FarmerPieceCache; 15 | use subspace_farmer::utils::readers_and_pieces::ReadersAndPieces; 16 | use subspace_farmer::KNOWN_PEERS_CACHE_SIZE; 17 | use subspace_networking::utils::strip_peer_id; 18 | use subspace_networking::{ 19 | KademliaMode, KnownPeersManager, KnownPeersManagerConfig, PieceByIndexRequest, 20 | PieceByIndexRequestHandler, PieceByIndexResponse, SegmentHeaderBySegmentIndexesRequestHandler, 21 | SegmentHeaderRequest, SegmentHeaderResponse, 22 | }; 23 | 24 | use super::local_provider_record_utils::MaybeLocalRecordProvider; 25 | use super::LocalRecordProvider; 26 | 27 | /// Wrapper with default value for listen address 28 | #[derive( 29 | Debug, Clone, Derivative, Deserialize, Serialize, PartialEq, Eq, From, Deref, DerefMut, 30 | )] 31 | #[derivative(Default)] 32 | #[serde(transparent)] 33 | pub struct ListenAddresses( 34 | #[derivative(Default( 35 | // TODO: get rid of it, once it won't be required by monorepo 36 | value = "vec![\"/ip4/127.0.0.1/tcp/0\".parse().expect(\"Always valid\")]" 37 | ))] 38 | pub Vec, 39 | ); 40 | 41 | /// Wrapper with default value for number of incoming connections 42 | #[derive( 43 | Debug, Clone, Derivative, Deserialize, Serialize, PartialEq, Eq, From, Deref, DerefMut, Display, 44 | )] 45 | #[derivative(Default)] 46 | #[serde(transparent)] 47 | pub struct InConnections(#[derivative(Default(value = "300"))] pub u32); 48 | 49 | /// Wrapper with default value for number of outgoing connections 50 | #[derive( 51 | Debug, Clone, Derivative, Deserialize, Serialize, PartialEq, Eq, From, Deref, DerefMut, Display, 52 | )] 53 | #[derivative(Default)] 54 | #[serde(transparent)] 55 | pub struct OutConnections(#[derivative(Default(value = "150"))] pub u32); 56 | 57 | /// Wrapper with default value for number of target connections 58 | #[derive( 59 | Debug, Clone, Derivative, Deserialize, Serialize, PartialEq, Eq, From, Deref, DerefMut, Display, 60 | )] 61 | #[derivative(Default)] 62 | #[serde(transparent)] 63 | pub struct TargetConnections(#[derivative(Default(value = "15"))] pub u32); 64 | 65 | /// Wrapper with default value for number of pending incoming connections 66 | #[derive( 67 | Debug, Clone, Derivative, Deserialize, Serialize, PartialEq, Eq, From, Deref, DerefMut, Display, 68 | )] 69 | #[derivative(Default)] 70 | #[serde(transparent)] 71 | pub struct PendingInConnections(#[derivative(Default(value = "100"))] pub u32); 72 | 73 | /// Wrapper with default value for number of pending outgoing connections 74 | #[derive( 75 | Debug, Clone, Derivative, Deserialize, Serialize, PartialEq, Eq, From, Deref, DerefMut, Display, 76 | )] 77 | #[derivative(Default)] 78 | #[serde(transparent)] 79 | pub struct PendingOutConnections(#[derivative(Default(value = "150"))] pub u32); 80 | 81 | /// Node DSN builder 
82 | #[derive(Debug, Clone, Derivative, Builder, Deserialize, Serialize, PartialEq)] 83 | #[derivative(Default)] 84 | #[builder(pattern = "immutable", build_fn(private, name = "_build"), name = "DsnBuilder")] 85 | #[non_exhaustive] 86 | pub struct Dsn { 87 | /// Listen on some address for other nodes 88 | #[builder(default, setter(into))] 89 | #[serde(default, skip_serializing_if = "sdk_utils::is_default")] 90 | pub listen_addresses: ListenAddresses, 91 | /// Boot nodes 92 | #[builder(default)] 93 | #[serde(default, skip_serializing_if = "Vec::is_empty")] 94 | pub boot_nodes: Vec, 95 | /// Known external addresses 96 | #[builder(setter(into), default)] 97 | #[serde(default, skip_serializing_if = "sdk_utils::is_default")] 98 | pub external_addresses: Vec, 99 | /// Reserved nodes 100 | #[builder(default)] 101 | #[serde(default, skip_serializing_if = "sdk_utils::is_default")] 102 | pub reserved_nodes: Vec, 103 | /// Determines whether we allow keeping non-global (private, shared, 104 | /// loopback..) addresses in Kademlia DHT. 105 | #[builder(default)] 106 | #[serde(default, skip_serializing_if = "sdk_utils::is_default")] 107 | pub allow_non_global_addresses_in_dht: bool, 108 | /// Defines max established incoming swarm connection limit. 109 | #[builder(setter(into), default)] 110 | #[serde(default, skip_serializing_if = "sdk_utils::is_default")] 111 | pub in_connections: InConnections, 112 | /// Defines max established outgoing swarm connection limit. 113 | #[builder(setter(into), default)] 114 | #[serde(default, skip_serializing_if = "sdk_utils::is_default")] 115 | pub out_connections: OutConnections, 116 | /// Pending incoming swarm connection limit. 117 | #[builder(setter(into), default)] 118 | #[serde(default, skip_serializing_if = "sdk_utils::is_default")] 119 | pub pending_in_connections: PendingInConnections, 120 | /// Pending outgoing swarm connection limit. 
121 | #[builder(setter(into), default)] 122 | #[serde(default, skip_serializing_if = "sdk_utils::is_default")] 123 | pub pending_out_connections: PendingOutConnections, 124 | } 125 | 126 | sdk_utils::generate_builder!(Dsn); 127 | 128 | impl DsnBuilder { 129 | /// Dev chain configuration 130 | pub fn dev() -> Self { 131 | Self::new().allow_non_global_addresses_in_dht(true) 132 | } 133 | 134 | /// Gemini 3h configuration 135 | pub fn gemini_3h() -> Self { 136 | Self::new().listen_addresses(vec![ 137 | "/ip6/::/tcp/30433".parse().expect("hardcoded value is true"), 138 | "/ip4/0.0.0.0/tcp/30433".parse().expect("hardcoded value is true"), 139 | ]) 140 | } 141 | 142 | /// Devnet configuration 143 | pub fn devnet() -> Self { 144 | Self::new().listen_addresses(vec![ 145 | "/ip6/::/tcp/30433".parse().expect("hardcoded value is true"), 146 | "/ip4/0.0.0.0/tcp/30433".parse().expect("hardcoded value is true"), 147 | ]) 148 | } 149 | } 150 | 151 | /// Options for DSN 152 | pub struct DsnOptions { 153 | /// Client to aux storage for node piece cache 154 | pub client: Arc, 155 | /// Path for dsn 156 | pub base_path: PathBuf, 157 | /// Keypair for networking 158 | pub keypair: subspace_networking::libp2p::identity::Keypair, 159 | /// Get piece by index handler 160 | pub get_piece_by_index: PieceByIndex, 161 | /// Get segment header by segment indexes handler 162 | pub get_segment_header_by_segment_indexes: SegmentHeaderByIndexes, 163 | /// Segment header store 164 | pub segment_header_store: SegmentHeadersStore, 165 | /// Is libp2p metrics enabled 166 | pub is_metrics_enabled: bool, 167 | } 168 | 169 | /// Shared Dsn structure between node and farmer 170 | #[derive(Derivative)] 171 | #[derivative(Debug)] 172 | pub struct DsnShared { 173 | /// Dsn node 174 | pub node: subspace_networking::Node, 175 | /// Farmer readers and pieces 176 | pub farmer_readers_and_pieces: Arc>>, 177 | /// Farmer piece cache 178 | pub farmer_piece_cache: Arc>>, 179 | _destructors: DestructorSet, 180 | } 181 | 182 | impl Dsn { 183 | /// Build dsn 184 | pub fn build_dsn( 185 | self, 186 | options: DsnOptions, 187 | ) -> anyhow::Result<( 188 | DsnShared, 189 | subspace_networking::NodeRunner, 190 | Option, 191 | )> 192 | where 193 | B: sp_runtime::traits::Block, 194 | C: sc_client_api::AuxStore + sp_blockchain::HeaderBackend + Send + Sync + 'static, 195 | PieceByIndex: Fn( 196 | &PieceByIndexRequest, 197 | Weak>>, 198 | Arc>>, 199 | ) -> F1 200 | + Send 201 | + Sync 202 | + 'static, 203 | F1: Future> + Send + 'static, 204 | SegmentHeaderByIndexes: Fn(&SegmentHeaderRequest, &SegmentHeadersStore) -> Option 205 | + Send 206 | + Sync 207 | + 'static, 208 | { 209 | let DsnOptions { 210 | client, 211 | base_path, 212 | keypair, 213 | get_piece_by_index, 214 | get_segment_header_by_segment_indexes, 215 | segment_header_store, 216 | is_metrics_enabled, 217 | } = options; 218 | let farmer_readers_and_pieces = Arc::new(parking_lot::Mutex::new(None)); 219 | let protocol_version = hex::encode(client.info().genesis_hash); 220 | let farmer_piece_cache = Arc::new(parking_lot::RwLock::new(None)); 221 | let local_records_provider = MaybeLocalRecordProvider::new(farmer_piece_cache.clone()); 222 | 223 | let mut metrics_registry = Registry::default(); 224 | 225 | tracing::debug!(genesis_hash = protocol_version, "Setting DSN protocol version..."); 226 | 227 | let Self { 228 | listen_addresses, 229 | reserved_nodes, 230 | allow_non_global_addresses_in_dht, 231 | in_connections: InConnections(max_established_incoming_connections), 232 | out_connections:
OutConnections(max_established_outgoing_connections), 233 | pending_in_connections: PendingInConnections(max_pending_incoming_connections), 234 | pending_out_connections: PendingOutConnections(max_pending_outgoing_connections), 235 | boot_nodes, 236 | external_addresses, 237 | } = self; 238 | 239 | let bootstrap_nodes = boot_nodes.into_iter().map(Into::into).collect::>(); 240 | 241 | let listen_on = listen_addresses.0.into_iter().map(Into::into).collect(); 242 | 243 | let networking_parameters_registry = KnownPeersManager::new(KnownPeersManagerConfig { 244 | path: Some(base_path.join("known_addresses.bin").into_boxed_path()), 245 | ignore_peer_list: strip_peer_id(bootstrap_nodes.clone()) 246 | .into_iter() 247 | .map(|(peer_id, _)| peer_id) 248 | .collect::>(), 249 | cache_size: KNOWN_PEERS_CACHE_SIZE, 250 | ..Default::default() 251 | }) 252 | .context("Failed to open known addresses database for DSN")? 253 | .boxed(); 254 | 255 | let default_networking_config = subspace_networking::Config::new( 256 | protocol_version, 257 | keypair, 258 | local_records_provider.clone(), 259 | is_metrics_enabled.then_some(&mut metrics_registry), 260 | ); 261 | 262 | let config = subspace_networking::Config { 263 | listen_on, 264 | allow_non_global_addresses_in_dht, 265 | networking_parameters_registry, 266 | request_response_protocols: vec![ 267 | PieceByIndexRequestHandler::create({ 268 | let weak_readers_and_pieces = Arc::downgrade(&farmer_readers_and_pieces); 269 | let farmer_piece_cache = farmer_piece_cache.clone(); 270 | move |_, req| { 271 | let weak_readers_and_pieces = weak_readers_and_pieces.clone(); 272 | let farmer_piece_cache = farmer_piece_cache.clone(); 273 | 274 | get_piece_by_index(req, weak_readers_and_pieces, farmer_piece_cache) 275 | } 276 | }), 277 | SegmentHeaderBySegmentIndexesRequestHandler::create({ 278 | let segment_header_store = segment_header_store.clone(); 279 | move |_, req| { 280 | futures::future::ready(get_segment_header_by_segment_indexes( 281 | req, 282 | &segment_header_store, 283 | )) 284 | } 285 | }), 286 | ], 287 | reserved_peers: reserved_nodes.into_iter().map(Into::into).collect(), 288 | max_established_incoming_connections, 289 | max_established_outgoing_connections, 290 | max_pending_incoming_connections, 291 | max_pending_outgoing_connections, 292 | bootstrap_addresses: bootstrap_nodes, 293 | kademlia_mode: KademliaMode::Dynamic, 294 | external_addresses: external_addresses.into_iter().map(Into::into).collect(), 295 | ..default_networking_config 296 | }; 297 | 298 | let (node, runner) = subspace_networking::construct(config)?; 299 | 300 | let mut destructors = DestructorSet::new_without_async("dsn-destructors"); 301 | let on_new_listener = node.on_new_listener(Arc::new({ 302 | let node = node.clone(); 303 | 304 | move |address| { 305 | tracing::info!( 306 | "DSN listening on {}", 307 | address 308 | .clone() 309 | .with(subspace_networking::libp2p::multiaddr::Protocol::P2p(node.id())) 310 | ); 311 | } 312 | })); 313 | destructors.add_items_to_drop(on_new_listener)?; 314 | 315 | Ok(( 316 | DsnShared { 317 | node, 318 | farmer_readers_and_pieces, 319 | _destructors: destructors, 320 | farmer_piece_cache, 321 | }, 322 | runner, 323 | is_metrics_enabled.then_some(metrics_registry), 324 | )) 325 | } 326 | } 327 | -------------------------------------------------------------------------------- /sdk/substrate/src/lib.rs: -------------------------------------------------------------------------------- 1 | //! 
Crate with abstraction over substrate logic 2 | 3 | #![warn( 4 | missing_docs, 5 | clippy::dbg_macro, 6 | clippy::unwrap_used, 7 | clippy::disallowed_types, 8 | unused_features 9 | )] 10 | #![cfg_attr(not(test), warn(unused_crate_dependencies))] 11 | #![feature(concat_idents)] 12 | 13 | use std::net::{IpAddr, Ipv4Addr, SocketAddr}; 14 | use std::path::Path; 15 | 16 | use derivative::Derivative; 17 | use derive_builder::Builder; 18 | use sc_network::config::{NodeKeyConfig, NonReservedPeerMode, Secret, SetConfig}; 19 | use sc_service::{BasePath, Configuration}; 20 | use sdk_utils::{Multiaddr, MultiaddrWithPeerId}; 21 | use serde::{Deserialize, Serialize}; 22 | use subspace_service::config::{ 23 | SubstrateConfiguration, SubstrateNetworkConfiguration, SubstrateRpcConfiguration, 24 | }; 25 | pub use types::*; 26 | 27 | mod types; 28 | 29 | #[doc(hidden)] 30 | #[derive(Debug, Clone, Derivative, Builder, Deserialize, Serialize, PartialEq)] 31 | #[derivative(Default)] 32 | #[builder(pattern = "owned", build_fn(private, name = "_build"), name = "BaseBuilder")] 33 | #[non_exhaustive] 34 | pub struct Base { 35 | /// Force block authoring 36 | #[builder(default)] 37 | #[serde(default, skip_serializing_if = "sdk_utils::is_default")] 38 | pub force_authoring: bool, 39 | /// Set node role 40 | #[builder(default)] 41 | #[serde(default, skip_serializing_if = "sdk_utils::is_default")] 42 | pub role: Role, 43 | /// Blocks pruning options 44 | #[builder(default)] 45 | #[serde(default, skip_serializing_if = "sdk_utils::is_default")] 46 | pub blocks_pruning: BlocksPruning, 47 | /// State pruning options 48 | #[builder(default)] 49 | #[serde(default, skip_serializing_if = "sdk_utils::is_default")] 50 | pub state_pruning: PruningMode, 51 | /// Implementation name 52 | #[builder(default)] 53 | #[serde(default, skip_serializing_if = "sdk_utils::is_default")] 54 | pub impl_name: ImplName, 55 | /// Implementation version 56 | #[builder(default)] 57 | #[serde(default, skip_serializing_if = "sdk_utils::is_default")] 58 | pub impl_version: ImplVersion, 59 | /// Rpc settings 60 | #[builder(setter(into), default)] 61 | #[serde(default, skip_serializing_if = "sdk_utils::is_default")] 62 | pub rpc: Rpc, 63 | /// Network settings 64 | #[builder(setter(into), default)] 65 | #[serde(default, skip_serializing_if = "sdk_utils::is_default")] 66 | pub network: Network, 67 | /// Enable color for substrate informant 68 | #[builder(default)] 69 | #[serde(default, skip_serializing_if = "sdk_utils::is_default")] 70 | pub informant_enable_color: bool, 71 | /// Additional telemetry endpoints 72 | #[builder(default)] 73 | #[serde(default, skip_serializing_if = "sdk_utils::is_default")] 74 | pub telemetry: Vec<(Multiaddr, u8)>, 75 | } 76 | 77 | #[doc(hidden)] 78 | #[macro_export] 79 | macro_rules! derive_base { 80 | ( 81 | $(< $( $lt:tt $( : $clt:tt $(+ $dlt:tt )* )? ),+ >)? @ $base:ty => $builder:ident { 82 | $( 83 | #[doc = $doc:literal] 84 | $field:ident : $field_ty:ty 85 | ),+ 86 | $(,)? 87 | } 88 | ) => { 89 | impl $(< $( $lt $( : $clt $(+ $dlt )* )? ),+ >)? $builder $(< $($lt),+ >)? { 90 | $( 91 | #[doc = $doc] 92 | pub fn $field(mut self, $field: impl Into<$field_ty>) -> Self { 93 | self.base = self.base.$field($field.into()); 94 | self 95 | } 96 | )* 97 | } 98 | }; 99 | 100 | ( $(< $( $lt:tt $( : $clt:tt $(+ $dlt:tt )* )? ),+ >)? @ $base:ty => $builder:ident ) => { 101 | $crate::derive_base!( 102 | $(< $( $lt $( : $clt $(+ $dlt )* )? ),+ >)? 
@ $base => $builder { 103 | /// Force block authoring 104 | force_authoring: bool, 105 | /// Set node role 106 | role: $crate::Role, 107 | /// Blocks pruning options 108 | blocks_pruning: $crate::BlocksPruning, 109 | /// State pruning options 110 | state_pruning: $crate::PruningMode, 111 | /// Implementation name 112 | impl_name: $crate::ImplName, 113 | /// Implementation version 114 | impl_version: $crate::ImplVersion, 115 | /// Rpc settings 116 | rpc: $crate::Rpc, 117 | /// Network settings 118 | network: $crate::Network, 119 | /// Enable color for substrate informant 120 | informant_enable_color: bool, 121 | /// Additional telemetry endpoints 122 | telemetry: Vec<(sdk_utils::Multiaddr, u8)>, 123 | }); 124 | } 125 | } 126 | 127 | impl Base { 128 | const NODE_NAME_MAX_LENGTH: usize = 64; 129 | 130 | pub async fn configuration( 131 | self, 132 | directory: impl AsRef, 133 | chain_spec: CS, 134 | ) -> Configuration 135 | where 136 | CS: sc_chain_spec::ChainSpec + sp_runtime::BuildStorage + 'static, 137 | { 138 | const NODE_KEY_ED25519_FILE: &str = "secret_ed25519"; 139 | const DEFAULT_NETWORK_CONFIG_PATH: &str = "network"; 140 | 141 | let Self { 142 | force_authoring, 143 | role, 144 | blocks_pruning, 145 | state_pruning, 146 | impl_name: ImplName(impl_name), 147 | impl_version: ImplVersion(impl_version), 148 | rpc: 149 | Rpc { 150 | addr: rpc_addr, 151 | port: rpc_port, 152 | max_connections: rpc_max_connections, 153 | cors: rpc_cors, 154 | methods: rpc_methods, 155 | max_subs_per_conn: rpc_max_subs_per_conn, 156 | }, 157 | network, 158 | informant_enable_color, 159 | telemetry, 160 | } = self; 161 | 162 | let base_path = BasePath::new(directory.as_ref()); 163 | let config_dir = base_path.config_dir(chain_spec.id()); 164 | 165 | let network = { 166 | let Network { listen_addresses, boot_nodes, force_synced, name, allow_private_ip } = 167 | network; 168 | let name = name.unwrap_or_else(|| { 169 | names::Generator::with_naming(names::Name::Numbered) 170 | .next() 171 | .filter(|name| name.chars().count() < Self::NODE_NAME_MAX_LENGTH) 172 | .expect("RNG is available on all supported platforms; qed") 173 | }); 174 | 175 | let config_dir = config_dir.join(DEFAULT_NETWORK_CONFIG_PATH); 176 | let listen_addresses = listen_addresses.into_iter().map(Into::into).collect::>(); 177 | 178 | SubstrateNetworkConfiguration { 179 | listen_on: listen_addresses, 180 | bootstrap_nodes: chain_spec 181 | .boot_nodes() 182 | .iter() 183 | .cloned() 184 | .chain(boot_nodes.into_iter().map(Into::into)) 185 | .collect(), 186 | node_key: NodeKeyConfig::Ed25519(Secret::File( 187 | config_dir.join(NODE_KEY_ED25519_FILE), 188 | )), 189 | default_peers_set: SetConfig { 190 | in_peers: 125, 191 | out_peers: 50, 192 | reserved_nodes: vec![], 193 | non_reserved_mode: NonReservedPeerMode::Accept, 194 | }, 195 | node_name: name, 196 | allow_private_ips: allow_private_ip, 197 | force_synced, 198 | public_addresses: vec![], 199 | } 200 | }; 201 | 202 | let telemetry_endpoints = match chain_spec.telemetry_endpoints() { 203 | Some(endpoints) => endpoints.clone(), 204 | None => sc_service::config::TelemetryEndpoints::new( 205 | telemetry.into_iter().map(|(endpoint, n)| (endpoint.to_string(), n)).collect(), 206 | ) 207 | .expect("Never returns an error"), 208 | }; 209 | 210 | SubstrateConfiguration { 211 | impl_name, 212 | impl_version, 213 | transaction_pool: Default::default(), 214 | network, 215 | state_pruning: state_pruning.into(), 216 | blocks_pruning: blocks_pruning.into(), 217 | rpc_options: SubstrateRpcConfiguration { 218 
| listen_on: rpc_addr.unwrap_or(SocketAddr::new( 219 | IpAddr::V4(Ipv4Addr::LOCALHOST), 220 | rpc_port.unwrap_or(9944), 221 | )), 222 | max_connections: rpc_max_connections.unwrap_or(100), 223 | cors: rpc_cors, 224 | methods: rpc_methods.into(), 225 | max_subscriptions_per_connection: rpc_max_subs_per_conn.unwrap_or(100), 226 | }, 227 | prometheus_listen_on: None, 228 | telemetry_endpoints: Some(telemetry_endpoints), 229 | force_authoring, 230 | chain_spec: Box::new(chain_spec), 231 | base_path: base_path.path().to_path_buf(), 232 | informant_output_format: sc_informant::OutputFormat { 233 | enable_color: informant_enable_color, 234 | }, 235 | farmer: role == Role::Authority, 236 | } 237 | .into() 238 | } 239 | } 240 | 241 | /// Node RPC builder 242 | #[derive(Debug, Clone, Derivative, Builder, Deserialize, Serialize, PartialEq, Eq)] 243 | #[derivative(Default)] 244 | #[builder(pattern = "owned", build_fn(private, name = "_build"), name = "RpcBuilder")] 245 | #[non_exhaustive] 246 | pub struct Rpc { 247 | /// Rpc address 248 | #[builder(setter(strip_option), default)] 249 | #[serde(default, skip_serializing_if = "sdk_utils::is_default")] 250 | pub addr: Option, 251 | /// RPC port 252 | #[builder(setter(strip_option), default)] 253 | #[serde(default, skip_serializing_if = "sdk_utils::is_default")] 254 | pub port: Option, 255 | /// Maximum number of connections for RPC server. `None` if default. 256 | #[builder(setter(strip_option), default)] 257 | #[serde(default, skip_serializing_if = "sdk_utils::is_default")] 258 | pub max_connections: Option, 259 | /// CORS settings for HTTP & WS servers. `None` if all origins are 260 | /// allowed. 261 | #[builder(setter(strip_option), default)] 262 | #[serde(default, skip_serializing_if = "sdk_utils::is_default")] 263 | pub cors: Option>, 264 | /// RPC methods to expose (by default only a safe subset or all of 265 | /// them). 
266 | #[builder(default)] 267 | #[serde(default, skip_serializing_if = "sdk_utils::is_default")] 268 | pub methods: RpcMethods, 269 | /// Maximum allowed subscriptions per rpc connection 270 | #[builder(default)] 271 | #[serde(default, skip_serializing_if = "sdk_utils::is_default")] 272 | pub max_subs_per_conn: Option, 273 | } 274 | 275 | impl RpcBuilder { 276 | /// Dev configuration 277 | pub fn dev() -> Self { 278 | Self::default() 279 | } 280 | 281 | /// Local test configuration that exposes RPC locally 282 | pub fn local_test(port: u16) -> Self { 283 | Self::dev() 284 | .addr(SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), port)) 285 | .port(port) 286 | .max_connections(100) 287 | .max_subs_per_conn(Some(100)) 288 | } 289 | 290 | /// Gemini 3h configuration 291 | pub fn gemini_3h() -> Self { 292 | Self::new().addr("127.0.0.1:9944".parse().expect("hardcoded value is true")).cors(vec![ 293 | "http://localhost:*".to_owned(), 294 | "http://127.0.0.1:*".to_owned(), 295 | "https://localhost:*".to_owned(), 296 | "https://127.0.0.1:*".to_owned(), 297 | "https://polkadot.js.org".to_owned(), 298 | ]) 299 | } 300 | 301 | /// Devnet configuration 302 | pub fn devnet() -> Self { 303 | Self::new().addr("127.0.0.1:9944".parse().expect("hardcoded value is true")).cors(vec![ 304 | "http://localhost:*".to_owned(), 305 | "http://127.0.0.1:*".to_owned(), 306 | "https://localhost:*".to_owned(), 307 | "https://127.0.0.1:*".to_owned(), 308 | "https://polkadot.js.org".to_owned(), 309 | ]) 310 | } 311 | } 312 | 313 | /// Node network builder 314 | #[derive(Debug, Default, Clone, Builder, Deserialize, Serialize, PartialEq)] 315 | #[builder(pattern = "owned", build_fn(private, name = "_build"), name = "NetworkBuilder")] 316 | #[non_exhaustive] 317 | pub struct Network { 318 | /// Allow connections to private (non-global) IP addresses 319 | #[builder(default)] 320 | #[serde(default, skip_serializing_if = "sdk_utils::is_default")] 321 | pub allow_private_ip: bool, 322 | /// Listen on some address for other nodes 323 | #[builder(default)] 324 | #[serde(default, skip_serializing_if = "sdk_utils::is_default")] 325 | pub listen_addresses: Vec, 326 | /// Boot nodes 327 | #[builder(default)] 328 | #[serde(default, skip_serializing_if = "Vec::is_empty")] 329 | pub boot_nodes: Vec, 330 | /// Force node to think it is synced 331 | #[builder(default)] 332 | #[serde(default, skip_serializing_if = "sdk_utils::is_default")] 333 | pub force_synced: bool, 334 | /// Node name 335 | #[builder(setter(into, strip_option), default)] 336 | #[serde(default, skip_serializing_if = "sdk_utils::is_default")] 337 | pub name: Option, 338 | } 339 | 340 | impl NetworkBuilder { 341 | /// Dev chain configuration 342 | pub fn dev() -> Self { 343 | Self::default().force_synced(true).allow_private_ip(true) 344 | } 345 | 346 | /// Gemini 3h configuration 347 | pub fn gemini_3h() -> Self { 348 | Self::default().listen_addresses(vec![ 349 | "/ip6/::/tcp/30333".parse().expect("hardcoded value is true"), 350 | "/ip4/0.0.0.0/tcp/30333".parse().expect("hardcoded value is true"), 351 | ]) 352 | } 353 | 354 | /// Dev network configuration 355 | pub fn devnet() -> Self { 356 | Self::default().listen_addresses(vec![ 357 | "/ip6/::/tcp/30333".parse().expect("hardcoded value is true"), 358 | "/ip4/0.0.0.0/tcp/30333".parse().expect("hardcoded value is true"), 359 | ]) 360 | } 361 | } 362 | 363 | sdk_utils::generate_builder!(Base, Rpc, Network); 364 | --------------------------------------------------------------------------------
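The DSN and substrate builders above are meant to be chained: each exposes environment presets (dev(), gemini_3h(), devnet()) plus per-field setters generated by derive_builder. A minimal usage sketch follows; it is not confirmed by the listing above — the sdk_dsn/sdk_substrate module paths, the example port and node name, and the omitted final build step (generated by sdk_utils::generate_builder!, whose output is not shown here) are assumptions.

// Sketch only. Assumes the builders above are reachable through crates named
// `sdk_dsn` and `sdk_substrate`, and that `sdk_utils::generate_builder!`
// provides the final build()/constructor step, which is therefore omitted.
use sdk_dsn::DsnBuilder;
use sdk_substrate::{NetworkBuilder, RpcBuilder};

fn sketch_dev_setup() {
    // DSN: start from the dev preset (keeps non-global addresses in the DHT)
    // and override connection limits; `setter(into)` lets plain u32 values
    // convert into the InConnections/OutConnections wrappers.
    let _dsn = DsnBuilder::dev().in_connections(100u32).out_connections(50u32);

    // RPC: the local-test preset binds 127.0.0.1 on the given port with
    // bounded connections and subscriptions.
    let _rpc = RpcBuilder::local_test(9944);

    // Substrate networking: the dev preset forces "synced" and allows private
    // IPs; `name` is optional thanks to `strip_option` on its setter.
    let _network = NetworkBuilder::dev().name("example-node");
}

From there, the resulting Rpc and Network values would typically be fed into the Base builder's rpc/network setters (both declared with setter(into)) before calling Base::configuration(directory, chain_spec) to obtain the final substrate Configuration.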