├── iroh ├── fixtures │ ├── dir │ │ ├── a.txt │ │ └── subdir │ │ │ ├── c.txt │ │ │ └── b.txt │ └── file.txt ├── src │ ├── lib.rs │ ├── metrics.rs │ ├── main.rs │ ├── size.rs │ └── config.rs ├── Cargo.toml └── README.md ├── iroh-localops ├── src │ ├── lib.rs │ └── process.rs ├── Cargo.toml └── README.md ├── iroh-util ├── .gitignore ├── tests │ ├── config.a.toml │ └── config.b.toml ├── src │ ├── exitcodes.rs │ └── human.rs ├── Cargo.toml └── README.md ├── .gitignore ├── iroh-resolver ├── fixtures │ ├── QmTh6zphkkZXhLimR5hfy1QnWrzf6EwP15r5aQqSzhUCYz │ ├── QmRZQMR6cpczdJAF4xXtisda3DbvFrHxuwi5nF2NJKZvzC │ ├── QmZULkCELmmk5XNfCgTnCyFgAVxBRBXyDHGGMVoLFLiXEN │ ├── QmaRGe7bVmVaLmxbrMiVNXqW4pRNNp3xq7hFtyRKA3mtJL │ ├── bafkreicysg23kiwv34eg2d7qweipxwosdo2py4ldv42nbauguluen5v6am │ ├── bafkreihcldjer7njjrrxknqh67cestxa7s7jf4nhnp62y6k4twcbahvtc4 │ ├── QmZSCBhytmu1Mr5gVrsXsB6D8S2XMQXSoofHdPxtPGrZBj │ ├── QmP9yKRwuji5i7RTgrevwJwXp7uqQu1prv88nxq9uj99rW │ ├── big-foo.car │ ├── zero_0B.zst │ ├── uicro_1B.zst │ ├── uicro_34B.zst │ ├── uicro_50B.zst │ ├── large_repeat_1GiB.zst │ ├── large_repeat_5GiB.zst │ ├── repeat_0.04GiB_174.zst │ ├── repeat_0.04GiB_174_1.zst │ ├── repeat_0.04GiB_175.zst │ ├── repeat_0.04GiB_175_1.zst │ ├── QmNyLad1dWGS6mv2zno4iEviBSYSUR2SrQ8JoZNDz1UHYy │ ├── QmcXoBdCgmFMoNbASaQCNVswRuuuqbw4VvA7e5GtHbhRNp │ ├── QmT7qkMZnZNDACJ8CT4PnVkxXKJfcKNVggkygzRcvZE72B │ ├── QmUr9cs4mhWxabKqm9PYPSQQ6AQGbHJBtyrNmxtKgxqUx9 │ ├── QmcHTZfwWWYG2Gbv9wR6bWZBvAgpFV5BcDoLrC2XMCkggn │ ├── QmdkGfDx42RNdAZFALHn5hjHqUq7L9o6Ef4zLnFEu3Y4Go │ ├── QmfTVUNatSpmZUERu62hwSEuLHEUNuY8FFuzFL5n187yGq │ ├── QmccJ8pV5hG7DEbq66ih1ZtowxgvqVS6imt98Ku62J2WRw │ ├── QmUajVwSkEp9JvdW914Qh1BCMRSUf2ztiQa6jqy1aWhwJv │ ├── bafybeietod5kx72jgbngoontthoax6nva4edkjnieghwqfzenstg4gil5i │ └── bafybeihmgpuwcdrfi47gfxisll7kmurvi6kd7rht5hlq2ed5omxobfip3a ├── src │ └── lib.rs ├── Cargo.toml ├── README.md └── tests │ └── unixfs.rs ├── iroh-car ├── tests │ ├── testv1.car │ ├── carv1_basic.car │ └── car_file_test.rs ├── src 
│ ├── lib.rs │ ├── error.rs │ ├── writer.rs │ ├── header.rs │ ├── util.rs │ └── reader.rs ├── Cargo.toml └── README.md ├── iroh-bitswap ├── src │ ├── server │ │ ├── ewma.rs │ │ ├── peer_ledger.rs │ │ ├── ledger.rs │ │ ├── blockstore_manager.rs │ │ └── task_merger.rs │ ├── metrics.rs │ ├── error.rs │ ├── client │ │ ├── session │ │ │ ├── sent_want_blocks_tracker.rs │ │ │ ├── cid_queue.rs │ │ │ └── peer_response_tracker.rs │ │ └── message_queue │ │ │ └── wantlist.rs │ ├── bitswap_pb.proto │ ├── block.rs │ ├── peer_task_queue │ │ └── peer_task.rs │ └── prefix.rs ├── build.rs ├── README.md └── Cargo.toml ├── .dockerignore ├── iroh-one ├── src │ ├── lib.rs │ ├── mem_p2p.rs │ ├── mem_store.rs │ ├── cli.rs │ └── uds.rs ├── Cargo.toml └── README.md ├── .cargo └── config.toml ├── iroh-unixfs ├── build.rs ├── src │ ├── unixfs.proto │ ├── merkledag.proto │ ├── lib.rs │ └── hamt │ │ └── hash_bits.rs └── Cargo.toml ├── iroh-store ├── src │ ├── lib.rs │ ├── metrics.rs │ ├── cli.rs │ ├── cf.rs │ └── main.rs ├── README.md ├── Cargo.toml └── benches │ └── store.rs ├── xtask ├── README.md └── Cargo.toml ├── codecov.yml ├── iroh-p2p ├── src │ ├── lib.rs │ ├── metrics.rs │ ├── cli.rs │ ├── behaviour │ │ └── event.rs │ └── main.rs ├── README.md └── Cargo.toml ├── iroh-gateway ├── src │ ├── lib.rs │ ├── metrics.rs │ ├── templates.rs │ ├── cli.rs │ ├── constants.rs │ ├── cors.rs │ ├── rpc.rs │ └── text.rs ├── Cargo.toml └── README.md ├── .github ├── dependabot.yml └── workflows │ └── weekly.yml ├── LICENSE-APACHE ├── examples ├── embed │ ├── Cargo.toml │ └── src │ │ └── main.rs └── importer │ ├── Cargo.toml │ └── src │ └── main.rs ├── docker ├── install_protoc.sh ├── Dockerfile.iroh-gateway ├── Dockerfile.iroh-store ├── Dockerfile.iroh-one ├── Dockerfile.iroh-p2p └── docker-compose.yaml ├── iroh-rpc-types ├── Cargo.toml ├── README.md └── src │ ├── lib.rs │ └── gateway.rs ├── iroh-api ├── src │ ├── lib.rs │ ├── error.rs │ ├── p2p.rs │ ├── store.rs │ └── config.rs ├── README.md └── 
Cargo.toml ├── iroh-embed ├── Cargo.toml ├── README.md └── src │ └── store.rs ├── iroh-rpc-client ├── README.md ├── Cargo.toml └── src │ ├── gateway.rs │ ├── store.rs │ └── lib.rs ├── LICENSE-MIT ├── RELEASE.md ├── iroh-share ├── README.md └── Cargo.toml ├── Dockerfile-ci ├── iroh-metrics ├── README.md ├── Cargo.toml └── src │ └── bitswap.rs ├── code_of_conduct.md └── README.md /iroh/fixtures/dir/a.txt: -------------------------------------------------------------------------------- 1 | File A 2 | -------------------------------------------------------------------------------- /iroh-localops/src/lib.rs: -------------------------------------------------------------------------------- 1 | pub mod process; 2 | -------------------------------------------------------------------------------- /iroh/fixtures/dir/subdir/c.txt: -------------------------------------------------------------------------------- 1 | And file C... 2 | -------------------------------------------------------------------------------- /iroh-util/.gitignore: -------------------------------------------------------------------------------- 1 | lock_test.result 2 | *.lock 3 | -------------------------------------------------------------------------------- /iroh/fixtures/dir/subdir/b.txt: -------------------------------------------------------------------------------- 1 | This is file B 2 | -------------------------------------------------------------------------------- /iroh/fixtures/file.txt: -------------------------------------------------------------------------------- 1 | This is a file which has a size 2 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | /iroh_gateway/test_files/* 3 | .env 4 | Cargo.lock 5 | -------------------------------------------------------------------------------- /iroh-resolver/fixtures/QmTh6zphkkZXhLimR5hfy1QnWrzf6EwP15r5aQqSzhUCYz: 
-------------------------------------------------------------------------------- 1 | 2 |  ./bar.txt -------------------------------------------------------------------------------- /iroh-resolver/fixtures/QmRZQMR6cpczdJAF4xXtisda3DbvFrHxuwi5nF2NJKZvzC: -------------------------------------------------------------------------------- 1 | 2 |  ../hello.txt -------------------------------------------------------------------------------- /iroh-resolver/fixtures/QmZULkCELmmk5XNfCgTnCyFgAVxBRBXyDHGGMVoLFLiXEN: -------------------------------------------------------------------------------- 1 | 2 | hello 3 |  -------------------------------------------------------------------------------- /iroh-resolver/fixtures/QmaRGe7bVmVaLmxbrMiVNXqW4pRNNp3xq7hFtyRKA3mtJL: -------------------------------------------------------------------------------- 1 | 2 | world 3 |  -------------------------------------------------------------------------------- /iroh-resolver/fixtures/bafkreicysg23kiwv34eg2d7qweipxwosdo2py4ldv42nbauguluen5v6am: -------------------------------------------------------------------------------- 1 | hello 2 | -------------------------------------------------------------------------------- /iroh-resolver/fixtures/bafkreihcldjer7njjrrxknqh67cestxa7s7jf4nhnp62y6k4twcbahvtc4: -------------------------------------------------------------------------------- 1 | world 2 | -------------------------------------------------------------------------------- /iroh-resolver/fixtures/QmZSCBhytmu1Mr5gVrsXsB6D8S2XMQXSoofHdPxtPGrZBj: -------------------------------------------------------------------------------- 1 | 2 | ../../hello.txt -------------------------------------------------------------------------------- /iroh-util/tests/config.a.toml: -------------------------------------------------------------------------------- 1 | port = 5000 2 | 3 | [map] 4 | seven = 7 5 | eight = 8 6 | nine = 9 7 | -------------------------------------------------------------------------------- 
/iroh-car/tests/testv1.car: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/n0-computer/beetle/HEAD/iroh-car/tests/testv1.car -------------------------------------------------------------------------------- /iroh-car/tests/carv1_basic.car: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/n0-computer/beetle/HEAD/iroh-car/tests/carv1_basic.car -------------------------------------------------------------------------------- /iroh-resolver/src/lib.rs: -------------------------------------------------------------------------------- 1 | pub mod dns_resolver; 2 | pub mod resolver; 3 | 4 | pub use resolver::{Path, PathType}; 5 | -------------------------------------------------------------------------------- /iroh-resolver/fixtures/QmP9yKRwuji5i7RTgrevwJwXp7uqQu1prv88nxq9uj99rW: -------------------------------------------------------------------------------- 1 | 2 | ms or conditions. 
3 | 4 | 5 |  -------------------------------------------------------------------------------- /iroh-resolver/fixtures/big-foo.car: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/n0-computer/beetle/HEAD/iroh-resolver/fixtures/big-foo.car -------------------------------------------------------------------------------- /iroh-resolver/fixtures/zero_0B.zst: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/n0-computer/beetle/HEAD/iroh-resolver/fixtures/zero_0B.zst -------------------------------------------------------------------------------- /iroh-resolver/fixtures/uicro_1B.zst: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/n0-computer/beetle/HEAD/iroh-resolver/fixtures/uicro_1B.zst -------------------------------------------------------------------------------- /iroh-resolver/fixtures/uicro_34B.zst: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/n0-computer/beetle/HEAD/iroh-resolver/fixtures/uicro_34B.zst -------------------------------------------------------------------------------- /iroh-resolver/fixtures/uicro_50B.zst: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/n0-computer/beetle/HEAD/iroh-resolver/fixtures/uicro_50B.zst -------------------------------------------------------------------------------- /iroh-bitswap/src/server/ewma.rs: -------------------------------------------------------------------------------- 1 | pub fn ewma(old: f64, new: f64, alpha: f64) -> f64 { 2 | new * alpha + (1. 
- alpha) * old 3 | } 4 | -------------------------------------------------------------------------------- /iroh-resolver/fixtures/large_repeat_1GiB.zst: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/n0-computer/beetle/HEAD/iroh-resolver/fixtures/large_repeat_1GiB.zst -------------------------------------------------------------------------------- /iroh-resolver/fixtures/large_repeat_5GiB.zst: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/n0-computer/beetle/HEAD/iroh-resolver/fixtures/large_repeat_5GiB.zst -------------------------------------------------------------------------------- /iroh/src/lib.rs: -------------------------------------------------------------------------------- 1 | mod config; 2 | pub mod doc; 3 | pub mod metrics; 4 | pub mod p2p; 5 | pub mod run; 6 | pub mod services; 7 | mod size; 8 | -------------------------------------------------------------------------------- /iroh-resolver/fixtures/repeat_0.04GiB_174.zst: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/n0-computer/beetle/HEAD/iroh-resolver/fixtures/repeat_0.04GiB_174.zst -------------------------------------------------------------------------------- /iroh-resolver/fixtures/repeat_0.04GiB_174_1.zst: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/n0-computer/beetle/HEAD/iroh-resolver/fixtures/repeat_0.04GiB_174_1.zst -------------------------------------------------------------------------------- /iroh-resolver/fixtures/repeat_0.04GiB_175.zst: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/n0-computer/beetle/HEAD/iroh-resolver/fixtures/repeat_0.04GiB_175.zst -------------------------------------------------------------------------------- 
/iroh-resolver/fixtures/repeat_0.04GiB_175_1.zst: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/n0-computer/beetle/HEAD/iroh-resolver/fixtures/repeat_0.04GiB_175_1.zst -------------------------------------------------------------------------------- /iroh-util/tests/config.b.toml: -------------------------------------------------------------------------------- 1 | port = 5000 2 | enabled = true 3 | list = ["changed", "values"] 4 | 5 | [map] 6 | four = 4 7 | five = 5 8 | six = 6 9 | -------------------------------------------------------------------------------- /.dockerignore: -------------------------------------------------------------------------------- 1 | **/target* 2 | # ignore edits to dockerfiles so they don't invalidate cache 3 | # dockerfiles shouldn't be in the build image anyway 4 | **/docker/Dockerfile* -------------------------------------------------------------------------------- /iroh-one/src/lib.rs: -------------------------------------------------------------------------------- 1 | pub mod cli; 2 | pub mod config; 3 | pub mod mem_p2p; 4 | pub mod mem_store; 5 | #[cfg(all(feature = "http-uds-gateway", unix))] 6 | pub mod uds; 7 | -------------------------------------------------------------------------------- /iroh-resolver/fixtures/QmNyLad1dWGS6mv2zno4iEviBSYSUR2SrQ8JoZNDz1UHYy: -------------------------------------------------------------------------------- 1 | 2 | jdplicitly state otherwise, any contribution intentionally submitted 3 | for inclusion in this crate by yod -------------------------------------------------------------------------------- /iroh-resolver/fixtures/QmcXoBdCgmFMoNbASaQCNVswRuuuqbw4VvA7e5GtHbhRNp: -------------------------------------------------------------------------------- 1 | 2 | jdu, as defined in the Apache-2.0 license, shall 3 | be dual licensed as above, without any additional terd 
-------------------------------------------------------------------------------- /iroh-resolver/fixtures/QmT7qkMZnZNDACJ8CT4PnVkxXKJfcKNVggkygzRcvZE72B: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/n0-computer/beetle/HEAD/iroh-resolver/fixtures/QmT7qkMZnZNDACJ8CT4PnVkxXKJfcKNVggkygzRcvZE72B -------------------------------------------------------------------------------- /iroh-resolver/fixtures/QmUr9cs4mhWxabKqm9PYPSQQ6AQGbHJBtyrNmxtKgxqUx9: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/n0-computer/beetle/HEAD/iroh-resolver/fixtures/QmUr9cs4mhWxabKqm9PYPSQQ6AQGbHJBtyrNmxtKgxqUx9 -------------------------------------------------------------------------------- /iroh-resolver/fixtures/QmcHTZfwWWYG2Gbv9wR6bWZBvAgpFV5BcDoLrC2XMCkggn: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/n0-computer/beetle/HEAD/iroh-resolver/fixtures/QmcHTZfwWWYG2Gbv9wR6bWZBvAgpFV5BcDoLrC2XMCkggn -------------------------------------------------------------------------------- /iroh-resolver/fixtures/QmdkGfDx42RNdAZFALHn5hjHqUq7L9o6Ef4zLnFEu3Y4Go: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/n0-computer/beetle/HEAD/iroh-resolver/fixtures/QmdkGfDx42RNdAZFALHn5hjHqUq7L9o6Ef4zLnFEu3Y4Go -------------------------------------------------------------------------------- /iroh-resolver/fixtures/QmfTVUNatSpmZUERu62hwSEuLHEUNuY8FFuzFL5n187yGq: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/n0-computer/beetle/HEAD/iroh-resolver/fixtures/QmfTVUNatSpmZUERu62hwSEuLHEUNuY8FFuzFL5n187yGq -------------------------------------------------------------------------------- /iroh-resolver/fixtures/QmccJ8pV5hG7DEbq66ih1ZtowxgvqVS6imt98Ku62J2WRw: 
-------------------------------------------------------------------------------- 1 | 2 | jd# iroh 3 | 4 | 5 | ## License 6 | 7 | 8 | Licensed under either of Apache License, Versiond -------------------------------------------------------------------------------- /iroh-resolver/fixtures/QmUajVwSkEp9JvdW914Qh1BCMRSUf2ztiQa6jqy1aWhwJv: -------------------------------------------------------------------------------- 1 | 2 | jd 3 | 2.0 or MIT license at your option. 4 | 5 | 6 |
7 | 8 | 9 | Unless you exd -------------------------------------------------------------------------------- /.cargo/config.toml: -------------------------------------------------------------------------------- 1 | [alias] 2 | xtask = "run --package xtask --" 3 | 4 | [target.x86_64-pc-windows-gnu] 5 | linker = "x86_64-w64-mingw32-gcc" 6 | 7 | [build] 8 | rustflags = ["-Wmissing_debug_implementations"] 9 | -------------------------------------------------------------------------------- /iroh-resolver/fixtures/bafybeietod5kx72jgbngoontthoax6nva4edkjnieghwqfzenstg4gil5i: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/n0-computer/beetle/HEAD/iroh-resolver/fixtures/bafybeietod5kx72jgbngoontthoax6nva4edkjnieghwqfzenstg4gil5i -------------------------------------------------------------------------------- /iroh-resolver/fixtures/bafybeihmgpuwcdrfi47gfxisll7kmurvi6kd7rht5hlq2ed5omxobfip3a: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/n0-computer/beetle/HEAD/iroh-resolver/fixtures/bafybeihmgpuwcdrfi47gfxisll7kmurvi6kd7rht5hlq2ed5omxobfip3a -------------------------------------------------------------------------------- /iroh-unixfs/build.rs: -------------------------------------------------------------------------------- 1 | fn main() { 2 | prost_build::Config::new() 3 | .bytes([".unixfs_pb.Data", ".merkledag_pb.PBNode.Data"]) 4 | .compile_protos(&["src/unixfs.proto", "src/merkledag.proto"], &["src"]) 5 | .unwrap(); 6 | } 7 | -------------------------------------------------------------------------------- /iroh-store/src/lib.rs: -------------------------------------------------------------------------------- 1 | mod cf; 2 | pub mod cli; 3 | pub mod config; 4 | pub mod metrics; 5 | pub mod rpc; 6 | mod store; 7 | 8 | pub use crate::config::Config; 9 | pub use crate::store::Store; 10 | 11 | pub(crate) const VERSION: &str = 
env!("CARGO_PKG_VERSION"); 12 | -------------------------------------------------------------------------------- /xtask/README.md: -------------------------------------------------------------------------------- 1 | # iroh xtasks 2 | 3 | This crate contains automation tasks similar to `make` or `npm run` commands in other languages, using the [cargo xtask pattern](https://github.com/matklad/cargo-xtask). 4 | 5 | from the root of the project run `cargo xtask $TASK` to execute a task. -------------------------------------------------------------------------------- /iroh-bitswap/build.rs: -------------------------------------------------------------------------------- 1 | fn main() { 2 | prost_build::Config::new() 3 | .bytes([ 4 | ".bitswap_pb.Message.Block.data", 5 | ".bitswap_pb.Message.blocks", 6 | ]) 7 | .compile_protos(&["src/bitswap_pb.proto"], &["src"]) 8 | .unwrap(); 9 | } 10 | -------------------------------------------------------------------------------- /iroh-car/src/lib.rs: -------------------------------------------------------------------------------- 1 | //! Implementation of the [car](https://ipld.io/specs/transport/car/) format. 
2 | 3 | mod error; 4 | mod header; 5 | mod reader; 6 | mod util; 7 | mod writer; 8 | 9 | pub use crate::header::CarHeader; 10 | pub use crate::reader::CarReader; 11 | pub use crate::writer::CarWriter; 12 | -------------------------------------------------------------------------------- /codecov.yml: -------------------------------------------------------------------------------- 1 | codecov: 2 | notify: 3 | require_ci_to_pass: false 4 | after_n_builds: 1 5 | comment: false 6 | coverage: 7 | status: 8 | project: 9 | default: 10 | informational: true 11 | patch: 12 | default: 13 | informational: true 14 | github_checks: 15 | annotations: false -------------------------------------------------------------------------------- /iroh-p2p/src/lib.rs: -------------------------------------------------------------------------------- 1 | mod behaviour; 2 | pub mod cli; 3 | pub mod config; 4 | mod keys; 5 | pub mod metrics; 6 | mod node; 7 | mod providers; 8 | pub mod rpc; 9 | mod swarm; 10 | 11 | pub use self::config::*; 12 | pub use self::keys::{DiskStorage, Keychain, MemoryStorage}; 13 | pub use self::node::*; 14 | 15 | pub(crate) const VERSION: &str = env!("CARGO_PKG_VERSION"); 16 | -------------------------------------------------------------------------------- /xtask/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "xtask" 3 | version.workspace = true 4 | edition.workspace = true 5 | publish = false 6 | rust-version.workspace = true 7 | 8 | [dependencies] 9 | anyhow.workspace = true 10 | clap = { workspace = true, features = ["derive"] } 11 | clap_mangen.workspace = true 12 | dirs-next.workspace = true 13 | xtaskops.workspace = true 14 | iroh.workspace = true 15 | -------------------------------------------------------------------------------- /iroh-gateway/src/lib.rs: -------------------------------------------------------------------------------- 1 | pub mod bad_bits; 2 | pub mod cli; 3 | pub mod 
client; 4 | pub mod config; 5 | pub mod constants; 6 | pub mod core; 7 | mod cors; 8 | mod error; 9 | pub mod handler_params; 10 | pub mod handlers; 11 | pub mod headers; 12 | pub mod metrics; 13 | pub mod response; 14 | mod rpc; 15 | pub mod templates; 16 | mod text; 17 | 18 | pub(crate) const VERSION: &str = env!("CARGO_PKG_VERSION"); 19 | -------------------------------------------------------------------------------- /iroh-util/src/exitcodes.rs: -------------------------------------------------------------------------------- 1 | //! iroh exit codes 2 | //! 3 | //! Exit code constants intended to be passed to 4 | //! `std::process::exit()` 5 | 6 | /// Alias for the numeric type that holds iroh exit codes. 7 | pub type IrohExitCode = i32; 8 | 9 | /// Successful exit 10 | pub const OK: IrohExitCode = 0; 11 | 12 | /// Generic error exit 13 | pub const ERROR: IrohExitCode = 1; 14 | 15 | /// Cannot acquire a resource lock 16 | pub const LOCKED: IrohExitCode = 2; 17 | -------------------------------------------------------------------------------- /iroh-util/src/human.rs: -------------------------------------------------------------------------------- 1 | use humansize::{format_size, DECIMAL}; 2 | 3 | /// Format byte count as a human-readable size string eg: 1_000_000u64 -> "1 MB" 4 | /// this func isolates a library + configuration choice 5 | pub fn format_bytes(size: u64) -> String { 6 | format_size(size, DECIMAL) 7 | } 8 | 9 | #[cfg(test)] 10 | mod tests { 11 | use super::*; 12 | #[test] 13 | fn test_format_bytes() { 14 | assert_eq!(format_bytes(1_000_000u64), "1 MB"); 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /iroh-localops/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "iroh-localops" 3 | authors = ["dignifiedquire ", "b5 "] 4 | description = "Iroh specific process management." 
5 | version.workspace = true 6 | edition.workspace = true 7 | rust-version.workspace = true 8 | license.workspace = true 9 | repository.workspace = true 10 | 11 | [dependencies] 12 | anyhow.workspace = true 13 | 14 | [target.'cfg(unix)'.dependencies] 15 | nix = { workspace = true, features = ["signal", "process"] } 16 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | # To get started with Dependabot version updates, you'll need to specify which 2 | # package ecosystems to update and where the package manifests are located. 3 | # Please see the documentation for all configuration options: 4 | # https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates 5 | 6 | version: 2 7 | updates: 8 | - package-ecosystem: "cargo" 9 | directory: "/" # Location of package manifests 10 | schedule: 11 | interval: "monthly" 12 | -------------------------------------------------------------------------------- /LICENSE-APACHE: -------------------------------------------------------------------------------- 1 | Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at 2 | 3 | http://www.apache.org/licenses/LICENSE-2.0 4 | 5 | Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
-------------------------------------------------------------------------------- /examples/embed/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "iroh-example-embed" 3 | authors = ["team@n0.computer"] 4 | description = "Example of using iroh-embed" 5 | publish = false 6 | version.workspace = true 7 | edition.workspace = true 8 | license.workspace = true 9 | repository.workspace = true 10 | 11 | [dependencies] 12 | anyhow.workspace = true 13 | futures-util.workspace = true 14 | iroh-api.workspace = true 15 | iroh-embed.workspace = true 16 | testdir.workspace = true 17 | tokio = { workspace = true, features = ["rt-multi-thread"] } 18 | -------------------------------------------------------------------------------- /iroh-unixfs/src/unixfs.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package unixfs_pb; 4 | 5 | message Data { 6 | enum DataType { 7 | Raw = 0; 8 | Directory = 1; 9 | File = 2; 10 | Metadata = 3; 11 | Symlink = 4; 12 | HAMTShard = 5; 13 | } 14 | 15 | DataType Type = 1; 16 | optional bytes Data = 2; 17 | optional uint64 filesize = 3; 18 | repeated uint64 blocksizes = 4; 19 | 20 | optional uint64 hashType = 5; 21 | optional uint64 fanout = 6; 22 | } 23 | 24 | message Metadata { 25 | optional string MimeType = 1; 26 | } 27 | -------------------------------------------------------------------------------- /iroh-unixfs/src/merkledag.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package merkledag_pb; 4 | 5 | // An IPFS MerkleDAG Link 6 | message PBLink { 7 | // multihash of the target object 8 | optional bytes Hash = 1; 9 | 10 | // utf string name. 
should be unique per object 11 | optional string Name = 2; 12 | 13 | // cumulative size of target object 14 | optional uint64 Tsize = 3; 15 | } 16 | 17 | // An IPFS MerkleDAG Node 18 | message PBNode { 19 | 20 | // refs to other objects 21 | repeated PBLink Links = 2; 22 | 23 | // opaque user data 24 | optional bytes Data = 1; 25 | } 26 | -------------------------------------------------------------------------------- /iroh/src/metrics.rs: -------------------------------------------------------------------------------- 1 | use iroh_metrics::config::Config as MetricsConfig; 2 | 3 | pub fn metrics_config_with_compile_time_info(cfg: MetricsConfig) -> MetricsConfig { 4 | // compile time configuration 5 | cfg.with_service_name(env!("CARGO_PKG_NAME").to_string()) 6 | .with_build( 7 | git_version::git_version!( 8 | prefix = "git:", 9 | cargo_prefix = "cargo:", 10 | fallback = "unknown" 11 | ) 12 | .to_string(), 13 | ) 14 | .with_version(env!("CARGO_PKG_VERSION").to_string()) 15 | } 16 | -------------------------------------------------------------------------------- /iroh-bitswap/src/metrics.rs: -------------------------------------------------------------------------------- 1 | use iroh_metrics::config::Config as MetricsConfig; 2 | 3 | pub fn metrics_config_with_compile_time_info(cfg: MetricsConfig) -> MetricsConfig { 4 | // compile time configuration 5 | cfg.with_service_name(env!("CARGO_PKG_NAME").to_string()) 6 | .with_build( 7 | git_version::git_version!( 8 | prefix = "git:", 9 | cargo_prefix = "cargo:", 10 | fallback = "unknown" 11 | ) 12 | .to_string(), 13 | ) 14 | .with_version(env!("CARGO_PKG_VERSION").to_string()) 15 | } 16 | -------------------------------------------------------------------------------- /iroh-p2p/src/metrics.rs: -------------------------------------------------------------------------------- 1 | use crate::VERSION; 2 | use iroh_metrics::config::Config as MetricsConfig; 3 | 4 | pub fn metrics_config_with_compile_time_info(cfg: MetricsConfig) -> 
MetricsConfig { 5 | // compile time configuration 6 | cfg.with_service_name(env!("CARGO_PKG_NAME").to_string()) 7 | .with_build( 8 | git_version::git_version!( 9 | prefix = "git:", 10 | cargo_prefix = "cargo:", 11 | fallback = "unknown" 12 | ) 13 | .to_string(), 14 | ) 15 | .with_version(VERSION.to_string()) 16 | } 17 | -------------------------------------------------------------------------------- /iroh-store/src/metrics.rs: -------------------------------------------------------------------------------- 1 | use crate::VERSION; 2 | use iroh_metrics::config::Config as MetricsConfig; 3 | 4 | pub fn metrics_config_with_compile_time_info(cfg: MetricsConfig) -> MetricsConfig { 5 | // compile time configuration 6 | cfg.with_service_name(env!("CARGO_PKG_NAME").to_string()) 7 | .with_build( 8 | git_version::git_version!( 9 | prefix = "git:", 10 | cargo_prefix = "cargo:", 11 | fallback = "unknown" 12 | ) 13 | .to_string(), 14 | ) 15 | .with_version(VERSION.to_string()) 16 | } 17 | -------------------------------------------------------------------------------- /iroh-gateway/src/metrics.rs: -------------------------------------------------------------------------------- 1 | use iroh_metrics::config::Config as MetricsConfig; 2 | 3 | use crate::VERSION; 4 | 5 | pub fn metrics_config_with_compile_time_info(cfg: MetricsConfig) -> MetricsConfig { 6 | // compile time configuration 7 | cfg.with_service_name(env!("CARGO_PKG_NAME").to_string()) 8 | .with_build( 9 | git_version::git_version!( 10 | prefix = "git:", 11 | cargo_prefix = "cargo:", 12 | fallback = "unknown" 13 | ) 14 | .to_string(), 15 | ) 16 | .with_version(VERSION.to_string()) 17 | } 18 | -------------------------------------------------------------------------------- /docker/install_protoc.sh: -------------------------------------------------------------------------------- 1 | # install latest protocol buffer compiler. Yes, it's really this irritating. 2 | # recent build URLs are missing "3" version prefix. 
version is actually "3.21.9" 3 | PROTOC_VERSION=21.9 4 | case ${TARGETPLATFORM} in 5 | "linux/amd64") PROTOC_ZIP=protoc-21.9-linux-x86_64.zip ;; 6 | "linux/arm64") PROTOC_ZIP=protoc-21.9-linux-aarch_64.zip ;; 7 | *) exit 1 8 | esac 9 | 10 | curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v$PROTOC_VERSION/$PROTOC_ZIP 11 | unzip -o $PROTOC_ZIP -d /usr/local bin/protoc 12 | unzip -o $PROTOC_ZIP -d /usr/local 'include/*' 13 | rm -f $PROTOC_ZIP 14 | echo "installed $($PROTOC --version)" 15 | -------------------------------------------------------------------------------- /iroh-p2p/src/cli.rs: -------------------------------------------------------------------------------- 1 | use std::{collections::HashMap, path::PathBuf}; 2 | 3 | use clap::Parser; 4 | 5 | #[derive(Parser, Debug, Clone)] 6 | #[clap(author, version, about, long_about = None)] 7 | pub struct Args { 8 | #[clap(long = "metrics")] 9 | metrics: bool, 10 | #[clap(long = "tracing")] 11 | tracing: bool, 12 | #[clap(long)] 13 | pub cfg: Option, 14 | } 15 | 16 | impl Args { 17 | pub fn make_overrides_map(&self) -> HashMap { 18 | let mut map = HashMap::new(); 19 | map.insert("metrics.collect".to_string(), self.metrics.to_string()); 20 | map.insert("metrics.tracing".to_string(), self.tracing.to_string()); 21 | map 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /iroh-bitswap/src/server/peer_ledger.rs: -------------------------------------------------------------------------------- 1 | use ahash::{AHashMap, AHashSet}; 2 | use cid::Cid; 3 | use libp2p::PeerId; 4 | 5 | #[derive(Debug, Default, Clone, PartialEq, Eq)] 6 | pub struct PeerLedger { 7 | cids: AHashMap>, 8 | } 9 | 10 | impl PeerLedger { 11 | pub fn wants(&mut self, peer: PeerId, cid: Cid) { 12 | self.cids.entry(cid).or_default().insert(peer); 13 | } 14 | 15 | pub fn cancel_want(&mut self, peer: &PeerId, cid: &Cid) { 16 | if let Some(peers) = self.cids.get_mut(cid) { 17 | 
peers.remove(peer); 18 | } 19 | } 20 | 21 | pub fn peers(&self, cid: &Cid) -> Option<&AHashSet> { 22 | self.cids.get(cid) 23 | } 24 | } 25 | -------------------------------------------------------------------------------- /examples/importer/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "iroh-example-importer" 3 | authors = ["team@n0.computer"] 4 | publish = false 5 | version.workspace = true 6 | edition.workspace = true 7 | license.workspace = true 8 | repository.workspace = true 9 | 10 | [dependencies] 11 | anyhow.workspace = true 12 | bytes.workspace = true 13 | bytesize.workspace = true 14 | clap = { workspace = true, features = ["derive"] } 15 | futures.workspace = true 16 | indicatif.workspace = true 17 | iroh-car.workspace = true 18 | iroh-unixfs.workspace = true 19 | iroh-resolver.workspace = true 20 | iroh-rpc-client.workspace = true 21 | iroh-util.workspace = true 22 | par-stream = { workspace = true, features = ["runtime-tokio"]} 23 | tokio = { workspace = true, features = ["rt-multi-thread", "macros"] } 24 | -------------------------------------------------------------------------------- /iroh-rpc-types/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "iroh-rpc-types" 3 | authors = ["Friedel Ziegelmayer ", "ramfox"] 4 | description = "RPC type definitions for iroh" 5 | version.workspace = true 6 | edition.workspace = true 7 | license.workspace = true 8 | repository.workspace = true 9 | rust-version.workspace = true 10 | 11 | [dependencies] 12 | anyhow.workspace = true 13 | bytes = { workspace = true, features = ["serde"] } 14 | cid = { workspace = true, features = ["serde-codec"] } 15 | derive_more.workspace = true 16 | futures.workspace = true 17 | libp2p = { workspace = true, features = ["serde"] } 18 | quic-rpc.workspace = true 19 | serde.workspace = true 20 | serde-error.workspace = true 21 | 
serde_with.workspace = true 22 | tokio = { workspace = true, default-features = false } 23 | -------------------------------------------------------------------------------- /iroh-bitswap/src/error.rs: -------------------------------------------------------------------------------- 1 | use thiserror::Error; 2 | 3 | use crate::message::{BlockPresenceType, WantType}; 4 | 5 | #[derive(Debug, Error)] 6 | pub enum Error { 7 | #[error("Error while reading from socket: {0}")] 8 | Read(#[from] std::io::Error), 9 | #[error("Error while decoding bitswap message: {0}")] 10 | Protobuf(#[from] prost::DecodeError), 11 | #[error("Error while parsing cid: {0}")] 12 | Cid(#[from] cid::Error), 13 | #[error("Error while parsing multihash: {0}")] 14 | Multihash(#[from] multihash::Error), 15 | #[error("Invalid block presence type {0}")] 16 | InvalidBlockPresenceType(#[from] num_enum::TryFromPrimitiveError), 17 | #[error("Invalid want type {0}")] 18 | InvalidWantType(#[from] num_enum::TryFromPrimitiveError), 19 | } 20 | -------------------------------------------------------------------------------- /iroh-one/src/mem_p2p.rs: -------------------------------------------------------------------------------- 1 | /// A p2p instance listening on a memory rpc channel. 2 | use iroh_p2p::config::Config; 3 | use iroh_p2p::{DiskStorage, Keychain, Node}; 4 | use iroh_rpc_types::p2p::P2pAddr; 5 | use tokio::task; 6 | use tokio::task::JoinHandle; 7 | use tracing::error; 8 | 9 | /// Starts a new p2p node, using the given mem rpc channel. 
10 | pub async fn start(rpc_addr: P2pAddr, config: Config) -> anyhow::Result> { 11 | let kc = Keychain::::new(config.key_store_path.clone()).await?; 12 | 13 | let mut p2p = Node::new(config, rpc_addr, kc).await?; 14 | 15 | // Start services 16 | let p2p_task = task::spawn(async move { 17 | if let Err(err) = p2p.run().await { 18 | error!("{:?}", err); 19 | } 20 | }); 21 | 22 | Ok(p2p_task) 23 | } 24 | -------------------------------------------------------------------------------- /iroh-car/src/error.rs: -------------------------------------------------------------------------------- 1 | use thiserror::Error; 2 | 3 | /// Car utility error 4 | #[derive(Debug, Error)] 5 | pub enum Error { 6 | #[error("Failed to parse CAR file: {0}")] 7 | Parsing(String), 8 | #[error("Invalid CAR file: {0}")] 9 | InvalidFile(String), 10 | #[error("Io error: {0}")] 11 | Io(#[from] std::io::Error), 12 | #[error("Cbor encoding error: {0}")] 13 | Cbor(#[from] ipld::error::Error), 14 | #[error("ld read too large {0}")] 15 | LdReadTooLarge(usize), 16 | } 17 | 18 | impl From for Error { 19 | fn from(err: cid::Error) -> Error { 20 | Error::Parsing(err.to_string()) 21 | } 22 | } 23 | 24 | impl From for Error { 25 | fn from(err: cid::multihash::Error) -> Error { 26 | Error::Parsing(err.to_string()) 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /iroh-car/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "iroh-car" 3 | authors = ["dignifiedquire "] 4 | description = "Implementation the car files for iroh" 5 | version.workspace = true 6 | edition.workspace = true 7 | license.workspace = true 8 | repository.workspace = true 9 | rust-version.workspace = true 10 | 11 | [dependencies] 12 | cid.workspace = true 13 | futures.workspace = true 14 | integer-encoding = { workspace = true, features = ["tokio_async"] } 15 | ipld = { package = "libipld", version = "0.15" } 16 | ipld-cbor = { 
package = "libipld-cbor", version = "0.15" } 17 | thiserror.workspace = true 18 | tokio = { workspace = true, features = ["io-util"] } 19 | 20 | [dev-dependencies] 21 | multihash.workspace = true 22 | tokio = { workspace = true, features = ["macros", "sync", "rt", "fs", "io-util"] } 23 | 24 | [features] 25 | -------------------------------------------------------------------------------- /iroh-api/src/lib.rs: -------------------------------------------------------------------------------- 1 | pub use crate::api::Api; 2 | pub use crate::api::OutType; 3 | pub use crate::config::Config; 4 | pub use crate::error::ApiError; 5 | pub use crate::p2p::P2p as P2pApi; 6 | pub use crate::p2p::PeerIdOrAddr; 7 | pub use bytes::Bytes; 8 | pub use cid::Cid; 9 | pub use iroh_resolver::resolver::Path as IpfsPath; 10 | pub use iroh_rpc_client::{ClientStatus, Lookup, ServiceStatus, ServiceType, StatusType}; 11 | pub use iroh_unixfs::builder::{ 12 | Config as UnixfsConfig, DirectoryBuilder, Entry as UnixfsEntry, FileBuilder, SymlinkBuilder, 13 | }; 14 | pub use iroh_unixfs::chunker::{ChunkerConfig, DEFAULT_CHUNKS_SIZE}; 15 | pub use iroh_unixfs::Block; 16 | pub use libp2p::gossipsub::MessageId; 17 | pub use libp2p::{Multiaddr, PeerId}; 18 | 19 | mod api; 20 | mod error; 21 | mod p2p; 22 | mod store; 23 | 24 | pub mod config; 25 | pub mod fs; 26 | -------------------------------------------------------------------------------- /iroh-bitswap/src/client/session/sent_want_blocks_tracker.rs: -------------------------------------------------------------------------------- 1 | use ahash::{AHashMap, AHashSet}; 2 | use cid::Cid; 3 | use libp2p::PeerId; 4 | 5 | /// Keeps track of which peers we've sent a want-block to. 
6 | #[derive(Debug, Default)] 7 | pub struct SentWantBlocksTracker { 8 | sent_want_blocks: AHashMap>, 9 | } 10 | 11 | impl SentWantBlocksTracker { 12 | pub fn add_sent_want_blocks_to(&mut self, peer: &PeerId, keys: &[Cid]) { 13 | let entry = self.sent_want_blocks.entry(*peer).or_default(); 14 | for key in keys { 15 | entry.insert(*key); 16 | } 17 | } 18 | 19 | pub fn have_sent_want_block_to(&self, peer: &PeerId, cid: &Cid) -> bool { 20 | self.sent_want_blocks 21 | .get(peer) 22 | .map(|cids| cids.contains(cid)) 23 | .unwrap_or_default() 24 | } 25 | } 26 | -------------------------------------------------------------------------------- /iroh-api/src/error.rs: -------------------------------------------------------------------------------- 1 | use anyhow::{anyhow, Error}; 2 | use std::io; 3 | use thiserror::Error as ThisError; 4 | 5 | /// LockError is the set of known program lock errors 6 | #[derive(ThisError, Debug)] 7 | pub enum ApiError<'a> { 8 | #[error("Can't connect to {service}. Is the service running?")] 9 | ConnectionRefused { service: &'a str }, 10 | /// catchall error type 11 | #[error("{source}")] 12 | Uncategorized { 13 | #[from] 14 | source: anyhow::Error, 15 | }, 16 | } 17 | 18 | pub fn map_service_error(service: &'static str, e: Error) -> Error { 19 | let io_error = e.root_cause().downcast_ref::(); 20 | if let Some(io_error) = io_error { 21 | if io_error.kind() == io::ErrorKind::ConnectionRefused { 22 | return anyhow!(ApiError::ConnectionRefused { service }); 23 | } 24 | } 25 | e 26 | } 27 | -------------------------------------------------------------------------------- /iroh-util/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "iroh-util" 3 | authors = ["dignifiedquire "] 4 | description = "Utilities for iroh" 5 | version.workspace = true 6 | edition.workspace = true 7 | license.workspace = true 8 | repository.workspace = true 9 | rust-version.workspace = true 10 | 11 | 
[dependencies] 12 | anyhow.workspace = true 13 | cid.workspace = true 14 | config.workspace = true 15 | ctrlc.workspace = true 16 | dirs-next.workspace = true 17 | futures.workspace = true 18 | humansize.workspace = true 19 | rlimit.workspace = true 20 | serde = { workspace = true, features = ["derive"] } 21 | sysinfo.workspace = true 22 | thiserror.workspace = true 23 | toml.workspace = true 24 | tracing.workspace = true 25 | 26 | [dev-dependencies] 27 | temp-env.workspace = true 28 | testdir.workspace = true 29 | 30 | [target.'cfg(unix)'.dev-dependencies] 31 | nix.workspace = true 32 | -------------------------------------------------------------------------------- /iroh-embed/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "iroh-embed" 3 | authors = ["Floris Bruynooghe "] 4 | description = "Embedable iroh library" 5 | version.workspace = true 6 | edition.workspace = true 7 | license.workspace = true 8 | repository.workspace = true 9 | rust-version.workspace = true 10 | 11 | [dependencies] 12 | anyhow.workspace = true 13 | futures.workspace = true 14 | iroh-api.workspace = true 15 | iroh-gateway.workspace = true 16 | iroh-metrics.workspace = true 17 | iroh-one.workspace = true 18 | iroh-p2p.workspace = true 19 | iroh-resolver.workspace = true 20 | iroh-rpc-client.workspace = true 21 | iroh-rpc-types.workspace = true 22 | iroh-store.workspace = true 23 | iroh-unixfs.workspace = true 24 | reqwest = { workspace = true, features = ["rustls-tls", "json"] } 25 | tokio.workspace = true 26 | 27 | [dev-dependencies] 28 | testdir.workspace = true 29 | tokio-test.workspace = true 30 | -------------------------------------------------------------------------------- /iroh-store/src/cli.rs: -------------------------------------------------------------------------------- 1 | use std::{collections::HashMap, path::PathBuf}; 2 | 3 | use clap::Parser; 4 | 5 | #[derive(Parser, Debug)] 6 | #[clap(author, version, 
about)] 7 | pub struct Args { 8 | /// Path to the store 9 | #[clap(long, short)] 10 | pub path: Option, 11 | #[clap(long = "metrics")] 12 | metrics: bool, 13 | #[clap(long = "tracing")] 14 | tracing: bool, 15 | /// Path to the config file 16 | #[clap(long)] 17 | pub cfg: Option, 18 | } 19 | 20 | impl Args { 21 | pub fn make_overrides_map(&self) -> HashMap { 22 | let mut map = HashMap::new(); 23 | if let Some(path) = self.path.clone() { 24 | map.insert("path".to_string(), path.to_str().unwrap_or("").to_string()); 25 | } 26 | map.insert("metrics.collect".to_string(), self.metrics.to_string()); 27 | map.insert("metrics.tracing".to_string(), self.tracing.to_string()); 28 | map 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /iroh-rpc-types/README.md: -------------------------------------------------------------------------------- 1 | # iroh-rpc-types 2 | 3 | This crate defines the protocol for use by the 4 | [iroh-rpc-client](https://github.com/n0-computer/iroh/tree/main/iroh-rpc-client), 5 | which is used for [iroh](https://github.com/n0-computer/iroh) services to 6 | communicate internally via RPC. 7 | 8 | It defines a set of messages and traits that specify how each message is to be 9 | processed. 10 | 11 | These types can be used with any transport protocol in 12 | [quic-rpc](https://github.com/n0-computer/quic-rpc). 13 | 14 | ## License 15 | 16 | 17 | Licensed under either of Apache License, Version 18 | 2.0 or MIT license at your option. 19 | 20 | 21 |
22 | 23 | 24 | Unless you explicitly state otherwise, any contribution intentionally submitted 25 | for inclusion in this crate by you, as defined in the Apache-2.0 license, shall 26 | be dual licensed as above, without any additional terms or conditions. 27 | 28 | -------------------------------------------------------------------------------- /iroh-rpc-client/README.md: -------------------------------------------------------------------------------- 1 | # iroh-rpc-client 2 | 3 | [iroh](https://github.com/n0-computer/iroh) services internally communicate via 4 | RPC, using the [quic-rpc](https://github.com/n0-computer/quic-rpc) RPC system. 5 | 6 | TLDR: currently bincode encoded messages sent as http2 frames. 7 | 8 | These channels are meant for internal communication and are not a stable API. 9 | 10 | The types that define the RPC protocol are maintained in 11 | [iroh-rpc-types](https://github.com/n0-computer/iroh/tree/main/iroh-rpc-types). 12 | 13 | ## License 14 | 15 | 16 | Licensed under either of Apache License, Version 17 | 2.0 or MIT license at your option. 18 | 19 | 20 |
21 | 22 | 23 | Unless you explicitly state otherwise, any contribution intentionally submitted 24 | for inclusion in this crate by you, as defined in the Apache-2.0 license, shall 25 | be dual licensed as above, without any additional terms or conditions. 26 | 27 | 28 | -------------------------------------------------------------------------------- /iroh-rpc-client/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "iroh-rpc-client" 3 | authors = ["Friedel Ziegelmayer ", "ramfox"] 4 | description = "RPC type client for iroh" 5 | version.workspace = true 6 | edition.workspace = true 7 | license.workspace = true 8 | repository.workspace = true 9 | rust-version.workspace = true 10 | 11 | [dependencies] 12 | anyhow.workspace = true 13 | async-stream.workspace = true 14 | bytes.workspace = true 15 | cid.workspace = true 16 | config.workspace = true 17 | futures.workspace = true 18 | hyper.workspace = true 19 | iroh-metrics.workspace = true 20 | iroh-rpc-types.workspace = true 21 | iroh-util.workspace = true 22 | libp2p = { workspace = true, features = ["gossipsub"] } 23 | quic-rpc = { workspace = true, features = ["http2"] } 24 | serde = { workspace = true, features = ["derive"] } 25 | tokio = { workspace = true, features = ["sync"] } 26 | toml.workspace = true 27 | tracing.workspace = true 28 | 29 | [dev-dependencies] 30 | tokio-stream = { workspace = true, features = ["net"] } 31 | -------------------------------------------------------------------------------- /LICENSE-MIT: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy 4 | of this software and associated documentation files (the "Software"), to deal 5 | in the Software without restriction, including without limitation the rights 6 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 
| copies of the Software, and to permit persons to whom the Software is 8 | furnished to do so, subject to the following conditions: 9 | 10 | The above copyright notice and this permission notice shall be included in 11 | all copies or substantial portions of the Software. 12 | 13 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 19 | THE SOFTWARE. -------------------------------------------------------------------------------- /iroh-api/README.md: -------------------------------------------------------------------------------- 1 | # iroh-api 2 | 3 | [![crates.io](https://img.shields.io/crates/v/iroh-api.svg?style=flat-square)](https://crates.io/crates/iroh-api) 4 | [![Released API docs](https://img.shields.io/docsrs/iroh-api?style=flat-square)](https://docs.rs/iroh-api) 5 | [![MIT/Apache-2.0 licensed](https://img.shields.io/crates/l/iroh-api?style=flat-square)](../LICENSE-MIT) 6 | [![CI](https://img.shields.io/github/workflow/status/n0-computer/iroh/Continuous%20integration?style=flat-square)](https://github.com/n0-computer/iroh/actions?query=workflow%3A%22Continuous+integration%22) 7 | 8 | This contains the API for controlling [iroh](https://github.com/n0-computer/iroh). 9 | 10 | ## License 11 | 12 | 13 | Licensed under either of Apache License, Version 14 | 2.0 or MIT license at your option. 15 | 16 | 17 |
18 | 19 | 20 | Unless you explicitly state otherwise, any contribution intentionally submitted 21 | for inclusion in this crate by you, as defined in the Apache-2.0 license, shall 22 | be dual licensed as above, without any additional terms or conditions. 23 | 24 | -------------------------------------------------------------------------------- /iroh-api/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "iroh-api" 3 | readme = "README.md" 4 | description = "Rust API for Iroh" 5 | version.workspace = true 6 | edition.workspace = true 7 | license.workspace = true 8 | rust-version.workspace = true 9 | repository.workspace = true 10 | 11 | [dependencies] 12 | anyhow.workspace = true 13 | async-stream.workspace = true 14 | async-trait.workspace = true 15 | bytes.workspace = true 16 | cid.workspace = true 17 | config.workspace = true 18 | futures.workspace = true 19 | iroh-metrics.workspace = true 20 | iroh-resolver.workspace = true 21 | iroh-rpc-client.workspace = true 22 | iroh-rpc-types.workspace = true 23 | iroh-unixfs.workspace = true 24 | iroh-util.workspace = true 25 | libp2p.workspace = true 26 | relative-path.workspace = true 27 | serde = { workspace = true, features = ["derive"] } 28 | thiserror.workspace = true 29 | tokio.workspace = true 30 | tracing.workspace = true 31 | 32 | [dev-dependencies] 33 | criterion = { workspace = true, features = ["async_tokio"] } 34 | iroh-rpc-types.workspace = true 35 | iroh-store.workspace = true 36 | tempfile.workspace = true 37 | 38 | [[bench]] 39 | name = "add" 40 | harness = false 41 | -------------------------------------------------------------------------------- /iroh-rpc-types/src/lib.rs: -------------------------------------------------------------------------------- 1 | pub mod addr; 2 | pub mod gateway; 3 | pub mod p2p; 4 | pub mod store; 5 | 6 | use std::fmt; 7 | 8 | pub use addr::Addr; 9 | 10 | use serde::{Deserialize, Serialize}; 11 | 12 | pub 
trait NamedService { 13 | const NAME: &'static str; 14 | } 15 | 16 | #[derive(Serialize, Deserialize, Debug)] 17 | pub struct RpcError(serde_error::Error); 18 | 19 | impl std::error::Error for RpcError {} 20 | 21 | impl fmt::Display for RpcError { 22 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 23 | fmt::Debug::fmt(self, f) 24 | } 25 | } 26 | 27 | impl From for RpcError { 28 | fn from(e: anyhow::Error) -> Self { 29 | RpcError(serde_error::Error::new(&*e)) 30 | } 31 | } 32 | 33 | pub type RpcResult = std::result::Result; 34 | 35 | #[derive(Serialize, Deserialize, Debug)] 36 | pub struct WatchRequest; 37 | 38 | #[derive(Serialize, Deserialize, Debug)] 39 | pub struct WatchResponse { 40 | pub version: String, 41 | } 42 | 43 | #[derive(Serialize, Deserialize, Debug)] 44 | pub struct VersionRequest; 45 | 46 | #[derive(Serialize, Deserialize, Debug)] 47 | pub struct VersionResponse { 48 | pub version: String, 49 | } 50 | -------------------------------------------------------------------------------- /iroh-unixfs/src/lib.rs: -------------------------------------------------------------------------------- 1 | pub mod balanced_tree; 2 | pub mod builder; 3 | pub mod chunker; 4 | pub mod codecs; 5 | pub mod content_loader; 6 | pub mod hamt; 7 | pub mod indexer; 8 | mod types; 9 | pub mod unixfs; 10 | 11 | pub use crate::types::{Block, Link, LinkRef, Links, LoadedCid, PbLinks, Source}; 12 | 13 | use std::collections::BTreeSet; 14 | 15 | use crate::codecs::Codec; 16 | use anyhow::{bail, Context as _, Result}; 17 | use cid::Cid; 18 | use libipld::{prelude::Codec as _, Ipld, IpldCodec}; 19 | 20 | /// Extract links from the given content. 
21 | /// 22 | /// Links will be returned as a sorted vec 23 | pub fn parse_links(cid: &Cid, bytes: &[u8]) -> Result> { 24 | let codec = Codec::try_from(cid.codec()).context("unknown codec")?; 25 | let mut cids = BTreeSet::new(); 26 | let codec = match codec { 27 | Codec::DagCbor => IpldCodec::DagCbor, 28 | Codec::DagPb => IpldCodec::DagPb, 29 | Codec::DagJson => IpldCodec::DagJson, 30 | Codec::Raw => IpldCodec::Raw, 31 | _ => bail!("unsupported codec {:?}", codec), 32 | }; 33 | codec.references::(bytes, &mut cids)?; 34 | let links = cids.into_iter().collect(); 35 | Ok(links) 36 | } 37 | -------------------------------------------------------------------------------- /RELEASE.md: -------------------------------------------------------------------------------- 1 | # Release process 2 | 3 | 4 | ## Generating Changelog 5 | 6 | Install dependencies 7 | 8 | ```sh 9 | $ npm install -g conventional-changelog-cli 10 | $ cd iroh 11 | $ conventional-changelog -p angular 12 | ``` 13 | 14 | Add the output of that to `CHANGELOG.md`, and write a human-centric summary of changes. 15 | Update the linked output to reference the new version, which conventional-changelog doesn't know about: 16 | 17 | ```md 18 | # [](https://github.com/n0-computer/iroh/compare/v0.1.1...v) (2022-11-28) 19 | ``` 20 | becomes: 21 | ```md 22 | # [v0.1.2](https://github.com/n0-computer/iroh/compare/v0.1.1...v0.1.2) (2022-11-28) 23 | ``` 24 | 25 | ## Publishing 26 | 27 | Publishing on crates.io, bumping version & generating tags is done using [`cargo-release`](https://github.com/crate-ci/cargo-release). 
28 | 29 | This requires the following permissions 30 | 31 | - on github.com/n0-computer/iroh 32 | - creating tags 33 | - pushing to `main` 34 | - on crates.io 35 | - publish access to all published crates 36 | 37 | Dry run 38 | 39 | ```sh 40 | $ cargo release 41 | ``` 42 | 43 | Actual publishing 44 | 45 | ```sh 46 | $ cargo release --execute 47 | ``` 48 | -------------------------------------------------------------------------------- /iroh-util/README.md: -------------------------------------------------------------------------------- 1 | # iroh util 2 | 3 | [![crates.io](https://img.shields.io/crates/v/iroh-util.svg?style=flat-square)](https://crates.io/crates/iroh-util) 4 | [![Released API docs](https://img.shields.io/docsrs/iroh-util?style=flat-square)](https://docs.rs/iroh-util) 5 | [![MIT/Apache-2.0 licensed](https://img.shields.io/crates/l/iroh-util?style=flat-square)](../LICENSE-MIT) 6 | [![CI](https://img.shields.io/github/workflow/status/n0-computer/iroh/Continuous%20integration?style=flat-square)](https://github.com/n0-computer/iroh/actions?query=workflow%3A%22Continuous+integration%22) 7 | 8 | Utility functions for [iroh](https://github.com/n0-computer/iroh). This 9 | provides shared functionality to be used in other iroh crates. 10 | 11 | ## License 12 | 13 | 14 | Licensed under either of Apache License, Version 15 | 2.0 or MIT license at your option. 16 | 17 | 18 |
19 | 20 | 21 | Unless you explicitly state otherwise, any contribution intentionally submitted 22 | for inclusion in this crate by you, as defined in the Apache-2.0 license, shall 23 | be dual licensed as above, without any additional terms or conditions. 24 | 25 | 26 | -------------------------------------------------------------------------------- /iroh/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "iroh" 3 | authors = ["Kasey Huizinga ", "Martijn Faassen "] 4 | description = "Command line interface for interacting with iroh." 5 | version.workspace = true 6 | edition.workspace = true 7 | license.workspace = true 8 | repository.workspace = true 9 | rust-version.workspace = true 10 | 11 | exclude = [ 12 | "tests/**", 13 | "fixtures/**", 14 | ] 15 | 16 | [dependencies] 17 | anyhow.workspace = true 18 | async-stream.workspace = true 19 | clap = { workspace = true, features = ["derive"] } 20 | config.workspace = true 21 | console.workspace = true 22 | crossterm.workspace = true 23 | futures.workspace = true 24 | git-version.workspace = true 25 | indicatif.workspace = true 26 | iroh-api.workspace = true 27 | iroh-localops.workspace = true 28 | iroh-metrics.workspace = true 29 | iroh-rpc-client.workspace = true 30 | iroh-util.workspace = true 31 | iroh-unixfs.workspace = true 32 | relative-path = { workspace = true, optional = true } 33 | serde = { workspace = true, features = ["derive"] } 34 | sysinfo.workspace = true 35 | tokio = { workspace = true, features = ["fs", "io-util"] } 36 | tracing.workspace = true 37 | which.workspace = true 38 | -------------------------------------------------------------------------------- /iroh-one/src/mem_store.rs: -------------------------------------------------------------------------------- 1 | /// A store instance listening on a memory rpc channel. 
2 | use anyhow::Context; 3 | use iroh_rpc_types::store::StoreAddr; 4 | use iroh_store::{rpc, Config, Store}; 5 | use tokio::task::JoinHandle; 6 | use tracing::info; 7 | 8 | /// Starts a new store, using the given mem rpc channel. 9 | pub async fn start(rpc_addr: StoreAddr, config: Config) -> anyhow::Result> { 10 | // This is the file RocksDB itself is looking for to determine if the database already 11 | // exists or not. Just knowing the directory exists does not mean the database is 12 | // created. 13 | let marker = config.path.join("CURRENT"); 14 | 15 | let store = if marker.exists() { 16 | info!("Opening store at {}", config.path.display()); 17 | Store::open(config) 18 | .await 19 | .context("failed to open existing store")? 20 | } else { 21 | info!("Creating store at {}", config.path.display()); 22 | Store::create(config) 23 | .await 24 | .context("failed to create new store")? 25 | }; 26 | 27 | let rpc_task = tokio::spawn(async move { rpc::new(rpc_addr, store).await.unwrap() }); 28 | 29 | Ok(rpc_task) 30 | } 31 | -------------------------------------------------------------------------------- /iroh-rpc-types/src/gateway.rs: -------------------------------------------------------------------------------- 1 | use derive_more::{From, TryInto}; 2 | use quic_rpc::{ 3 | message::{Msg, RpcMsg, ServerStreaming}, 4 | Service, 5 | }; 6 | use serde::{Deserialize, Serialize}; 7 | 8 | use crate::{RpcResult, VersionRequest, VersionResponse, WatchRequest, WatchResponse}; 9 | 10 | /// Gateway address 11 | pub type GatewayAddr = crate::addr::Addr; 12 | 13 | #[derive(Serialize, Deserialize, Debug, From, TryInto)] 14 | pub enum GatewayRequest { 15 | Watch(WatchRequest), 16 | Version(VersionRequest), 17 | } 18 | 19 | #[derive(Serialize, Deserialize, Debug, From, TryInto)] 20 | pub enum GatewayResponse { 21 | Watch(WatchResponse), 22 | Version(VersionResponse), 23 | UnitResult(RpcResult<()>), 24 | } 25 | 26 | #[derive(Debug, Clone, Copy)] 27 | pub struct GatewayService; 28 | 29 
| impl Service for GatewayService { 30 | type Req = GatewayRequest; 31 | type Res = GatewayResponse; 32 | } 33 | 34 | impl RpcMsg for VersionRequest { 35 | type Response = VersionResponse; 36 | } 37 | 38 | impl Msg for WatchRequest { 39 | type Response = WatchResponse; 40 | 41 | type Update = Self; 42 | 43 | type Pattern = ServerStreaming; 44 | } 45 | -------------------------------------------------------------------------------- /iroh-bitswap/src/client/session/cid_queue.rs: -------------------------------------------------------------------------------- 1 | use std::collections::VecDeque; 2 | 3 | use ahash::AHashSet; 4 | use cid::Cid; 5 | 6 | #[derive(Default, Debug)] 7 | pub struct CidQueue { 8 | elements: VecDeque, 9 | set: AHashSet, 10 | } 11 | 12 | impl CidQueue { 13 | pub fn pop(&mut self) -> Option { 14 | while let Some(el) = self.elements.pop_front() { 15 | if self.set.contains(&el) { 16 | return Some(el); 17 | } 18 | } 19 | 20 | None 21 | } 22 | 23 | #[allow(dead_code)] 24 | pub fn cids(&mut self) -> Vec { 25 | // Lazily deletes cids removed from the set. 
26 | self.elements.retain(|el| self.set.contains(el)); 27 | 28 | self.elements.iter().copied().collect() 29 | } 30 | 31 | pub fn push(&mut self, cid: Cid) { 32 | if self.set.insert(cid) { 33 | self.elements.push_back(cid); 34 | } 35 | } 36 | 37 | pub fn remove(&mut self, cid: &Cid) { 38 | self.set.remove(cid); 39 | } 40 | 41 | pub fn has(&self, cid: &Cid) -> bool { 42 | self.set.contains(cid) 43 | } 44 | 45 | pub fn len(&self) -> usize { 46 | self.set.len() 47 | } 48 | } 49 | -------------------------------------------------------------------------------- /iroh-share/README.md: -------------------------------------------------------------------------------- 1 | # iroh share 2 | 3 | [![crates.io](https://img.shields.io/crates/v/iroh-share.svg?style=flat-square)](https://crates.io/crates/iroh-share) 4 | [![Released API docs](https://img.shields.io/docsrs/iroh-share?style=flat-square)](https://docs.rs/iroh-share) 5 | [![MIT/Apache-2.0 licensed](https://img.shields.io/crates/l/iroh-share?style=flat-square)](../LICENSE-MIT) 6 | [![CI](https://img.shields.io/github/workflow/status/n0-computer/iroh/Continuous%20integration?style=flat-square)](https://github.com/n0-computer/iroh/actions?query=workflow%3A%22Continuous+integration%22) 7 | 8 | This provides an application that lets you easily share files accross devices 9 | using [iroh](https://github.com/n0-computer/iroh) and 10 | [IPFS](https://ipfs.tech/). 11 | 12 | ## License 13 | 14 | 15 | Licensed under either of Apache License, Version 16 | 2.0 or MIT license at your option. 17 | 18 | 19 |
20 | 21 | 22 | Unless you explicitly state otherwise, any contribution intentionally submitted 23 | for inclusion in this crate by you, as defined in the Apache-2.0 license, shall 24 | be dual licensed as above, without any additional terms or conditions. 25 | 26 | 27 | -------------------------------------------------------------------------------- /iroh-embed/README.md: -------------------------------------------------------------------------------- 1 | # iroh embed 2 | 3 | [![crates.io](https://img.shields.io/crates/v/iroh-embed.svg?style=flat-square)](https://crates.io/crates/iroh-embed) 4 | [![Released API docs](https://img.shields.io/docsrs/iroh-embed?style=flat-square)](https://docs.rs/iroh-embed) 5 | [![MIT/Apache-2.0 licensed](https://img.shields.io/crates/l/iroh-embed?style=flat-square)](../LICENSE-MIT) 6 | [![CI](https://img.shields.io/github/workflow/status/n0-computer/iroh/Continuous%20integration?style=flat-square)](https://github.com/n0-computer/iroh/actions?query=workflow%3A%22Continuous+integration%22) 7 | 8 | This provides an API to embed 9 | [iroh](https://github.com/n0-computer/iroh) into other applications, 10 | allowing them to interact directly with [IPFS](https://ipfs.tech/). 11 | 12 | ## License 13 | 14 | 15 | Licensed under either of Apache License, Version 16 | 2.0 or MIT license at your option. 17 | 18 | 19 |
20 | 21 | 22 | Unless you explicitly state otherwise, any contribution intentionally submitted 23 | for inclusion in this crate by you, as defined in the Apache-2.0 license, shall 24 | be dual licensed as above, without any additional terms or conditions. 25 | 26 | 27 | -------------------------------------------------------------------------------- /iroh-share/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "iroh-share" 3 | authors = ["dignifiedquire "] 4 | description = "Sharing files with iroh" 5 | version.workspace = true 6 | edition.workspace = true 7 | license.workspace = true 8 | repository.workspace = true 9 | rust-version.workspace = true 10 | 11 | [dependencies] 12 | anyhow.workspace = true 13 | async-trait.workspace = true 14 | bincode.workspace = true 15 | bytes.workspace = true 16 | cid = { workspace = true, features = ["serde-codec"] } 17 | clap = { workspace = true, features = ["derive"] } 18 | futures.workspace = true 19 | iroh-metrics.workspace = true 20 | iroh-p2p.workspace = true 21 | iroh-resolver.workspace = true 22 | iroh-rpc-client.workspace = true 23 | iroh-rpc-types.workspace = true 24 | iroh-store.workspace = true 25 | iroh-unixfs.workspace = true 26 | iroh-util.workspace = true 27 | libp2p = { workspace = true, features = ["gossipsub"] } 28 | multibase.workspace = true 29 | rand.workspace = true 30 | serde = { workspace = true, features = ["derive"] } 31 | tempfile.workspace = true 32 | tokio.workspace = true 33 | tokio-stream.workspace = true 34 | tracing.workspace = true 35 | tracing-subscriber.workspace = true 36 | 37 | [dev-dependencies] 38 | tokio = { workspace = true, features = ["full"] } 39 | -------------------------------------------------------------------------------- /iroh/README.md: -------------------------------------------------------------------------------- 1 | # iroh 2 | 3 | 
[![crates.io](https://img.shields.io/crates/v/iroh.svg?style=flat-square)](https://crates.io/crates/iroh) 4 | [![Released API docs](https://img.shields.io/docsrs/iroh?style=flat-square)](https://docs.rs/iroh) 5 | [![MIT/Apache-2.0 licensed](https://img.shields.io/crates/l/iroh?style=flat-square)](../LICENSE-MIT) 6 | [![CI](https://img.shields.io/github/workflow/status/n0-computer/iroh/Continuous%20integration?style=flat-square)](https://github.com/n0-computer/iroh/actions?query=workflow%3A%22Continuous+integration%22) 7 | 8 | This contains the implementation of a command-line tool for controlling 9 | [iroh](https://github.com/n0-computer/iroh). 10 | 11 | ## usage 12 | 13 | ``` 14 | // Track the status of your different iroh processes 15 | $ iroh-ctl status --watch 16 | ``` 17 | 18 | ## License 19 | 20 | 21 | Licensed under either of Apache License, Version 22 | 2.0 or MIT license at your option. 23 | 24 | 25 |
26 | 27 | 28 | Unless you explicitly state otherwise, any contribution intentionally submitted 29 | for inclusion in this crate by you, as defined in the Apache-2.0 license, shall 30 | be dual licensed as above, without any additional terms or conditions. 31 | 32 | -------------------------------------------------------------------------------- /Dockerfile-ci: -------------------------------------------------------------------------------- 1 | # Dockerfile for CircleCI 2 | # build with 3 | # `docker build -t dignifiedquire/iroh-ci:latest -f ./Dockerfile-ci .` 4 | # rebuild: `docker build --pull --no-cache -t dignifiedquire/iroh-ci:latest -f ./Dockerfile-ci .` 5 | 6 | FROM cimg/rust:1.62.0 7 | 8 | # Some of the dependencies I need to build a few libraries, 9 | # personalize to your needs. You can use multi-stage builds 10 | # to produce a lightweight image. 11 | RUN sudo apt-get update && \ 12 | sudo apt-get install -y \ 13 | cmake pkg-config libssl-dev git gcc build-essential git clang libclang-dev \ 14 | make curl openssh-client \ 15 | autoconf automake cmake libtool libcurl4-openssl-dev libssl-dev \ 16 | libelf-dev libdw-dev binutils-dev zlib1g-dev libiberty-dev wget \ 17 | xz-utils pkg-config python 18 | 19 | RUN curl https://sh.rustup.rs -sSf | sh -s -- -y 20 | 21 | ENV PATH "$PATH:/root/.cargo/bin" 22 | ENV RUSTFLAGS "-C link-dead-code" 23 | 24 | # set CROSS_DOCKER_IN_DOCKER to inform `cross` that it is executed from within a container 25 | ENV CROSS_DOCKER_IN_DOCKER=true 26 | # install `cross` 27 | RUN cargo install cross 28 | 29 | RUN sudo bash -l -c 'echo $(rustc --print sysroot)/lib >> /etc/ld.so.conf' 30 | RUN sudo bash -l -c 'echo /usr/local/lib >> /etc/ld.so.conf' 31 | RUN sudo ldconfig 32 | -------------------------------------------------------------------------------- /iroh-one/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "iroh-one" 3 | readme = "README.md" 4 | description = 
"all of iroh in a single binary" 5 | edition.workspace = true 6 | license.workspace = true 7 | repository.workspace = true 8 | version.workspace = true 9 | rust-version.workspace = true 10 | 11 | [dependencies] 12 | anyhow.workspace = true 13 | async-trait.workspace = true 14 | axum.workspace = true 15 | bytes.workspace = true 16 | cid.workspace = true 17 | clap = { workspace = true, features = ["derive"] } 18 | config.workspace = true 19 | futures.workspace = true 20 | headers.workspace = true 21 | http-serde.workspace = true 22 | hyper.workspace = true 23 | iroh-gateway.workspace = true 24 | iroh-metrics.workspace = true 25 | iroh-p2p.workspace = true 26 | iroh-resolver.workspace = true 27 | iroh-unixfs.workspace = true 28 | iroh-rpc-client.workspace = true 29 | iroh-rpc-types.workspace = true 30 | iroh-store.workspace = true 31 | iroh-util.workspace = true 32 | reqwest = { workspace = true, features = ["rustls-tls"] } 33 | serde = { workspace = true, features = ["derive"] } 34 | tempfile = { workspace = true, optional = true } 35 | tokio = { workspace = true, features = ["macros", "rt-multi-thread", "process"] } 36 | tracing.workspace = true 37 | 38 | [dev-dependencies] 39 | http.workspace = true 40 | 41 | [features] 42 | http-uds-gateway = ["tempfile"] 43 | -------------------------------------------------------------------------------- /iroh-store/README.md: -------------------------------------------------------------------------------- 1 | # iroh store 2 | 3 | [![crates.io](https://img.shields.io/crates/v/iroh-store.svg?style=flat-square)](https://crates.io/crates/iroh-store) 4 | [![Released API docs](https://img.shields.io/docsrs/iroh-store?style=flat-square)](https://docs.rs/iroh-store) 5 | [![MIT/Apache-2.0 licensed](https://img.shields.io/crates/l/iroh-store?style=flat-square)](../LICENSE-MIT) 6 | 
[![CI](https://img.shields.io/github/workflow/status/n0-computer/iroh/Continuous%20integration?style=flat-square)](https://github.com/n0-computer/iroh/actions?query=workflow%3A%22Continuous+integration%22) 7 | 8 | Storage for [iroh](https://github.com/n0-computer/iroh). This provides an gRPC 9 | API for storing IPFS data in a [RocksDB database](http://rocksdb.org/). 10 | 11 | ## How to run 12 | 13 | ```sh 14 | # From the root of the workspace 15 | > cargo run --release -p iroh-store 16 | ``` 17 | 18 | ## License 19 | 20 | 21 | Licensed under either of Apache License, Version 22 | 2.0 or MIT license at your option. 23 | 24 | 25 |
26 | 27 | 28 | Unless you explicitly state otherwise, any contribution intentionally submitted 29 | for inclusion in this crate by you, as defined in the Apache-2.0 license, shall 30 | be dual licensed as above, without any additional terms or conditions. 31 | 32 | 33 | -------------------------------------------------------------------------------- /iroh-bitswap/src/server/ledger.rs: -------------------------------------------------------------------------------- 1 | use cid::Cid; 2 | use libp2p::PeerId; 3 | 4 | use crate::{ 5 | client::wantlist::{Entry, Wantlist}, 6 | message::{Priority, WantType}, 7 | }; 8 | 9 | /// Tracks the wantlist for a given partner 10 | #[derive(Debug)] 11 | pub struct Ledger { 12 | /// The remote peer. 13 | partner: PeerId, 14 | wantlist: Wantlist, 15 | } 16 | 17 | impl Ledger { 18 | pub fn new(partner: PeerId) -> Self { 19 | Ledger { 20 | partner, 21 | wantlist: Wantlist::default(), 22 | } 23 | } 24 | 25 | pub fn wantlist_mut(&mut self) -> &mut Wantlist { 26 | &mut self.wantlist 27 | } 28 | 29 | pub fn partner(&self) -> &PeerId { 30 | &self.partner 31 | } 32 | 33 | pub fn clear_wantlist(&mut self) { 34 | self.wantlist.clear(); 35 | } 36 | 37 | pub fn wants(&mut self, cid: Cid, priority: Priority, want_type: WantType) { 38 | self.wantlist.add(cid, priority, want_type); 39 | } 40 | 41 | pub fn cancel_want(&mut self, cid: &Cid) -> Option { 42 | self.wantlist.remove(cid) 43 | } 44 | 45 | pub fn wantlist_get(&self, cid: &Cid) -> Option<&Entry> { 46 | self.wantlist.get(cid) 47 | } 48 | 49 | pub fn entries(&mut self) -> impl Iterator + '_ { 50 | self.wantlist.entries() 51 | } 52 | } 53 | -------------------------------------------------------------------------------- /iroh-gateway/src/templates.rs: -------------------------------------------------------------------------------- 1 | use phf::{phf_set, Set}; 2 | use std::{ffi::OsStr, path::Path}; 3 | 4 | pub const DIR_LIST_TEMPLATE: &str = include_str!("../assets/dir_list.html"); 5 | pub const 
NOT_FOUND_TEMPLATE: &str = include_str!("../assets/404.html"); 6 | pub const STYLESHEET: &str = include_str!("../assets/style.css"); 7 | pub const ICONS_STYLESHEET: &str = include_str!("../assets/icons.css"); 8 | 9 | static KNOWN_ICONS: Set<&'static str> = phf_set! { 10 | ".aac", ".aiff", ".ai", ".avi", ".bmp", ".c", ".cpp", ".css", ".dat", ".dmg", ".doc", 11 | ".dotx", ".dwg", ".dxf", ".eps", ".exe", ".flv", ".gif", ".h", ".hpp", ".html", ".ics", 12 | ".iso", ".java", ".jpg", ".jpeg", ".js", ".key", ".less", ".mid", ".mkv", ".mov", ".mp3", 13 | ".mp4", ".mpg", ".odf", ".ods", ".odt", ".otp", ".ots", ".ott", ".pdf", ".php", ".png", 14 | ".ppt", ".psd", ".py", ".qt", ".rar", ".rb", ".rtf", ".sass", ".scss", ".sql", ".tga", 15 | ".tgz", ".tiff", ".txt", ".wav", ".wmv", ".xls", ".xlsx", ".xml", ".yml", ".zip", 16 | }; 17 | 18 | pub fn icon_class_name(path: &str) -> String { 19 | let ext = Path::new(path) 20 | .extension() 21 | .and_then(OsStr::to_str) 22 | .unwrap_or(""); 23 | let icon = if KNOWN_ICONS.contains(ext) { 24 | ext 25 | } else { 26 | "_blank" 27 | }; 28 | format!("icon-{icon}") 29 | } 30 | -------------------------------------------------------------------------------- /iroh-p2p/README.md: -------------------------------------------------------------------------------- 1 | # iroh-p2p 2 | 3 | [![crates.io](https://img.shields.io/crates/v/iroh-p2p.svg?style=flat-square)](https://crates.io/crates/iroh-p2p) 4 | [![Released API docs](https://img.shields.io/docsrs/iroh-p2p?style=flat-square)](https://docs.rs/iroh-p2p) 5 | [![MIT/Apache-2.0 licensed](https://img.shields.io/crates/l/iroh-p2p?style=flat-square)](../LICENSE-MIT) 6 | [![CI](https://img.shields.io/github/workflow/status/n0-computer/iroh/Continuous%20integration?style=flat-square)](https://github.com/n0-computer/iroh/actions?query=workflow%3A%22Continuous+integration%22) 7 | 8 | 9 | P2P networking for [iroh](https://github.com/n0-computer/iroh). 
This implements 10 | an [IPFS node](https://docs.ipfs.tech/concepts/nodes/). The IPFS network 11 | consists of a collection of nodes. 12 | 13 | ## How to run 14 | 15 | ```sh 16 | # From the root of the workspace 17 | > cargo run --release -p iroh-p2p 18 | ``` 19 | 20 | ## License 21 | 22 | 23 | Licensed under either of Apache License, Version 24 | 2.0 or MIT license at your option. 25 | 26 | 27 |
28 | 29 | 30 | Unless you explicitly state otherwise, any contribution intentionally submitted 31 | for inclusion in this crate by you, as defined in the Apache-2.0 license, shall 32 | be dual licensed as above, without any additional terms or conditions. 33 | 34 | 35 | -------------------------------------------------------------------------------- /iroh-bitswap/README.md: -------------------------------------------------------------------------------- 1 | # iroh bitswap 2 | 3 | [![crates.io](https://img.shields.io/crates/v/iroh-bitswap.svg?style=flat-square)](https://crates.io/crates/iroh-bitswap) 4 | [![Released API docs](https://img.shields.io/docsrs/iroh-bitswap?style=flat-square)](https://docs.rs/iroh-bitswap) 5 | [![MIT/Apache-2.0 licensed](https://img.shields.io/crates/l/iroh-bitswap?style=flat-square)](../LICENSE-MIT) 6 | [![CI](https://img.shields.io/github/workflow/status/n0-computer/iroh/Continuous%20integration?style=flat-square)](https://github.com/n0-computer/iroh/actions?query=workflow%3A%22Continuous+integration%22) 7 | 8 | This contains an implementation of the [IPFS bitswap 9 | protocol](https://docs.ipfs.tech/concepts/bitswap/). It sends blocks of data to 10 | other peers in the IPFS network who want them, and receives blocks requested by 11 | the client from the network. 12 | 13 | It is part of [iroh](https://github.com/n0-computer/iroh). 14 | 15 | ## License 16 | 17 | 18 | Licensed under either of Apache License, Version 19 | 2.0 or MIT license at your option. 20 | 21 | 22 |
23 | 24 | 25 | Unless you explicitly state otherwise, any contribution intentionally submitted 26 | for inclusion in this crate by you, as defined in the Apache-2.0 license, shall 27 | be dual licensed as above, without any additional terms or conditions. 28 | 29 | -------------------------------------------------------------------------------- /iroh-localops/README.md: -------------------------------------------------------------------------------- 1 | # iroh localops 2 | 3 | [![crates.io](https://img.shields.io/crates/v/iroh-localops.svg?style=flat-square)](https://crates.io/crates/iroh-localops) 4 | [![Released API docs](https://img.shields.io/docsrs/iroh-localops?style=flat-square)](https://docs.rs/iroh-localops) 5 | [![MIT/Apache-2.0 licensed](https://img.shields.io/crates/l/iroh-localops?style=flat-square)](../LICENSE-MIT) 6 | [![CI](https://img.shields.io/github/workflow/status/n0-computer/iroh/Continuous%20integration?style=flat-square)](https://github.com/n0-computer/iroh/actions?query=workflow%3A%22Continuous+integration%22) 7 | 8 | Think "devops on localhost". This crate contains iroh-specific tools for starting & stopping processes in a cross platform way. 9 | 10 | This crate targets three operating systems via [conditional compilation](https://doc.rust-lang.org/reference/conditional-compilation.html): 11 | * `macos` 12 | * `linux` 13 | * `windows` 14 | 15 | ## License 16 | 17 | 18 | Licensed under either of Apache License, Version 19 | 2.0 or MIT license at your option. 20 | 21 | 22 |
23 | 24 | 25 | Unless you explicitly state otherwise, any contribution intentionally submitted 26 | for inclusion in this crate by you, as defined in the Apache-2.0 license, shall 27 | be dual licensed as above, without any additional terms or conditions. 28 | 29 | -------------------------------------------------------------------------------- /iroh-bitswap/src/bitswap_pb.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package bitswap_pb; 4 | 5 | message Message { 6 | message Wantlist { 7 | enum WantType { 8 | Block = 0; 9 | Have = 1; 10 | } 11 | 12 | message Entry { 13 | bytes block = 1; // the block cid (cidV0 in bitswap 1.0.0, cidV1 in bitswap 1.1.0) 14 | int32 priority = 2; // the priority (normalized). default to 1 15 | bool cancel = 3; // whether this revokes an entry 16 | WantType wantType = 4; // Note: defaults to enum 0, ie Block 17 | bool sendDontHave = 5; // Note: defaults to false 18 | } 19 | 20 | repeated Entry entries = 1; // a list of wantlist entries 21 | bool full = 2; // whether this is the full wantlist. 
default to false 22 | } 23 | 24 | message Block { 25 | bytes prefix = 1; // CID prefix (cid version, multicodec and multihash prefix (type + length) 26 | bytes data = 2; 27 | } 28 | 29 | enum BlockPresenceType { 30 | Have = 0; 31 | DontHave = 1; 32 | } 33 | 34 | message BlockPresence { 35 | bytes cid = 1; 36 | BlockPresenceType type = 2; 37 | } 38 | 39 | Wantlist wantlist = 1; 40 | repeated bytes blocks = 2; // used to send Blocks in bitswap 1.0.0 41 | repeated Block payload = 3; // used to send Blocks in bitswap 1.1.0 42 | repeated BlockPresence blockPresences = 4; 43 | int32 pendingBytes = 5; 44 | } -------------------------------------------------------------------------------- /iroh-store/src/cf.rs: -------------------------------------------------------------------------------- 1 | use bytecheck::CheckBytes; 2 | use rkyv::{with::AsBox, Archive, Deserialize, Serialize}; 3 | 4 | /// Column family to store actual data. 5 | /// - Maps id (u64) to bytes 6 | pub const CF_BLOBS_V0: &str = "blobs-v0"; 7 | /// Column family that stores metdata about a given blob. 8 | /// - indexed by id (u64) 9 | pub const CF_METADATA_V0: &str = "metadata-v0"; 10 | /// Column familty that stores the graph for a blob 11 | /// - indexed by id (u64) 12 | pub const CF_GRAPH_V0: &str = "graph-v0"; 13 | /// Column family that stores the mapping (multihash, code) to id. 14 | /// 15 | /// By storing multihash first we can search for ids either by cid = (multihash, code) or by multihash. 16 | pub const CF_ID_V0: &str = "id-v0"; 17 | 18 | // This wrapper type serializes the contained value out-of-line so that newer 19 | // versions can be viewed as the older version. 
20 | #[derive(Debug, Archive, Deserialize, Serialize)] 21 | #[repr(transparent)] 22 | #[archive_attr(repr(transparent), derive(CheckBytes))] 23 | pub struct Versioned(#[with(AsBox)] pub T); 24 | 25 | #[derive(Debug, Archive, Deserialize, Serialize)] 26 | #[repr(C)] 27 | #[archive_attr(repr(C), derive(CheckBytes))] 28 | pub struct MetadataV0 { 29 | /// The codec of the original CID. 30 | pub codec: u64, 31 | pub multihash: Vec, 32 | } 33 | 34 | #[derive(Debug, Archive, Deserialize, Serialize)] 35 | #[repr(C)] 36 | #[archive_attr(repr(C), derive(CheckBytes))] 37 | pub struct GraphV0 { 38 | pub children: Vec, 39 | } 40 | -------------------------------------------------------------------------------- /iroh-resolver/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "iroh-resolver" 3 | authors = ["dignifiedquire "] 4 | description = "Implementation of path resolution for iroh" 5 | version.workspace = true 6 | edition.workspace = true 7 | license.workspace = true 8 | repository.workspace = true 9 | rust-version.workspace = true 10 | 11 | exclude = [ 12 | "tests/**", 13 | "fixtures/**", 14 | ] 15 | 16 | [dependencies] 17 | anyhow.workspace = true 18 | async-channel.workspace = true 19 | async-stream.workspace = true 20 | async-trait.workspace = true 21 | bs58.workspace = true 22 | bytes.workspace = true 23 | cid.workspace = true 24 | futures.workspace = true 25 | iroh-metrics = { workspace = true, features = ["resolver", "gateway"] } 26 | iroh-rpc-client.workspace = true 27 | iroh-util.workspace = true 28 | iroh-unixfs.workspace = true 29 | libipld.workspace = true 30 | libp2p.workspace = true 31 | serde = { workspace = true, features = ["derive"] } 32 | tokio = { workspace = true, features = ["fs"] } 33 | tracing.workspace = true 34 | trust-dns-resolver = { workspace = true, features = ["dns-over-https-rustls", "serde-config", "tokio-runtime"] } 35 | fnv.workspace = true 36 | 37 | [dev-dependencies] 38 | 
iroh-car.workspace = true 39 | iroh-rpc-types.workspace = true 40 | iroh-store.workspace = true 41 | proptest.workspace = true 42 | rand.workspace = true 43 | async-recursion.workspace = true 44 | rand_chacha.workspace = true 45 | tokio = { workspace = true, features = ["rt", "macros", "rt-multi-thread", "fs"] } 46 | ruzstd.workspace = true 47 | -------------------------------------------------------------------------------- /iroh-metrics/README.md: -------------------------------------------------------------------------------- 1 | # iroh metrics 2 | 3 | [![crates.io](https://img.shields.io/crates/v/iroh-metrics.svg?style=flat-square)](https://crates.io/crates/iroh-metrics) 4 | [![Released API docs](https://img.shields.io/docsrs/iroh-metrics?style=flat-square)](https://docs.rs/iroh-metrics) 5 | [![MIT/Apache-2.0 licensed](https://img.shields.io/crates/l/iroh-metrics?style=flat-square)](../LICENSE-MIT) 6 | [![CI](https://img.shields.io/github/workflow/status/n0-computer/iroh/Continuous%20integration?style=flat-square)](https://github.com/n0-computer/iroh/actions?query=workflow%3A%22Continuous+integration%22) 7 | 8 | 9 | The metrics collection interface for [iroh](https://github.com/n0-computer/iroh) services. 10 | 11 | ## ENV Variables 12 | 13 | - `IROH_METRICS_DEBUG` - redirects traces to stdout if the flag is set to `true` (default: ``) 14 | - `IROH_METRICS_COLLECTOR_ENDPOINT` - endpoint where traces will be routed (default: `http://localhost:4317`) 15 | - `IROH_METRICS_PROM_GATEWAY_ENDPOINT` - endpoint where prometheus metrics will be pushed (default: `http://localhost:9091`) 16 | 17 | ## License 18 | 19 | 20 | Licensed under either of Apache License, Version 21 | 2.0 or MIT license at your option. 22 | 23 | 24 |
25 | 26 | 27 | Unless you explicitly state otherwise, any contribution intentionally submitted 28 | for inclusion in this crate by you, as defined in the Apache-2.0 license, shall 29 | be dual licensed as above, without any additional terms or conditions. 30 | 31 | -------------------------------------------------------------------------------- /iroh-car/README.md: -------------------------------------------------------------------------------- 1 | # iroh-car 2 | 3 | [![crates.io](https://img.shields.io/crates/v/iroh-car.svg?style=flat-square)](https://crates.io/crates/iroh-car) 4 | [![Released API docs](https://img.shields.io/docsrs/iroh-car?style=flat-square)](https://docs.rs/iroh-car) 5 | [![MIT/Apache-2.0 licensed](https://img.shields.io/crates/l/iroh-car?style=flat-square)](../LICENSE-MIT) 6 | [![CI](https://img.shields.io/github/workflow/status/n0-computer/iroh/Continuous%20integration?style=flat-square)](https://github.com/n0-computer/iroh/actions?query=workflow%3A%22Continuous+integration%22) 7 | 8 | [CAR file](https://ipld.io/specs/transport/car/) support for iroh. "CAR" stands 9 | for Content Addressable aRchives. A CAR file typically contains a serialized 10 | representation of an [IPLD 11 | DAG](https://docs.ipfs.tech/concepts/merkle-dag/#merkle-directed-acyclic-graphs-dags), 12 | though is general enough to contain arbitrary IPLD blocks. 13 | 14 | Currently supports only [v1](https://ipld.io/specs/transport/car/carv1/). 15 | 16 | It is part of [iroh](https://github.com/n0-computer/iroh). 17 | 18 | ## License 19 | 20 | 21 | Licensed under either of Apache License, Version 22 | 2.0 or MIT license at your option. 23 | 24 | 25 |
26 | 27 | 28 | Unless you explicitly state otherwise, any contribution intentionally submitted 29 | for inclusion in this crate by you, as defined in the Apache-2.0 license, shall 30 | be dual licensed as above, without any additional terms or conditions. 31 | 32 | 33 | -------------------------------------------------------------------------------- /iroh-gateway/src/cli.rs: -------------------------------------------------------------------------------- 1 | /// CLI arguments support. 2 | use clap::Parser; 3 | use std::collections::HashMap; 4 | use std::path::PathBuf; 5 | 6 | #[derive(Parser, Debug, Clone)] 7 | #[clap(author, version, about, long_about = None)] 8 | pub struct Args { 9 | #[clap(short, long)] 10 | port: Option, 11 | #[clap(short, long)] 12 | writeable: Option, 13 | #[clap(short, long)] 14 | fetch: Option, 15 | #[clap(short, long)] 16 | cache: Option, 17 | #[clap(long)] 18 | metrics: bool, 19 | #[clap(long)] 20 | tracing: bool, 21 | #[clap(long)] 22 | pub cfg: Option, 23 | #[clap(long)] 24 | use_denylist: bool, 25 | } 26 | 27 | impl Args { 28 | pub fn make_overrides_map(&self) -> HashMap<&str, String> { 29 | let mut map: HashMap<&str, String> = HashMap::new(); 30 | if let Some(port) = self.port { 31 | map.insert("port", port.to_string()); 32 | } 33 | if let Some(writable) = self.writeable { 34 | map.insert("writable", writable.to_string()); 35 | } 36 | if let Some(fetch) = self.fetch { 37 | map.insert("fetch", fetch.to_string()); 38 | } 39 | if let Some(cache) = self.cache { 40 | map.insert("cache", cache.to_string()); 41 | } 42 | map.insert("use_denylist", self.use_denylist.to_string()); 43 | map.insert("metrics.collect", self.metrics.to_string()); 44 | map.insert("metrics.tracing", self.tracing.to_string()); 45 | map 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /docker/Dockerfile.iroh-gateway: -------------------------------------------------------------------------------- 1 | 
################################################################################ 2 | ## Builder 3 | ################################################################################ 4 | FROM rust:latest AS builder 5 | 6 | RUN update-ca-certificates 7 | 8 | # install latest protocol buffer compiler. 9 | ARG TARGETPLATFORM 10 | COPY ../docker/install_protoc.sh . 11 | RUN ./install_protoc.sh 12 | 13 | # set build env vars 14 | ENV RUST_BACKTRACE=1 \ 15 | PROTOC=/usr/local/bin/protoc \ 16 | PROTOC_INCLUDE=/usr/local/include 17 | 18 | # has the side effect of updating the crates.io index & installing rust toolchain 19 | # called in a separate step for nicer caching. the command itself will fail, 20 | # b/c empty-library is not a dependency, so we override with an exit code 0 21 | RUN cargo install empty-library; exit 0 22 | 23 | WORKDIR /iroh 24 | 25 | COPY ../ . 26 | 27 | RUN cargo build --bin iroh-gateway --profile=docker 28 | 29 | ################################################################################ 30 | ## Final image 31 | ################################################################################ 32 | FROM gcr.io/distroless/cc 33 | 34 | WORKDIR /iroh 35 | 36 | # Copy our build, changing owndership to distroless-provided "nonroot" user, 37 | # (65532:65532) 38 | COPY --from=builder --chown=65532:65532 /iroh/target/docker/iroh-gateway ./ 39 | 40 | # Use nonroot (unprivileged) user 41 | USER nonroot 42 | 43 | # expose the default RPC port and default gateway HTTP port 44 | EXPOSE 4400 9050 45 | 46 | CMD ["/iroh/iroh-gateway"] -------------------------------------------------------------------------------- /iroh-resolver/README.md: -------------------------------------------------------------------------------- 1 | # iroh resolver 2 | 3 | [![crates.io](https://img.shields.io/crates/v/iroh-resolver.svg?style=flat-square)](https://crates.io/crates/iroh-resolver) 4 | [![Released API 
docs](https://img.shields.io/docsrs/iroh-resolver?style=flat-square)](https://docs.rs/iroh-resolver) 5 | [![MIT/Apache-2.0 licensed](https://img.shields.io/crates/l/iroh-resolver?style=flat-square)](../LICENSE-MIT) 6 | [![CI](https://img.shields.io/github/workflow/status/n0-computer/iroh/Continuous%20integration?style=flat-square)](https://github.com/n0-computer/iroh/actions?query=workflow%3A%22Continuous+integration%22) 7 | 8 | Resolver for [iroh](https://github.com/n0-computer/iroh). It retrieves data 9 | associated with an IPFS CID from the [iroh 10 | store](https://github.com/n0-computer/iroh/tree/main/iroh-store), or if not 11 | available, uses [iroh 12 | p2p](https://github.com/n0-computer/iroh/tree/main/iroh-p2p) to retrieve it 13 | from the IPFS network. 14 | 15 | This crate also provides a way to take a directory of files, or a single file, 16 | and chunk it into smaller parts that can be stored, and assemble them back 17 | together again. 18 | 19 | ## License 20 | 21 | 22 | Licensed under either of Apache License, Version 23 | 2.0 or MIT license at your option. 24 | 25 | 26 |
27 | 28 | 29 | Unless you explicitly state otherwise, any contribution intentionally submitted 30 | for inclusion in this crate by you, as defined in the Apache-2.0 license, shall 31 | be dual licensed as above, without any additional terms or conditions. 32 | 33 | 34 | -------------------------------------------------------------------------------- /docker/Dockerfile.iroh-store: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | ## Builder 3 | ################################################################################ 4 | FROM rust:latest AS builder 5 | 6 | RUN update-ca-certificates 7 | 8 | # rocksDB needs libclang 9 | RUN apt-get update && \ 10 | apt-get install -y \ 11 | clang libclang-dev 12 | 13 | # install latest protocol buffer compiler. 14 | ARG TARGETPLATFORM 15 | COPY ../docker/install_protoc.sh . 16 | RUN ./install_protoc.sh 17 | 18 | # set build env vars 19 | ENV RUST_BACKTRACE=1 \ 20 | PROTOC=/usr/local/bin/protoc \ 21 | PROTOC_INCLUDE=/usr/local/include 22 | 23 | # has the side effect of updating the crates.io index & installing rust toolchain 24 | # called in a separate step for nicer caching. the command itself will fail, 25 | # b/c empty-library is not a dependency, so we override with an exit code 0 26 | RUN cargo install empty-library; exit 0 27 | 28 | WORKDIR /iroh 29 | 30 | COPY ../ . 
31 | 32 | RUN cargo build --bin iroh-store --profile=docker 33 | 34 | ################################################################################ 35 | ## Final image 36 | ################################################################################ 37 | FROM gcr.io/distroless/cc 38 | 39 | WORKDIR /iroh 40 | 41 | # Copy our build, changing owndership to distroless-provided "nonroot" user, 42 | # (65532:65532) 43 | COPY --from=builder --chown=65532:65532 /iroh/target/docker/iroh-store ./ 44 | 45 | # Use nonroot (unprivileged) user 46 | USER nonroot 47 | 48 | # expose the default RPC port 49 | EXPOSE 4402 50 | 51 | CMD ["/iroh/iroh-store"] -------------------------------------------------------------------------------- /iroh-metrics/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "iroh-metrics" 3 | version.workspace = true 4 | edition.workspace = true 5 | license.workspace = true 6 | readme = "README.md" 7 | description = "Iroh metrics" 8 | repository.workspace = true 9 | rust-version.workspace = true 10 | 11 | [dependencies] 12 | async-trait.workspace = true 13 | config.workspace = true 14 | console-subscriber = { workspace = true, optional = true } 15 | iroh-util.workspace = true 16 | lazy_static.workspace = true 17 | names.workspace = true 18 | opentelemetry = { workspace = true, features = ["rt-tokio"] } 19 | opentelemetry-otlp = { workspace = true, features = ["tonic"] } 20 | paste.workspace = true 21 | prometheus-client.workspace = true 22 | reqwest = { workspace = true, features = ["rustls-tls"] } 23 | serde = { workspace = true, features = ["derive"] } 24 | tokio = { workspace = true, features = ["macros", "rt-multi-thread", "process"] } 25 | tracing.workspace = true 26 | tracing-opentelemetry.workspace = true 27 | tracing-subscriber = { workspace = true, features = ["env-filter"] } 28 | 29 | [dependencies.libp2p] 30 | workspace = true 31 | features = [ 32 | "gossipsub", 33 | "kad", 
34 | "identify", 35 | "ping", 36 | "mdns", 37 | "noise", 38 | "yamux", 39 | "tcp", 40 | "dns", 41 | "mplex", 42 | "request-response", 43 | "websocket", 44 | "serde", 45 | "metrics", 46 | "relay", 47 | "dcutr", 48 | "autonat", 49 | "tokio", 50 | ] 51 | optional = true 52 | 53 | [features] 54 | gateway = [] 55 | resolver = [] 56 | bitswap = [] 57 | store = [] 58 | p2p = ["libp2p"] 59 | 60 | # requires setting RUSTFLAGS="--cfg tokio_unstable" 61 | tokio-console = ["tokio/tracing", "console-subscriber"] 62 | -------------------------------------------------------------------------------- /docker/Dockerfile.iroh-one: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | ## Builder 3 | ################################################################################ 4 | # FROM --platform=linux/amd64 rust:latest AS builder 5 | FROM rust:latest AS builder 6 | 7 | RUN update-ca-certificates 8 | 9 | # rocksDB needs libclang 10 | RUN apt-get update && \ 11 | apt-get install -y \ 12 | clang libclang-dev 13 | 14 | # install latest protocol buffer compiler. 15 | ARG TARGETPLATFORM 16 | COPY ../docker/install_protoc.sh . 17 | RUN ./install_protoc.sh 18 | 19 | # set build env vars 20 | ENV RUST_BACKTRACE=1 \ 21 | PROTOC=/usr/local/bin/protoc \ 22 | PROTOC_INCLUDE=/usr/local/include 23 | 24 | # has the side effect of updating the crates.io index & installing rust toolchain 25 | # called in a separate step for nicer caching. the command itself will fail, 26 | # b/c empty-library is not a dependency, so we override with an exit code 0 27 | RUN cargo install empty-library; exit 0 28 | 29 | WORKDIR /iroh 30 | 31 | COPY ../ . 
32 | 33 | RUN cargo build --bin iroh-one --profile=docker 34 | 35 | ################################################################################ 36 | ## Final image 37 | ################################################################################ 38 | FROM gcr.io/distroless/cc 39 | 40 | WORKDIR /iroh 41 | 42 | # Copy our build, changing ownership to distroless-provided "nonroot" user, 43 | # (65532:65532) 44 | COPY --from=builder --chown=65532:65532 /iroh/target/docker/iroh-one ./ 45 | 46 | # Use nonroot (unprivileged) user 47 | USER nonroot 48 | 49 | # expose gateway, p2p & all default RPC ports 50 | EXPOSE 4400 4401 4402 4403 4444 9050 51 | 52 | CMD ["/iroh/iroh-one"] -------------------------------------------------------------------------------- /iroh-store/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "iroh-store" 3 | authors = ["dignifiedquire "] 4 | description = "Implementation of the storage part of iroh" 5 | version.workspace = true 6 | edition.workspace = true 7 | license.workspace = true 8 | repository.workspace = true 9 | rust-version.workspace = true 10 | 11 | [dependencies] 12 | anyhow.workspace = true 13 | ahash.workspace = true 14 | async-trait.workspace = true 15 | async-stream.workspace = true 16 | bytecheck.workspace = true 17 | bytes.workspace = true 18 | cid.workspace = true 19 | clap = { workspace = true, features = ["derive"] } 20 | config.workspace = true 21 | ctrlc.workspace = true 22 | futures.workspace = true 23 | git-version.workspace = true 24 | iroh-metrics = { workspace = true, features = ["store"] } 25 | iroh-rpc-client.workspace = true 26 | iroh-rpc-types.workspace = true 27 | iroh-util.workspace = true 28 | multihash.workspace = true 29 | names.workspace = true 30 | opentelemetry = { workspace = true, features = ["rt-tokio"] } 31 | rkyv = { workspace = true, features = ["validation"] } 32 | rocksdb.workspace = true 33 | serde = { workspace = true, 
features = ["derive"] } 34 | smallvec = { workspace = true, features = ["write"] } 35 | tokio = { workspace = true, features = ["rt"] } 36 | tracing.workspace = true 37 | tracing-opentelemetry.workspace = true 38 | tracing-subscriber = { workspace = true, features = ["env-filter"] } 39 | 40 | [dev-dependencies] 41 | criterion = { workspace = true, features = ["async_tokio"] } 42 | libipld.workspace = true 43 | rayon.workspace = true 44 | tempfile.workspace = true 45 | tokio = { workspace = true, features = ["rt", "macros", "rt-multi-thread"] } 46 | 47 | [[bench]] 48 | name = "store" 49 | harness = false 50 | 51 | [[bench]] 52 | name = "rpc" 53 | harness = false 54 | -------------------------------------------------------------------------------- /iroh-unixfs/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "iroh-unixfs" 3 | authors = ["dignifiedquire "] 4 | description = "Implementation of unixfs for iroh" 5 | version.workspace = true 6 | edition.workspace = true 7 | license.workspace = true 8 | repository.workspace = true 9 | rust-version.workspace = true 10 | 11 | [dependencies] 12 | anyhow.workspace = true 13 | async-channel.workspace = true 14 | async-recursion.workspace = true 15 | async-stream.workspace = true 16 | async-trait.workspace = true 17 | base64.workspace = true 18 | bytes.workspace = true 19 | cid.workspace = true 20 | config.workspace = true 21 | fastmurmur3.workspace = true 22 | futures.workspace = true 23 | iroh-metrics = { workspace = true, features = ["resolver", "gateway"] } 24 | iroh-rpc-client.workspace = true 25 | iroh-util.workspace = true 26 | libipld.workspace = true 27 | libp2p = { workspace = true, features = ["serde"] } 28 | multihash.workspace = true 29 | num_enum.workspace = true 30 | once_cell.workspace = true 31 | prost.workspace = true 32 | rand.workspace = true 33 | reqwest = { workspace = true, features = ["rustls-tls", "json"] } 34 | url = { workspace = 
true, features = ["serde"] } 35 | serde = { workspace = true, features = ["derive"] } 36 | serde_json.workspace = true 37 | tokio = { workspace = true, features = ["fs"] } 38 | tokio-util = { workspace = true, features = ["io"] } 39 | tracing.workspace = true 40 | unsigned-varint.workspace = true 41 | 42 | [dev-dependencies] 43 | criterion = { workspace = true, features = ["async_tokio"] } 44 | iroh-rpc-types.workspace = true 45 | iroh-store.workspace = true 46 | proptest.workspace = true 47 | tempfile.workspace = true 48 | tokio = { workspace = true, features = ["rt", "macros", "rt-multi-thread", "fs"] } 49 | 50 | [build-dependencies] 51 | prost-build.workspace = true 52 | -------------------------------------------------------------------------------- /docker/Dockerfile.iroh-p2p: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | ## Builder 3 | ################################################################################ 4 | FROM rust:latest AS builder 5 | 6 | RUN update-ca-certificates 7 | 8 | # install latest protocol buffer compiler. 9 | ARG TARGETPLATFORM 10 | COPY ../docker/install_protoc.sh . 11 | RUN ./install_protoc.sh 12 | 13 | # set build env vars 14 | ENV RUST_BACKTRACE=1 \ 15 | PROTOC=/usr/local/bin/protoc \ 16 | PROTOC_INCLUDE=/usr/local/include 17 | 18 | # has the side effect of updating the crates.io index & installing rust toolchain 19 | # called in a separate step for nicer caching. the command itself will fail, 20 | # b/c empty-library is not a dependency, so we override with an exit code 0 21 | RUN cargo install empty-library; exit 0 22 | 23 | WORKDIR /iroh 24 | 25 | COPY ../ . 
26 | 27 | RUN cargo build --bin iroh-p2p --profile=docker 28 | 29 | ################################################################################ 30 | ## Final image 31 | ################################################################################ 32 | FROM gcr.io/distroless/cc 33 | 34 | WORKDIR /iroh 35 | 36 | # Copy our build, changing ownership to distroless-provided "nonroot" user, 37 | # (65532:65532) 38 | COPY --from=builder --chown=65532:65532 /iroh/target/docker/iroh-p2p ./ 39 | 40 | # TODO (b5) - investigate max file descriptor limits within the container image 41 | # libp2p needs lots of FDs for open ports, and we should be maxing them out. 42 | # I have no idea if distroless honors ERL_MAX_PORTS, consider this a starting 43 | # point for experimentation 44 | # ENV ERL_MAX_PORTS=65536 45 | 46 | # Use nonroot (unprivileged) user 47 | USER nonroot 48 | 49 | # expose the default RPC port 50 | EXPOSE 4401 4444 51 | EXPOSE 4444/udp 52 | 53 | CMD ["/iroh/iroh-p2p"] -------------------------------------------------------------------------------- /code_of_conduct.md: -------------------------------------------------------------------------------- 1 | # Code of Conduct 2 | 3 | Online or off, Number Zero is a harassment-free environment for everyone, regardless of gender, gender identity and expression, sexual orientation, disability, physical appearance, body size, race, age or religion or technical skill level. We do not tolerate harassment of participants in any form. 4 | 5 | Harassment includes verbal comments that reinforce social structures of domination related to gender, gender identity and expression, sexual orientation, disability, physical appearance, body size, race, age, religion, sexual images in public spaces, deliberate intimidation, stalking, following, harassing photography or recording, sustained disruption of talks or other events, inappropriate physical contact, and unwelcome sexual attention. 
Participants asked to stop any harassing behavior are expected to comply immediately. 6 | 7 | If a participant engages in harassing behavior, the organizers may take any action they deem appropriate, including warning the offender or expulsion from events and online forums. 8 | 9 | If you are being harassed, notice that someone else is being harassed, or have any other concerns, please contact a member of the organizing team immediately. 10 | 11 | At offline events, organizers will identify themselves, and will help participants contact venue security or local law enforcement, provide escorts, or otherwise assist those experiencing harassment to feel safe for the duration of the event. We value your participation! 12 | 13 | This document is based on a similar code from [EDGI](https://envirodatagov.org/) and [Civic Tech Toronto](http://civictech.ca/about-us/), itself derived from the [Recurse Center’s Social Rules](https://www.recurse.com/manual#sec-environment), and the [anti-harassment policy from the Geek Feminism Wiki](http://geekfeminism.wikia.com/wiki/Conference_anti-harassment/Policy). 
14 | -------------------------------------------------------------------------------- /iroh-bitswap/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "iroh-bitswap" 3 | authors = ["dignifiedquire "] 4 | description = "Implementation of the bitswap protocol" 5 | version.workspace = true 6 | edition.workspace = true 7 | license.workspace = true 8 | repository.workspace = true 9 | rust-version.workspace = true 10 | 11 | [build-dependencies] 12 | prost-build.workspace = true 13 | 14 | [dependencies] 15 | ahash.workspace = true 16 | anyhow.workspace = true 17 | async-broadcast.workspace = true 18 | async-channel.workspace = true 19 | async-trait.workspace = true 20 | async-stream.workspace = true 21 | asynchronous-codec.workspace = true 22 | bytes.workspace = true 23 | cid.workspace = true 24 | deadqueue.workspace = true 25 | derivative.workspace = true 26 | futures.workspace = true 27 | iroh-metrics = { workspace = true, features = ["bitswap"] } 28 | iroh-util.workspace = true 29 | keyed_priority_queue.workspace = true 30 | libp2p = { workspace = true, features = ["ping"] } 31 | multihash.workspace = true 32 | names.workspace = true 33 | num_enum.workspace = true 34 | once_cell.workspace = true 35 | prost.workspace = true 36 | rand.workspace = true 37 | smallvec.workspace = true 38 | thiserror.workspace = true 39 | tokio = { workspace = true, features = ["sync"] } 40 | tokio-context.workspace = true 41 | tokio-stream.workspace = true 42 | tracing.workspace = true 43 | unsigned-varint = { workspace = true, features = ["asynchronous_codec"] } 44 | wasm-timer.workspace = true 45 | 46 | [dev-dependencies] 47 | criterion.workspace = true 48 | libp2p = { workspace = true, features = ["yamux", "noise", "tcp", "tokio"] } 49 | tokio = { workspace = true, features = ["macros", "net", "rt"] } 50 | tokio-util = { workspace = true, features = ["compat"] } 51 | tracing-subscriber = { workspace = true, features = 
["env-filter"] } 52 | 53 | 54 | [[bench]] 55 | name = "message" 56 | harness = false 57 | -------------------------------------------------------------------------------- /iroh/src/main.rs: -------------------------------------------------------------------------------- 1 | use anyhow::{anyhow, Result}; 2 | use clap::Parser; 3 | use crossterm::style::Stylize; 4 | use iroh_api::ApiError; 5 | 6 | #[tokio::main(flavor = "multi_thread")] 7 | async fn main() -> Result<()> { 8 | let cli = iroh::run::Cli::parse(); 9 | // the `run` method exists in two versions: 10 | // When using the `testing` feature, the 11 | // version of `run` designed for testing purposes using mocked test 12 | // fixtures is invoked. 13 | // Without the `testing` feature, the version of 14 | // `run` that interacts with the real Iroh API is used. 15 | let r = cli.run().await; 16 | let r = transform_error(r); 17 | match r { 18 | Ok(_) => Ok(()), 19 | Err(e) => { 20 | eprintln!("Error: {e:?}"); 21 | std::process::exit(1); 22 | } 23 | } 24 | } 25 | 26 | fn transform_error(r: Result<()>) -> Result<()> { 27 | match r { 28 | Ok(_) => Ok(()), 29 | Err(e) => { 30 | let rpc_error = e 31 | .root_cause() 32 | .downcast_ref::(); 33 | if let Some(iroh_rpc_client::ClientError::Open(_)) = rpc_error { 34 | return Err(anyhow!( 35 | "Connection refused. Are services running?\n{}", 36 | "hint: see 'iroh start' for more on starting services".yellow(), 37 | )); 38 | } 39 | 40 | let api_error = e.root_cause().downcast_ref::(); 41 | if let Some(ApiError::ConnectionRefused { service }) = api_error { 42 | return Err(anyhow!( 43 | "Connection refused. 
This command requires a running {} service.\n{}", 44 | service, 45 | format!("hint: try 'iroh start {service}'").yellow(), 46 | )); 47 | } 48 | Err(e) 49 | } 50 | } 51 | } 52 | -------------------------------------------------------------------------------- /examples/embed/src/main.rs: -------------------------------------------------------------------------------- 1 | use anyhow::{bail, Result}; 2 | use futures_util::StreamExt; 3 | use iroh_api::{IpfsPath, OutType}; 4 | use iroh_embed::{IrohBuilder, Libp2pConfig, P2pService, RocksStoreService}; 5 | use testdir::testdir; 6 | 7 | #[tokio::main(flavor = "multi_thread")] 8 | async fn main() -> Result<()> { 9 | let dir = testdir!(); 10 | println!("Using directory: {}", dir.display()); 11 | 12 | println!("Starting iroh system..."); 13 | let store = RocksStoreService::new(dir.join("store")).await?; 14 | 15 | let mut p2p_config = Libp2pConfig::default(); 16 | p2p_config.listening_multiaddrs = vec![ 17 | "/ip4/0.0.0.0/tcp/0".parse().unwrap(), 18 | "/ip4/0.0.0.0/udp/0/quic-v1".parse().unwrap(), 19 | ]; 20 | let p2p = P2pService::new(p2p_config, dir, store.addr()).await?; 21 | 22 | // Note by default this is configured with an indexer, but not with http resolvers. 23 | let iroh = IrohBuilder::new().store(store).p2p(p2p).build().await?; 24 | println!("done"); 25 | 26 | let quick_start: IpfsPath = 27 | "/ipfs/QmQPeNsJPyVWPFDVHb77w8G42Fvo15z4bG2X8D2GhfbSXc/quick-start".parse()?; 28 | println!("Fetching quick start: {quick_start}"); 29 | let mut stream = iroh.api().get(&quick_start)?; 30 | 31 | // We only expect a single item here. 
32 | while let Some(item) = stream.next().await { 33 | let (rel_path, data) = item?; 34 | println!("PATH: {rel_path}"); 35 | println!("----"); 36 | match data { 37 | OutType::Dir => bail!("found unexpected dir"), 38 | OutType::Symlink(_) => bail!("found unexpected symlink"), 39 | OutType::Reader(mut reader) => { 40 | let mut stdout = tokio::io::stdout(); 41 | tokio::io::copy(&mut reader, &mut stdout).await?; 42 | } 43 | } 44 | } 45 | 46 | // Stop the system gracefully. 47 | iroh.stop().await?; 48 | 49 | Ok(()) 50 | } 51 | -------------------------------------------------------------------------------- /docker/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | # this is a standard, bare bones iroh configuration. More sophisticated setups 2 | # will take advantage of metrics, and provide custom configuration 3 | # 4 | # Usage: 5 | # run 'docker-compose up' from the docker directory, use 'iroh status' from 6 | # another terminal to interact with services, or visit localhost:9050 on a 7 | # browser to utilize the gateway 8 | version: "3.9" 9 | services: 10 | iroh-gateway: 11 | container_name: "iroh-gateway" 12 | image: "n0computer/iroh-gateway:latest" 13 | environment: 14 | - "IROH_GATEWAY__RPC_CLIENT__P2P_ADDR=grpc://iroh-p2p:4401" 15 | - "IROH_GATEWAY__RPC_CLIENT__STORE_ADDR=grpc://iroh-store:4402" 16 | ports: 17 | # public HTTP gateway port. eg: http://localhost:9050/ipfs/bafybeigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi 18 | - "9050:9050" 19 | # RPC port, iroh CLI uses this to control your gateway service 20 | # this shouldn't be publically accessible, so we explicitly wire it 21 | # to the local loopback address: 127.0.0.1 22 | - "127.0.0.1:4400:4400" 23 | iroh-p2p: 24 | container_name: "iroh-p2p" 25 | image: "n0computer/iroh-p2p:latest" 26 | environment: 27 | - "IROH_P2P__RPC_CLIENT__STORE_ADDR=grpc://iroh-store:4402" 28 | ports: 29 | # libp2p connection port. 
peers will dial your node here 30 | - "4444:4444" 31 | # RPC port, iroh CLI uses this to control your p2p service 32 | # this shouldn't be publically accessible, so we explicitly wire it 33 | # to the local loopback address: 127.0.0.1 34 | - "127.0.0.1:4401:4401" 35 | iroh-store: 36 | container_name: "iroh-store" 37 | image: "n0computer/iroh-store:latest" 38 | ports: 39 | # RPC port, iroh CLI uses this to control your store service 40 | # this shouldn't be publically accessible, so we explicitly wire it 41 | # to the local loopback address: 127.0.0.1 42 | - "127.0.0.1:4402:4402" -------------------------------------------------------------------------------- /.github/workflows/weekly.yml: -------------------------------------------------------------------------------- 1 | name: Weekly CI 2 | 3 | on: 4 | schedule: 5 | - cron: "0 0 * * 1" # every monday at 00:00 6 | workflow_dispatch: 7 | 8 | jobs: 9 | weekly-ci: 10 | strategy: 11 | fail-fast: false 12 | matrix: 13 | rust: [beta] 14 | protoc-arch: [linux-x86_64] 15 | 16 | runs-on: ubuntu-latest 17 | 18 | env: 19 | RUSTFLAGS: -Dwarnings -Cdebuginfo=0 20 | RUST_BACKTRACE: full 21 | RUSTV: ${{ matrix.rust }} 22 | 23 | steps: 24 | - uses: actions/checkout@master 25 | 26 | - name: Set build arch 27 | run: | 28 | echo "PROTOC_ARCH=${{ matrix.protoc-arch }}" >> $GITHUB_ENV 29 | 30 | - name: Install Protoc 31 | run: | 32 | PROTOC_VERSION=3.20.1 33 | PROTOC_ZIP=protoc-$PROTOC_VERSION-$PROTOC_ARCH.zip 34 | curl --retry 3 --retry-max-time 90 -H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" -OL https://github.com/protocolbuffers/protobuf/releases/download/v$PROTOC_VERSION/$PROTOC_ZIP 35 | sudo unzip -o $PROTOC_ZIP -d /usr/local bin/protoc 36 | sudo unzip -o $PROTOC_ZIP -d /usr/local 'include/*' 37 | rm -f $PROTOC_ZIP 38 | echo "PROTOC=/usr/local/bin/protoc" >> $GITHUB_ENV 39 | echo "PROTOC_INCLUDE=/usr/local/include" >> $GITHUB_ENV 40 | 41 | - name: Install ${{ matrix.rust }} 42 | run: | 43 | rustup toolchain install 
--profile default ${{ matrix.rust }} 44 | 45 | - name: clippy all features 46 | run: | 47 | cargo +$RUSTV clippy --workspace --all-features --all-targets -- -D warnings 48 | 49 | - name: Create Issue if clippy failed 50 | if: ${{ failure() }} 51 | uses: dacbd/create-issue-action@v1 52 | with: 53 | token: ${{ github.token }} 54 | title: ${{ matrix.rust }} clippy failed 55 | body: | 56 | Failed Run: https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }} 57 | -------------------------------------------------------------------------------- /iroh-bitswap/src/block.rs: -------------------------------------------------------------------------------- 1 | use std::fmt::Debug; 2 | 3 | use bytes::Bytes; 4 | use cid::Cid; 5 | use multihash::{Code, MultihashDigest}; 6 | 7 | /// A wrapper around bytes with their `Cid`. 8 | #[derive(Clone, Eq, PartialEq, PartialOrd, Ord)] 9 | pub struct Block { 10 | pub cid: Cid, 11 | pub data: Bytes, 12 | } 13 | 14 | impl Debug for Block { 15 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 16 | f.debug_struct("Block") 17 | .field("cid", &self.cid.to_string()) 18 | .field("data", &format!("[{} bytes]", self.data.len())) 19 | .finish() 20 | } 21 | } 22 | 23 | impl Block { 24 | pub fn new(data: Bytes, cid: Cid) -> Self { 25 | Self { cid, data } 26 | } 27 | 28 | pub fn from_v0_data(data: Bytes) -> cid::Result { 29 | let digest = Code::Sha2_256.digest(&data); 30 | let cid = Cid::new_v0(digest)?; 31 | Ok(Self { cid, data }) 32 | } 33 | 34 | pub fn cid(&self) -> &Cid { 35 | &self.cid 36 | } 37 | 38 | pub fn data(&self) -> &Bytes { 39 | &self.data 40 | } 41 | } 42 | 43 | pub mod tests { 44 | use super::*; 45 | use bytes::BytesMut; 46 | use rand::{thread_rng, Rng}; 47 | 48 | const RAW: u64 = 0x55; 49 | 50 | pub fn create_random_block_v1() -> Block { 51 | let mut bytes = BytesMut::with_capacity(64); 52 | bytes.resize(64, 0); 53 | thread_rng().fill(&mut bytes[..]); 54 | create_block_v1(bytes) 55 | } 56 | 57 | 
pub fn create_block_v1>(bytes: B) -> Block { 58 | let bytes = bytes.into(); 59 | let digest = Code::Sha2_256.digest(&bytes); 60 | let cid = Cid::new_v1(RAW, digest); 61 | Block::new(bytes, cid) 62 | } 63 | 64 | pub fn create_block_v0>(bytes: B) -> Block { 65 | let bytes = bytes.into(); 66 | let digest = Code::Sha2_256.digest(&bytes); 67 | let cid = Cid::new_v0(digest).unwrap(); 68 | Block::new(bytes, cid) 69 | } 70 | } 71 | -------------------------------------------------------------------------------- /iroh-rpc-client/src/gateway.rs: -------------------------------------------------------------------------------- 1 | use std::fmt; 2 | 3 | use anyhow::Result; 4 | use async_stream::stream; 5 | use futures::{Stream, StreamExt}; 6 | use iroh_rpc_types::{gateway::*, VersionRequest, WatchRequest}; 7 | 8 | use crate::{StatusType, HEALTH_POLL_WAIT}; 9 | 10 | #[derive(Clone)] 11 | pub struct GatewayClient { 12 | client: quic_rpc::RpcClient, 13 | } 14 | 15 | impl fmt::Debug for GatewayClient { 16 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 17 | f.debug_struct("GatewayClient2") 18 | .field("client", &self.client) 19 | .finish() 20 | } 21 | } 22 | 23 | impl GatewayClient { 24 | pub async fn new(addr: GatewayAddr) -> anyhow::Result { 25 | let client = crate::open_client::(addr).await?; 26 | Ok(Self { client }) 27 | } 28 | 29 | #[tracing::instrument(skip(self))] 30 | pub async fn version(&self) -> Result { 31 | let res = self.client.rpc(VersionRequest).await?; 32 | Ok(res.version) 33 | } 34 | 35 | #[tracing::instrument(skip(self))] 36 | pub async fn check(&self) -> (StatusType, String) { 37 | match self.version().await { 38 | Ok(version) => (StatusType::Serving, version), 39 | Err(_) => (StatusType::Down, String::new()), 40 | } 41 | } 42 | 43 | #[tracing::instrument(skip(self))] 44 | pub async fn watch(&self) -> impl Stream { 45 | let client = self.client.clone(); 46 | stream! 
{ 47 | loop { 48 | let res = client.server_streaming(WatchRequest).await; 49 | if let Ok(mut res) = res { 50 | while let Some(Ok(version)) = res.next().await { 51 | yield (StatusType::Serving, version.version); 52 | } 53 | } 54 | yield (StatusType::Down, String::new()); 55 | tokio::time::sleep(HEALTH_POLL_WAIT).await; 56 | } 57 | } 58 | } 59 | } 60 | -------------------------------------------------------------------------------- /iroh-p2p/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "iroh-p2p" 3 | version.workspace = true 4 | edition.workspace = true 5 | authors = ["dignifiedquire "] 6 | license.workspace = true 7 | repository.workspace = true 8 | description = "Implementation of the p2p part of iroh" 9 | rust-version.workspace = true 10 | 11 | [dependencies] 12 | ahash.workspace = true 13 | anyhow.workspace = true 14 | async-stream.workspace = true 15 | async-trait.workspace = true 16 | asynchronous-codec.workspace = true 17 | bytes.workspace = true 18 | cid.workspace = true 19 | clap = { workspace = true, features = ["derive"] } 20 | config.workspace = true 21 | futures.workspace = true 22 | futures-util.workspace = true 23 | git-version.workspace = true 24 | iroh-bitswap.workspace = true 25 | iroh-metrics = { workspace = true, features = ["bitswap", "p2p"] } 26 | iroh-rpc-client.workspace = true 27 | iroh-rpc-types.workspace = true 28 | iroh-util.workspace = true 29 | lazy_static.workspace = true 30 | lru.workspace = true 31 | names.workspace = true 32 | rand.workspace = true 33 | serde = { workspace = true, features = ["derive"] } 34 | smallvec.workspace = true 35 | ssh-key = { workspace = true, features = ["ed25519", "std", "rand_core"] } 36 | tempfile.workspace = true 37 | tokio = { workspace = true, features = ["fs", "time", "sync", "macros"] } 38 | tokio-stream.workspace = true 39 | toml.workspace = true 40 | tracing.workspace = true 41 | tracing-subscriber = { workspace = true, 
features = ["env-filter"] } 42 | zeroize.workspace = true 43 | 44 | [dependencies.libp2p] 45 | workspace = true 46 | features = [ 47 | "gossipsub", 48 | "kad", 49 | "identify", 50 | "ping", 51 | "mdns", 52 | "noise", 53 | "yamux", 54 | "tcp", 55 | "quic", 56 | "dns", 57 | "mplex", 58 | "request-response", 59 | "websocket", 60 | "serde", 61 | "metrics", 62 | "relay", 63 | "dcutr", 64 | "autonat", 65 | "rsa", 66 | "tokio", 67 | "macros", 68 | ] 69 | 70 | [dev-dependencies] 71 | criterion.workspace = true 72 | rand_chacha.workspace = true 73 | 74 | [[bench]] 75 | name = "lru_cache" 76 | harness = false 77 | -------------------------------------------------------------------------------- /iroh/src/size.rs: -------------------------------------------------------------------------------- 1 | use std::path::{Path, PathBuf}; 2 | 3 | use async_stream::stream; 4 | use futures::stream::Stream; 5 | use tokio::fs; 6 | 7 | #[derive(Debug, PartialEq, Eq)] 8 | pub struct FileInfo { 9 | pub path: PathBuf, 10 | pub size: u64, 11 | } 12 | 13 | pub fn size_stream(path: &Path) -> impl Stream + '_ { 14 | stream! 
{ 15 | let mut stack = vec![path.to_path_buf()]; 16 | while let Some(path) = stack.pop() { 17 | if path.is_dir() { 18 | let mut read_dir = fs::read_dir(&path).await.unwrap(); 19 | while let Some(entry) = read_dir.next_entry().await.unwrap() { 20 | stack.push(entry.path()); 21 | } 22 | } else if path.is_symlink() { 23 | continue; 24 | } else { 25 | let size = fs::metadata(&path).await.unwrap().len(); 26 | let path = path.clone(); 27 | yield FileInfo { path, size }; 28 | } 29 | } 30 | } 31 | } 32 | 33 | #[cfg(test)] 34 | mod tests { 35 | use super::*; 36 | use futures::stream::StreamExt; 37 | use std::path::PathBuf; 38 | 39 | #[tokio::test] 40 | async fn test_read_directory_size() { 41 | let mut d = PathBuf::from(env!("CARGO_MANIFEST_DIR")); 42 | d.push("fixtures"); 43 | d.push("dir"); 44 | let mut size_info = size_stream(&d).collect::>().await; 45 | // have to sort this for testing purposes as read_dir has no guaranteed 46 | // order 47 | size_info.sort_by_key(|info| info.path.clone()); 48 | assert_eq!( 49 | size_info, 50 | vec![ 51 | FileInfo { 52 | path: d.join("a.txt"), 53 | size: 7, 54 | }, 55 | FileInfo { 56 | path: d.join("subdir").join("b.txt"), 57 | size: 15 58 | }, 59 | FileInfo { 60 | path: d.join("subdir").join("c.txt"), 61 | size: 14, 62 | }, 63 | ], 64 | ); 65 | } 66 | } 67 | -------------------------------------------------------------------------------- /iroh-car/src/writer.rs: -------------------------------------------------------------------------------- 1 | use cid::Cid; 2 | use integer_encoding::VarIntAsyncWriter; 3 | use tokio::io::{AsyncWrite, AsyncWriteExt}; 4 | 5 | use crate::{error::Error, header::CarHeader}; 6 | 7 | #[derive(Debug)] 8 | pub struct CarWriter { 9 | header: CarHeader, 10 | writer: W, 11 | cid_buffer: Vec, 12 | is_header_written: bool, 13 | } 14 | 15 | impl CarWriter 16 | where 17 | W: AsyncWrite + Send + Unpin, 18 | { 19 | pub fn new(header: CarHeader, writer: W) -> Self { 20 | CarWriter { 21 | header, 22 | writer, 23 | 
cid_buffer: Vec::new(), 24 | is_header_written: false, 25 | } 26 | } 27 | 28 | /// Writes header and stream of data to writer in Car format. 29 | pub async fn write(&mut self, cid: Cid, data: T) -> Result<(), Error> 30 | where 31 | T: AsRef<[u8]>, 32 | { 33 | if !self.is_header_written { 34 | // Write header bytes 35 | let header_bytes = self.header.encode()?; 36 | self.writer.write_varint_async(header_bytes.len()).await?; 37 | self.writer.write_all(&header_bytes).await?; 38 | self.is_header_written = true; 39 | } 40 | 41 | // Write the given block. 42 | self.cid_buffer.clear(); 43 | cid.write_bytes(&mut self.cid_buffer).expect("vec write"); 44 | 45 | let data = data.as_ref(); 46 | let len = self.cid_buffer.len() + data.len(); 47 | 48 | self.writer.write_varint_async(len).await?; 49 | self.writer.write_all(&self.cid_buffer).await?; 50 | self.writer.write_all(data).await?; 51 | 52 | Ok(()) 53 | } 54 | 55 | /// Finishes writing, including flushing and returns the writer. 56 | pub async fn finish(mut self) -> Result { 57 | self.flush().await?; 58 | Ok(self.writer) 59 | } 60 | 61 | /// Flushes the underlying writer. 62 | pub async fn flush(&mut self) -> Result<(), Error> { 63 | self.writer.flush().await?; 64 | Ok(()) 65 | } 66 | 67 | /// Consumes the [`CarWriter`] and returns the underlying writer. 
68 | pub fn into_inner(self) -> W { 69 | self.writer 70 | } 71 | } 72 | -------------------------------------------------------------------------------- /iroh-gateway/src/constants.rs: -------------------------------------------------------------------------------- 1 | use axum::http::{header::HeaderName, HeaderValue}; 2 | 3 | // Headers 4 | pub static HEADER_X_FORWARDED_HOST: HeaderName = HeaderName::from_static("x-forwarded-host"); 5 | pub static HEADER_X_FORWARDED_PROTO: HeaderName = HeaderName::from_static("x-forwarded-proto"); 6 | pub static HEADER_X_IPFS_PATH: HeaderName = HeaderName::from_static("x-ipfs-path"); 7 | pub static HEADER_X_CONTENT_TYPE_OPTIONS: HeaderName = 8 | HeaderName::from_static("x-content-type-options"); 9 | pub static HEADER_X_TRACE_ID: HeaderName = HeaderName::from_static("x-trace-id"); 10 | pub static HEADER_X_IPFS_GATEWAY_PREFIX: HeaderName = 11 | HeaderName::from_static("x-ipfs-gateway-prefix"); 12 | pub static HEADER_X_IPFS_ROOTS: HeaderName = HeaderName::from_static("x-ipfs-roots"); 13 | pub static HEADER_SERVICE_WORKER: HeaderName = HeaderName::from_static("service-worker"); 14 | pub static HEADER_CACHE_CONTROL: HeaderName = HeaderName::from_static("cache-control"); 15 | pub static HEADER_X_CHUNKED_OUTPUT: HeaderName = HeaderName::from_static("x-chunked-output"); 16 | pub static HEADER_X_STREAM_OUTPUT: HeaderName = HeaderName::from_static("x-stream-output"); 17 | pub static HEADER_X_REQUESTED_WITH: HeaderName = HeaderName::from_static("x-requested-with"); 18 | 19 | // Common Header Values 20 | pub static VALUE_XCTO_NOSNIFF: HeaderValue = HeaderValue::from_static("nosniff"); 21 | pub static VALUE_NONE: HeaderValue = HeaderValue::from_static("none"); 22 | pub static VAL_IMMUTABLE_MAX_AGE: HeaderValue = 23 | HeaderValue::from_static("public, max-age=31536000, immutable"); 24 | 25 | // Dispositions 26 | pub static DISPOSITION_ATTACHMENT: &str = "attachment"; 27 | pub static DISPOSITION_INLINE: &str = "inline"; 28 | 29 | // Content 
Types 30 | pub static CONTENT_TYPE_IPLD_RAW: HeaderValue = 31 | HeaderValue::from_static("application/vnd.ipld.raw"); 32 | pub static CONTENT_TYPE_IPLD_CAR: HeaderValue = 33 | HeaderValue::from_static("application/vnd.ipld.car; version=1"); 34 | 35 | // Schemes 36 | pub static SCHEME_IPFS: &str = "ipfs"; 37 | pub static SCHEME_IPNS: &str = "ipns"; 38 | 39 | // Max number of links to return in a single recursive request. 40 | // TODO: Make configurable. 41 | pub static RECURSION_LIMIT: usize = 4096; 42 | -------------------------------------------------------------------------------- /iroh-gateway/src/cors.rs: -------------------------------------------------------------------------------- 1 | use http::header::{HeaderMap, HeaderName, HeaderValue}; 2 | use std::str::FromStr; 3 | use tower_http::cors::CorsLayer; 4 | 5 | /// Convert a header value formatted as a csv to a list of a given type. 6 | fn from_header_value(source: &HeaderValue) -> Option> { 7 | if let Ok(names) = source.to_str() { 8 | Some( 9 | names 10 | .split(',') 11 | .filter_map(|s| T::from_str(s.trim()).ok()) 12 | .collect(), 13 | ) 14 | } else { 15 | None 16 | } 17 | } 18 | 19 | /// Creates a CORS middleware from the config headers. 
20 | /// Used headers are: 21 | /// - access-control-allow-headers 22 | /// - access-control-expose-headers (set to allow-headers when not present) 23 | /// - access-control-allow-methods 24 | /// - access-control-allow-origin 25 | pub(crate) fn cors_from_headers(headers: &HeaderMap) -> CorsLayer { 26 | let mut layer = CorsLayer::new(); 27 | 28 | // access-control-allow-methods 29 | if let Some(methods) = headers.get("access-control-allow-methods") { 30 | if let Some(list) = from_header_value(methods) { 31 | layer = layer.allow_methods(list); 32 | } 33 | } 34 | 35 | // access-control-allow-origin 36 | if let Some(origin) = headers.get("access-control-allow-origin") { 37 | layer = layer.allow_origin(origin.clone()); 38 | } 39 | 40 | // access-control-allow-headers 41 | let mut allowed_header_names: Vec = vec![]; 42 | if let Some(allowed_headers) = headers.get("access-control-allow-headers") { 43 | if let Some(list) = from_header_value(allowed_headers) { 44 | allowed_header_names = list.clone(); 45 | layer = layer.allow_headers(list); 46 | } 47 | } 48 | 49 | // access-control-expose-headers 50 | if let Some(exposed_headers) = headers.get("access-control-expose-headers") { 51 | if let Some(list) = from_header_value(exposed_headers) { 52 | layer = layer.expose_headers(list); 53 | } 54 | } else if !allowed_header_names.is_empty() { 55 | layer = layer.expose_headers(allowed_header_names); 56 | } 57 | 58 | layer 59 | } 60 | -------------------------------------------------------------------------------- /iroh-gateway/src/rpc.rs: -------------------------------------------------------------------------------- 1 | use std::result; 2 | 3 | use anyhow::Result; 4 | use futures::stream::Stream; 5 | use iroh_rpc_client::{create_server, GatewayServer, ServerError, ServerSocket, HEALTH_POLL_WAIT}; 6 | use iroh_rpc_types::{ 7 | gateway::{GatewayAddr, GatewayRequest, GatewayService}, 8 | VersionRequest, VersionResponse, WatchRequest, WatchResponse, 9 | }; 10 | use tracing::info; 11 
| 12 | use crate::VERSION; 13 | 14 | #[derive(Default, Debug, Clone)] 15 | pub struct Gateway {} 16 | 17 | impl Gateway { 18 | #[tracing::instrument(skip(self))] 19 | fn watch(self, _: WatchRequest) -> impl Stream { 20 | async_stream::stream! { 21 | loop { 22 | yield WatchResponse { version: VERSION.to_string() }; 23 | tokio::time::sleep(HEALTH_POLL_WAIT).await; 24 | } 25 | } 26 | } 27 | 28 | #[tracing::instrument(skip(self))] 29 | async fn version(self, _: VersionRequest) -> VersionResponse { 30 | VersionResponse { 31 | version: VERSION.to_string(), 32 | } 33 | } 34 | } 35 | 36 | impl iroh_rpc_types::NamedService for Gateway { 37 | const NAME: &'static str = "gateway"; 38 | } 39 | 40 | /// dispatch a single request from the server 41 | async fn dispatch( 42 | s: GatewayServer, 43 | req: GatewayRequest, 44 | chan: ServerSocket, 45 | target: Gateway, 46 | ) -> result::Result<(), ServerError> { 47 | use GatewayRequest::*; 48 | match req { 49 | Watch(req) => s.server_streaming(req, chan, target, Gateway::watch).await, 50 | Version(req) => s.rpc(req, chan, target, Gateway::version).await, 51 | } 52 | } 53 | 54 | pub async fn new(addr: GatewayAddr, gw: Gateway) -> Result<()> { 55 | info!("gateway rpc listening on: {}", addr); 56 | let server = create_server::(addr).await?; 57 | loop { 58 | match server.accept_one().await { 59 | Ok((req, chan)) => { 60 | tokio::spawn(dispatch(server.clone(), req, chan, gw.clone())); 61 | } 62 | Err(cause) => { 63 | tracing::debug!("gateway rpc accept error: {}", cause); 64 | } 65 | } 66 | } 67 | } 68 | -------------------------------------------------------------------------------- /iroh-one/README.md: -------------------------------------------------------------------------------- 1 | # iroh one 2 | 3 | [![crates.io](https://img.shields.io/crates/v/iroh-one.svg?style=flat-square)](https://crates.io/crates/iroh-one) 4 | [![Released API docs](https://img.shields.io/docsrs/iroh-one?style=flat-square)](https://docs.rs/iroh-one) 5 | 
[![MIT/Apache-2.0 licensed](https://img.shields.io/crates/l/iroh-one?style=flat-square)](../LICENSE-MIT) 6 | [![CI](https://img.shields.io/github/workflow/status/n0-computer/iroh/Continuous%20integration?style=flat-square)](https://github.com/n0-computer/iroh/actions?query=workflow%3A%22Continuous+integration%22) 7 | 8 | Single binary of [iroh](https://github.com/n0-computer/iroh) services 9 | ([gateway](https://github.com/n0-computer/iroh/tree/main/iroh-gateway), 10 | [p2p](https://github.com/n0-computer/iroh/tree/main/iroh-p2p), 11 | [store](https://github.com/n0-computer/iroh/tree/main/iroh-store)) 12 | communicating via mem channels. This is an alternative to deploying the iroh 13 | services as micro services. 14 | 15 | ## Running / Building 16 | 17 | `cargo run --release -- -p 10000 --store-path=tmpstore` 18 | 19 | ### Options 20 | 21 | - Run with `cargo run --release -- -h` for details 22 | - `-wcf` Writeable, Cache, Fetch (options to toggle write enable, caching mechanics and fetching from the network); currently exists but is not implemented 23 | - `-p` Port the gateway should listen on 24 | - `--store-path` Path for the iroh-store 25 | 26 | ### Features 27 | 28 | - `http-uds-gateway` - enables the usage and binding of the http gateway over UDS. This is independent from the rpc control endpoint which uses the same default and configuration as `iroh-gateway`. 29 | 30 | ### Reference 31 | 32 | - [Gateway](../iroh-gateway/README.md) 33 | - [P2P](../iroh-p2p/README.md) 34 | - [Store](../iroh-store/README.md) 35 | 36 | ## License 37 | 38 | 39 | Licensed under either of Apache License, Version 40 | 2.0 or MIT license at your option. 41 | 42 | 43 |
44 | 45 | 46 | Unless you explicitly state otherwise, any contribution intentionally submitted 47 | for inclusion in this crate by you, as defined in the Apache-2.0 license, shall 48 | be dual licensed as above, without any additional terms or conditions. 49 | 50 | 51 | 52 | -------------------------------------------------------------------------------- /iroh-one/src/cli.rs: -------------------------------------------------------------------------------- 1 | /// CLI arguments support. 2 | use clap::Parser; 3 | use std::collections::HashMap; 4 | use std::path::PathBuf; 5 | 6 | #[derive(Parser, Debug, Clone)] 7 | #[clap(author, version, about, long_about = None)] 8 | pub struct Args { 9 | /// Gateway 10 | #[clap(short = 'p', long = "gateway-port")] 11 | gateway_port: Option, 12 | #[clap(short, long)] 13 | writeable: Option, 14 | #[clap(short, long)] 15 | fetch: Option, 16 | #[clap(short, long)] 17 | cache: Option, 18 | #[clap(long)] 19 | metrics: bool, 20 | #[clap(long)] 21 | tracing: bool, 22 | #[clap(long)] 23 | denylist: bool, 24 | #[cfg(all(feature = "http-uds-gateway", unix))] 25 | #[clap(long = "gateway-uds-path")] 26 | pub gateway_uds_path: Option, 27 | /// Path to the store 28 | #[clap(long = "store-path")] 29 | pub store_path: Option, 30 | #[clap(long)] 31 | pub cfg: Option, 32 | } 33 | 34 | impl Args { 35 | pub fn make_overrides_map(&self) -> HashMap<&str, String> { 36 | let mut map: HashMap<&str, String> = HashMap::new(); 37 | if let Some(port) = self.gateway_port { 38 | map.insert("gateway.port", port.to_string()); 39 | } 40 | if let Some(writable) = self.writeable { 41 | map.insert("gateway.writable", writable.to_string()); 42 | } 43 | if let Some(fetch) = self.fetch { 44 | map.insert("gateway.fetch", fetch.to_string()); 45 | } 46 | if let Some(cache) = self.cache { 47 | map.insert("gateway.cache", cache.to_string()); 48 | } 49 | map.insert("gateway.denylist", self.denylist.to_string()); 50 | map.insert("metrics.collect", self.metrics.to_string()); 51 | 
map.insert("metrics.tracing", self.tracing.to_string()); 52 | if let Some(path) = self.store_path.clone() { 53 | map.insert("store.path", path.to_str().unwrap_or("").to_string()); 54 | } 55 | #[cfg(all(feature = "http-uds-gateway", unix))] 56 | if let Some(path) = self.gateway_uds_path.clone() { 57 | map.insert("gateway_uds_path", path.to_str().unwrap_or("").to_string()); 58 | } 59 | map 60 | } 61 | } 62 | -------------------------------------------------------------------------------- /iroh-api/src/p2p.rs: -------------------------------------------------------------------------------- 1 | use crate::error::map_service_error; 2 | use anyhow::Result; 3 | use iroh_rpc_client::{Lookup, P2pClient}; 4 | use libp2p::{multiaddr::Protocol, Multiaddr, PeerId}; 5 | use std::collections::HashMap; 6 | 7 | #[derive(Debug)] 8 | pub struct P2p { 9 | client: P2pClient, 10 | } 11 | 12 | #[derive(Debug, Clone)] 13 | pub enum PeerIdOrAddr { 14 | PeerId(PeerId), 15 | Multiaddr(Multiaddr), 16 | } 17 | 18 | impl P2p { 19 | pub fn new(client: P2pClient) -> Self { 20 | Self { client } 21 | } 22 | 23 | pub async fn lookup_local(&self) -> Result { 24 | self.client.lookup_local().await 25 | } 26 | 27 | pub async fn lookup(&self, addr: &PeerIdOrAddr) -> Result { 28 | match addr { 29 | PeerIdOrAddr::PeerId(peer_id) => self.client.lookup(*peer_id, None).await, 30 | PeerIdOrAddr::Multiaddr(addr) => { 31 | let peer_id = peer_id_from_multiaddr(addr)?; 32 | self.client.lookup(peer_id, Some(addr.clone())).await 33 | } 34 | } 35 | .map_err(|e| map_service_error("p2p", e)) 36 | } 37 | 38 | pub async fn connect(&self, addr: &PeerIdOrAddr) -> Result<()> { 39 | match addr { 40 | PeerIdOrAddr::PeerId(peer_id) => self.client.connect(*peer_id, vec![]).await, 41 | PeerIdOrAddr::Multiaddr(addr) => { 42 | let peer_id = peer_id_from_multiaddr(addr)?; 43 | self.client.connect(peer_id, vec![addr.clone()]).await 44 | } 45 | } 46 | .map_err(|e| map_service_error("p2p", e)) 47 | } 48 | 49 | pub async fn 
peers(&self) -> Result>> { 50 | self.client 51 | .get_peers() 52 | .await 53 | .map_err(|e| map_service_error("p2p", e)) 54 | } 55 | } 56 | 57 | fn peer_id_from_multiaddr(addr: &Multiaddr) -> Result { 58 | match addr.iter().find(|p| matches!(*p, Protocol::P2p(_))) { 59 | Some(Protocol::P2p(peer_id)) => { 60 | PeerId::from_multihash(peer_id).map_err(|m| anyhow::anyhow!("Multiaddress contains invalid p2p multihash {:?}. Cannot derive a PeerId from this address.", m )) 61 | } 62 | , 63 | _ => anyhow::bail!("Mulitaddress must include the peer id"), 64 | } 65 | } 66 | -------------------------------------------------------------------------------- /iroh-bitswap/src/client/session/peer_response_tracker.rs: -------------------------------------------------------------------------------- 1 | use std::sync::Arc; 2 | 3 | use ahash::AHashMap; 4 | use libp2p::PeerId; 5 | use rand::{thread_rng, Rng}; 6 | use tokio::sync::RwLock; 7 | 8 | /// Keeps track of how many times each peer was the first to send us a block for a 9 | /// given cid (used to rank peers) 10 | #[derive(Default, Debug, Clone)] 11 | pub struct PeerResponseTracker { 12 | first_responder: Arc>>, 13 | } 14 | 15 | impl PeerResponseTracker { 16 | /// Called when a block is received from a peer (only called first time block is received) 17 | pub async fn received_block_from(&self, from: &PeerId) { 18 | *self.first_responder.write().await.entry(*from).or_default() += 1; 19 | } 20 | 21 | /// Picks a peer from the list of candidate peers, favouring those peers 22 | /// that were first to send us previous blocks. 
23 | pub async fn choose(&self, peers: &[PeerId]) -> Option { 24 | if peers.is_empty() { 25 | return None; 26 | } 27 | 28 | let rnd: f64 = thread_rng().gen(); 29 | 30 | // Find the total received blocks for all candidate peers 31 | let mut total = 0.; 32 | for peer in peers { 33 | total += self.get_peer_count(peer).await as f64; 34 | } 35 | 36 | // Choose one of the peers with a chance proportional to the number 37 | // of blocks received from that peer 38 | let mut counted = 0.0; 39 | for peer in peers { 40 | counted += self.get_peer_count(peer).await as f64 / total; 41 | if counted > rnd { 42 | return Some(*peer); 43 | } 44 | } 45 | 46 | // We shouldn't get here unless there is some weirdness with floating point 47 | // math that doesn't quite cover the whole range of peers in the for loop 48 | // so just choose the last peer. 49 | peers.iter().last().copied() 50 | } 51 | 52 | /// Returns the number of times the peer was first to send us a block. 53 | pub async fn get_peer_count(&self, peer: &PeerId) -> usize { 54 | // Make sure there is always at least a small chance a new peer 55 | // will be chosen 56 | self.first_responder 57 | .read() 58 | .await 59 | .get(peer) 60 | .copied() 61 | .unwrap_or(1) 62 | } 63 | } 64 | -------------------------------------------------------------------------------- /iroh-gateway/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "iroh-gateway" 3 | readme = "README.md" 4 | description = "IPFS gateway" 5 | version.workspace = true 6 | edition.workspace = true 7 | license.workspace = true 8 | repository.workspace = true 9 | rust-version.workspace = true 10 | 11 | [dependencies] 12 | anyhow.workspace = true 13 | async-recursion.workspace = true 14 | async-trait.workspace = true 15 | async-stream.workspace = true 16 | axum.workspace = true 17 | bytes.workspace = true 18 | cid.workspace = true 19 | clap = { workspace = true, features = ["derive"] } 20 | config.workspace 
= true 21 | futures.workspace = true 22 | git-version.workspace = true 23 | handlebars.workspace = true 24 | headers.workspace = true 25 | hex-literal.workspace = true 26 | hex.workspace = true 27 | http-body.workspace = true 28 | http-serde.workspace = true 29 | http.workspace = true 30 | hyper.workspace = true 31 | iroh-car.workspace = true 32 | iroh-metrics = { workspace = true, features = ["gateway"] } 33 | iroh-resolver.workspace = true 34 | iroh-rpc-client.workspace = true 35 | iroh-rpc-types.workspace = true 36 | iroh-unixfs.workspace = true 37 | iroh-util.workspace = true 38 | libp2p.workspace = true 39 | mime.workspace = true 40 | mime_classifier.workspace = true 41 | mime_guess.workspace = true 42 | names.workspace = true 43 | once_cell.workspace = true 44 | opentelemetry = { workspace = true, features = ["rt-tokio"] } 45 | phf = { workspace = true, features = ["macros"] } 46 | rand.workspace = true 47 | reqwest = { workspace = true, features = ["rustls-tls"] } 48 | serde = { workspace = true, features = ["derive"] } 49 | serde_json.workspace = true 50 | serde_qs.workspace = true 51 | sha2.workspace = true 52 | time.workspace = true 53 | tokio = { workspace = true, features = ["macros", "rt-multi-thread", "process", "fs", "io-util"] } 54 | tokio-util = { workspace = true, features = ["io"] } 55 | toml.workspace = true 56 | tower = { workspace = true, features = ["util", "timeout", "load-shed", "limit"] } 57 | tower-http = { workspace = true, features = ["trace", "compression-full", "cors"] } 58 | tower-layer.workspace = true 59 | tracing-opentelemetry.workspace = true 60 | tracing-subscriber = { workspace = true, features = ["env-filter"] } 61 | tracing.workspace = true 62 | url.workspace = true 63 | urlencoding.workspace = true 64 | testdir.workspace = true 65 | 66 | [dev-dependencies] 67 | iroh-store.workspace = true 68 | tempfile.workspace = true 69 | -------------------------------------------------------------------------------- 
/iroh-store/src/main.rs: -------------------------------------------------------------------------------- 1 | use anyhow::anyhow; 2 | use clap::Parser; 3 | use iroh_store::{ 4 | cli::Args, 5 | config::{config_data_path, Config, ServerConfig, CONFIG_FILE_NAME, ENV_PREFIX}, 6 | metrics, rpc, Store, 7 | }; 8 | use iroh_util::lock::ProgramLock; 9 | use iroh_util::{block_until_sigint, iroh_config_path, make_config}; 10 | use tracing::info; 11 | 12 | #[tokio::main(flavor = "multi_thread")] 13 | async fn main() -> anyhow::Result<()> { 14 | let mut lock = ProgramLock::new("iroh-store")?; 15 | lock.acquire_or_exit(); 16 | 17 | let args = Args::parse(); 18 | 19 | let version = env!("CARGO_PKG_VERSION"); 20 | println!("Starting iroh-store, version {version}"); 21 | 22 | let config_path = iroh_config_path(CONFIG_FILE_NAME)?; 23 | let sources = &[Some(config_path.as_path()), args.cfg.as_deref()]; 24 | let config_data_path = config_data_path(args.path.clone())?; 25 | let config = make_config( 26 | // default 27 | ServerConfig::new(config_data_path), 28 | // potential config files 29 | sources, 30 | // env var prefix for this config 31 | ENV_PREFIX, 32 | // map of present command line arguments 33 | args.make_overrides_map(), 34 | ) 35 | .unwrap(); 36 | let metrics_config = config.metrics.clone(); 37 | 38 | let metrics_handle = iroh_metrics::MetricsHandle::new( 39 | metrics::metrics_config_with_compile_time_info(metrics_config), 40 | ) 41 | .await 42 | .expect("failed to initialize metrics"); 43 | 44 | #[cfg(unix)] 45 | { 46 | match iroh_util::increase_fd_limit() { 47 | Ok(soft) => tracing::debug!("NOFILE limit: soft = {}", soft), 48 | Err(err) => tracing::error!("Error increasing NOFILE limit: {}", err), 49 | } 50 | } 51 | 52 | let config = Config::from(config); 53 | let rpc_addr = config 54 | .rpc_addr() 55 | .ok_or_else(|| anyhow!("missing store rpc addr"))?; 56 | let store = if config.path.exists() { 57 | info!("Opening store at {}", config.path.display()); 58 | 
Store::open(config).await? 59 | } else { 60 | info!("Creating store at {}", config.path.display()); 61 | Store::create(config).await? 62 | }; 63 | 64 | let rpc_task = tokio::spawn(async move { rpc::new(rpc_addr, store).await.unwrap() }); 65 | 66 | block_until_sigint().await; 67 | rpc_task.abort(); 68 | metrics_handle.shutdown(); 69 | 70 | Ok(()) 71 | } 72 | -------------------------------------------------------------------------------- /iroh-bitswap/src/server/blockstore_manager.rs: -------------------------------------------------------------------------------- 1 | use ahash::AHashMap; 2 | use anyhow::Result; 3 | use cid::Cid; 4 | use tokio::sync::mpsc; 5 | 6 | use crate::{block::Block, Store}; 7 | 8 | /// Maintains a pool of workers that make requests to the blockstore. 9 | #[derive(Debug)] 10 | pub struct BlockstoreManager { 11 | store: S, 12 | // pending_gauge -> iroh-metrics 13 | // active_gauge -> iroh-metrics 14 | } 15 | 16 | impl BlockstoreManager { 17 | /// Creates a new manager. 
18 | pub async fn new(store: S, _worker_count: usize) -> Self { 19 | BlockstoreManager { store } 20 | } 21 | 22 | pub async fn stop(self) -> Result<()> { 23 | Ok(()) 24 | } 25 | 26 | pub async fn get_block_sizes(&self, keys: &[Cid]) -> Result> { 27 | let mut res = AHashMap::new(); 28 | if keys.is_empty() { 29 | return Ok(res); 30 | } 31 | let (s, mut r) = mpsc::channel(1); 32 | 33 | let store = self.store.clone(); 34 | let keys = keys.to_vec(); 35 | tokio::task::spawn(async move { 36 | for cid in keys { 37 | if let Ok(size) = store.get_size(&cid).await { 38 | s.send(Some((cid, size))).await.ok(); 39 | } else { 40 | s.send(None).await.ok(); 41 | } 42 | } 43 | }); 44 | 45 | while let Some(r) = r.recv().await { 46 | if let Some((cid, block)) = r { 47 | res.insert(cid, block); 48 | } 49 | } 50 | 51 | Ok(res) 52 | } 53 | 54 | pub async fn get_blocks(&self, keys: &[Cid]) -> Result> { 55 | let mut res = AHashMap::new(); 56 | if keys.is_empty() { 57 | return Ok(res); 58 | } 59 | let (s, mut r) = mpsc::channel(1); 60 | 61 | let store = self.store.clone(); 62 | let keys = keys.to_vec(); 63 | tokio::task::spawn(async move { 64 | for cid in keys { 65 | if let Ok(block) = store.get(&cid).await { 66 | s.send(Some((cid, block))).await.ok(); 67 | } else { 68 | s.send(None).await.ok(); 69 | } 70 | } 71 | }); 72 | 73 | while let Some(r) = r.recv().await { 74 | if let Some((cid, block)) = r { 75 | res.insert(cid, block); 76 | } 77 | } 78 | 79 | Ok(res) 80 | } 81 | } 82 | -------------------------------------------------------------------------------- /iroh-p2p/src/behaviour/event.rs: -------------------------------------------------------------------------------- 1 | use iroh_bitswap::BitswapEvent; 2 | use libp2p::{ 3 | autonat, dcutr, gossipsub::GossipsubEvent, identify::Event as IdentifyEvent, 4 | kad::KademliaEvent, mdns::Event as MdnsEvent, ping::Event as PingEvent, relay, 5 | }; 6 | 7 | use super::peer_manager::PeerManagerEvent; 8 | 9 | /// Event type which is emitted from the 
[`NodeBehaviour`]. 10 | /// 11 | /// [`NodeBehaviour`]: crate::behaviour::NodeBehaviour 12 | #[derive(Debug)] 13 | pub enum Event { 14 | Ping(PingEvent), 15 | Identify(Box), 16 | Kademlia(KademliaEvent), 17 | Mdns(MdnsEvent), 18 | Bitswap(BitswapEvent), 19 | Autonat(autonat::Event), 20 | Relay(relay::v2::relay::Event), 21 | RelayClient(relay::v2::client::Event), 22 | Dcutr(dcutr::behaviour::Event), 23 | Gossipsub(GossipsubEvent), 24 | PeerManager(PeerManagerEvent), 25 | } 26 | 27 | impl From for Event { 28 | fn from(event: PingEvent) -> Self { 29 | Event::Ping(event) 30 | } 31 | } 32 | 33 | impl From for Event { 34 | fn from(event: IdentifyEvent) -> Self { 35 | Event::Identify(Box::new(event)) 36 | } 37 | } 38 | 39 | impl From for Event { 40 | fn from(event: KademliaEvent) -> Self { 41 | Event::Kademlia(event) 42 | } 43 | } 44 | 45 | impl From for Event { 46 | fn from(event: MdnsEvent) -> Self { 47 | Event::Mdns(event) 48 | } 49 | } 50 | 51 | impl From for Event { 52 | fn from(event: BitswapEvent) -> Self { 53 | Event::Bitswap(event) 54 | } 55 | } 56 | 57 | impl From for Event { 58 | fn from(event: GossipsubEvent) -> Self { 59 | Event::Gossipsub(event) 60 | } 61 | } 62 | 63 | impl From for Event { 64 | fn from(event: autonat::Event) -> Self { 65 | Event::Autonat(event) 66 | } 67 | } 68 | 69 | impl From for Event { 70 | fn from(event: relay::v2::relay::Event) -> Self { 71 | Event::Relay(event) 72 | } 73 | } 74 | 75 | impl From for Event { 76 | fn from(event: relay::v2::client::Event) -> Self { 77 | Event::RelayClient(event) 78 | } 79 | } 80 | 81 | impl From for Event { 82 | fn from(event: dcutr::behaviour::Event) -> Self { 83 | Event::Dcutr(event) 84 | } 85 | } 86 | 87 | impl From for Event { 88 | fn from(event: PeerManagerEvent) -> Self { 89 | Event::PeerManager(event) 90 | } 91 | } 92 | -------------------------------------------------------------------------------- /iroh-one/src/uds.rs: 
-------------------------------------------------------------------------------- 1 | /// HTTP over UDS support 2 | /// From https://github.com/tokio-rs/axum/blob/1fe45583626a4c9c890cc01131d38c57f8728686/examples/unix-domain-socket/src/main.rs 3 | use axum::extract::connect_info; 4 | use axum::{Router, Server}; 5 | use futures::ready; 6 | use hyper::server::accept::Accept; 7 | use iroh_gateway::{core::State, handlers::get_app_routes}; 8 | use iroh_unixfs::content_loader::ContentLoader; 9 | use std::path::PathBuf; 10 | use std::{ 11 | pin::Pin, 12 | sync::Arc, 13 | task::{Context, Poll}, 14 | }; 15 | use tokio::net::{unix::UCred, UnixListener, UnixStream}; 16 | 17 | #[derive(Debug)] 18 | pub struct ServerAccept { 19 | pub uds: UnixListener, 20 | } 21 | 22 | impl Accept for ServerAccept { 23 | type Conn = UnixStream; 24 | type Error = Box; 25 | 26 | fn poll_accept( 27 | self: Pin<&mut Self>, 28 | cx: &mut Context<'_>, 29 | ) -> Poll>> { 30 | let (stream, _addr) = ready!(self.uds.poll_accept(cx))?; 31 | Poll::Ready(Some(Ok(stream))) 32 | } 33 | } 34 | 35 | #[derive(Clone, Debug)] 36 | #[allow(dead_code)] 37 | pub struct UdsConnectInfo { 38 | peer_addr: Arc, 39 | peer_cred: UCred, 40 | } 41 | 42 | impl connect_info::Connected<&UnixStream> for UdsConnectInfo { 43 | fn connect_info(target: &UnixStream) -> Self { 44 | let peer_addr = target.peer_addr().unwrap(); 45 | let peer_cred = target.peer_cred().unwrap(); 46 | 47 | Self { 48 | peer_addr: Arc::new(peer_addr), 49 | peer_cred, 50 | } 51 | } 52 | } 53 | 54 | pub fn uds_server( 55 | state: Arc>, 56 | path: PathBuf, 57 | ) -> Option< 58 | Server< 59 | ServerAccept, 60 | axum::extract::connect_info::IntoMakeServiceWithConnectInfo, 61 | >, 62 | > { 63 | let _ = std::fs::remove_file(&path); 64 | match UnixListener::bind(&path) { 65 | Ok(uds) => { 66 | tracing::debug!("Binding to UDS at {}", path.display()); 67 | let app = get_app_routes(&state); 68 | Some( 69 | Server::builder(ServerAccept { uds }) 70 | 
.serve(app.into_make_service_with_connect_info::()), 71 | ) 72 | } 73 | Err(err) => { 74 | tracing::error!( 75 | "Failed to bind http uds socket at {}: {}", 76 | path.display(), 77 | err 78 | ); 79 | None 80 | } 81 | } 82 | } 83 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # iroh 2 | 3 | [![crates.io](https://img.shields.io/crates/v/iroh.svg?style=flat-square)](https://crates.io/crates/iroh) 4 | [![Released API docs](https://img.shields.io/docsrs/iroh?style=flat-square)](https://docs.rs/iroh) 5 | [![MIT/Apache-2.0 licensed](https://img.shields.io/crates/l/iroh?style=flat-square)](./LICENSE-MIT) 6 | [![CI](https://img.shields.io/github/workflow/status/n0-computer/iroh/Continuous%20integration?style=flat-square)](https://github.com/n0-computer/iroh/actions?query=workflow%3A%22Continuous+integration%22) 7 | 8 | Iroh is a next-generation implementation of the Interplanetary File System ([IPFS](https://ipfs.io)) for Cloud & Mobile platforms. 9 | 10 | IPFS is a networking protocol for exchanging _content-addressed_ blocks of immutable data. “Content-addressed” means referring to data by the *hash of its content*, which makes the reference unique and verifiable. These two properties make it possible to get data from *any* node in the network that speaks the IPFS protocol, including IPFS content being served by other implementations of IPFS. 11 | 12 | This repo is a common core for three distributions of iroh: 13 | 14 | - **Iroh Cloud:** core features of iroh split into configurable microservices, optimized for running at datacenter scale. 15 | - **Iroh One:** A select set of iroh cloud features packaged as a single binary for simplified deployment. 16 | - **Iroh Mobile:** iOS & Android libraries that bring efficient data distribution to mobile apps. 17 | 18 | Here is an [install guide](https://iroh.computer/install). 
19 | 20 | ## Working on Iroh 21 | Check out the [CONTRIBUTOR docs](./CONTRIBUTOR.md) to get familiar with ways you can contribute to the Iroh project. The [DEVELOPERS docs](./DEVELOPERS.md) will help you get starting with building and developing Iroh. 22 | 23 | ## Benchmarks 24 | 25 | A full suite of automated benchmarks is in the works. [this talk](https://www.youtube.com/watch?v=qPBR2K2X6cs&t=161s) goes into some early numbers. 26 | 27 | ## Who's behind this? 28 | 29 | [Iroh](https://iroh.computer) is built & maintained by [number 0](https://n0.computer). We're a founder-backed startup hell-bent on building efficient distributed systems software. 30 | 31 | ## License 32 | 33 | 34 | Licensed under either of Apache License, Version 35 | 2.0 or MIT license at your option. 36 | 37 | 38 |
39 | 40 | 41 | Unless you explicitly state otherwise, any contribution intentionally submitted 42 | for inclusion in this crate by you, as defined in the Apache-2.0 license, shall 43 | be dual licensed as above, without any additional terms or conditions. 44 | 45 | -------------------------------------------------------------------------------- /iroh-bitswap/src/client/message_queue/wantlist.rs: -------------------------------------------------------------------------------- 1 | use std::time::Instant; 2 | 3 | use ahash::{AHashMap, AHashSet}; 4 | use cid::Cid; 5 | 6 | use crate::{ 7 | client::wantlist::Wantlist, 8 | message::{Priority, WantType}, 9 | }; 10 | 11 | #[derive(Debug, Clone)] 12 | pub struct Wants { 13 | pub bcst_wants: RecallWantlist, 14 | pub peer_wants: RecallWantlist, 15 | pub cancels: AHashSet, 16 | pub priority: i32, 17 | } 18 | 19 | impl Wants { 20 | /// Wether there is work to be processed. 21 | pub fn has_pending_work(&self) -> bool { 22 | self.pending_work_count() > 0 23 | } 24 | 25 | /// The amount of work that is waiting to be processed. 26 | pub fn pending_work_count(&self) -> usize { 27 | self.bcst_wants.pending.len() + self.peer_wants.pending.len() + self.cancels.len() 28 | } 29 | } 30 | 31 | #[derive(Debug, Default, Clone)] 32 | pub struct RecallWantlist { 33 | /// List of wants that have not yet been sent. 34 | pub pending: Wantlist, 35 | /// The list of wants that have been sent. 36 | pub sent: Wantlist, 37 | /// The time at which each want was sent. 38 | pub sent_at: AHashMap, 39 | } 40 | 41 | impl RecallWantlist { 42 | /// Adds a want to the pending list. 43 | pub fn add(&mut self, cid: Cid, priority: Priority, want_type: WantType) { 44 | self.pending.add(cid, priority, want_type); 45 | } 46 | 47 | /// Removes wants from both pending and sent list. 
48 | pub fn remove(&mut self, cid: &Cid) { 49 | self.pending.remove(cid); 50 | self.sent.remove(cid); 51 | self.sent_at.remove(cid); 52 | } 53 | 54 | /// Removes wants from both pending and sent list, by type. 55 | pub fn remove_type(&mut self, cid: &Cid, want_type: WantType) { 56 | self.pending.remove_type(cid, want_type); 57 | if self.sent.remove_type(cid, want_type).is_some() { 58 | self.sent_at.remove(cid); 59 | } 60 | } 61 | 62 | /// Moves the want from pending to sent. 63 | /// 64 | /// Returns true if the want was marked as sent, false if the want wasn't 65 | /// pending to begin with. 66 | pub fn mark_sent(&mut self, e: &crate::client::wantlist::Entry) -> bool { 67 | if self.pending.remove_type(&e.cid, e.want_type).is_none() { 68 | return false; 69 | } 70 | self.sent.add(e.cid, e.priority, e.want_type); 71 | true 72 | } 73 | 74 | /// Clears out the recorded sent time. 75 | pub fn clear_sent_at(&mut self, cid: &Cid) { 76 | self.sent_at.remove(cid); 77 | } 78 | 79 | pub fn sent_at(&mut self, cid: Cid, at: Instant) { 80 | if !self.sent.contains(&cid) { 81 | self.sent_at.insert(cid, at); 82 | } 83 | } 84 | } 85 | -------------------------------------------------------------------------------- /iroh-bitswap/src/peer_task_queue/peer_task.rs: -------------------------------------------------------------------------------- 1 | use std::time::Instant; 2 | 3 | use libp2p::PeerId; 4 | 5 | use super::{Data, Topic}; 6 | 7 | /// A single task to be executed. 8 | #[derive(Debug, Clone, PartialEq, Eq)] 9 | pub struct Task { 10 | /// The topic of the task. 11 | pub topic: T, 12 | /// The priority of the task 13 | pub priority: isize, 14 | /// The size of the task 15 | /// - peers with most active work are deprioritzed 16 | /// - peers with most pending work are prioritized 17 | pub work: usize, 18 | /// Associated data. 19 | pub data: D, 20 | } 21 | 22 | /// Contains a Task, and also some bookkeeping information. 
23 | /// It is used internally by the PeerTracker to keep track of tasks. 24 | #[derive(Debug, Clone, PartialEq, Eq)] 25 | pub struct QueueTask { 26 | pub task: Task, 27 | pub target: PeerId, 28 | /// Marks the time that the task was added to the queue. 29 | pub created: Instant, 30 | } 31 | 32 | impl PartialOrd for QueueTask { 33 | fn partial_cmp(&self, other: &Self) -> Option { 34 | Some(self.cmp(other)) 35 | } 36 | } 37 | impl Ord for QueueTask { 38 | fn cmp(&self, other: &Self) -> std::cmp::Ordering { 39 | if self.target == other.target && self.task.priority != other.task.priority { 40 | return self.task.priority.cmp(&other.task.priority); 41 | } 42 | 43 | // FIFO 44 | other.created.cmp(&self.created) 45 | } 46 | } 47 | 48 | impl QueueTask { 49 | pub fn new(task: Task, target: PeerId, created: Instant) -> Self { 50 | QueueTask { 51 | task, 52 | target, 53 | created, 54 | } 55 | } 56 | } 57 | 58 | /// Trait that is used to merge new tasks into the active and pending queues. 59 | pub trait TaskMerger: 60 | PartialEq + Eq + Clone + std::fmt::Debug + Send + Sync + 'static 61 | { 62 | /// Indicates whether the given task has more information than 63 | /// the existing group of tasks (which have the same Topic), and thus should be merged. 64 | fn has_new_info(&self, task_info: &Task, existing_tasks: &[Task]) -> bool; 65 | /// Copies relevant fields from a new task to an existing task. 
66 | fn merge(&self, task: &Task, existing: &mut Task); 67 | } 68 | 69 | #[derive(Default, Debug, Clone, PartialEq, Eq)] 70 | pub struct DefaultTaskMerger {} 71 | impl TaskMerger for DefaultTaskMerger { 72 | fn has_new_info(&self, _task_info: &Task, _existing_tasks: &[Task]) -> bool { 73 | false 74 | } 75 | fn merge(&self, _task: &Task, _exising: &mut Task) {} 76 | } 77 | -------------------------------------------------------------------------------- /iroh-car/tests/car_file_test.rs: -------------------------------------------------------------------------------- 1 | use futures::TryStreamExt; 2 | use iroh_car::*; 3 | use tokio::fs::{self, File}; 4 | use tokio::io::BufReader; 5 | 6 | #[tokio::test] 7 | async fn roundtrip_carv1_test_file() { 8 | let file = File::open("tests/testv1.car").await.unwrap(); 9 | let buf_reader = BufReader::new(file); 10 | 11 | let car_reader = CarReader::new(buf_reader).await.unwrap(); 12 | let header = car_reader.header().clone(); 13 | let files: Vec<_> = car_reader.stream().try_collect().await.unwrap(); 14 | assert_eq!(files.len(), 35); 15 | 16 | let mut buffer = Vec::new(); 17 | let mut writer = CarWriter::new(header, &mut buffer); 18 | for (cid, data) in &files { 19 | writer.write(*cid, data).await.unwrap(); 20 | } 21 | writer.finish().await.unwrap(); 22 | 23 | let file = fs::read("tests/testv1.car").await.unwrap(); 24 | assert_eq!(file, buffer); 25 | } 26 | 27 | #[tokio::test] 28 | async fn roundtrip_carv1_basic_fixtures_file() { 29 | let file = File::open("tests/carv1_basic.car").await.unwrap(); 30 | let buf_reader = BufReader::new(file); 31 | 32 | let car_reader = CarReader::new(buf_reader).await.unwrap(); 33 | let header = car_reader.header().clone(); 34 | 35 | assert_eq!( 36 | car_reader.header().roots(), 37 | [ 38 | "bafyreihyrpefhacm6kkp4ql6j6udakdit7g3dmkzfriqfykhjw6cad5lrm" 39 | .parse() 40 | .unwrap(), 41 | "bafyreidj5idub6mapiupjwjsyyxhyhedxycv4vihfsicm2vt46o7morwlm" 42 | .parse() 43 | .unwrap() 44 | ] 45 | ); 46 | 47 | 
let files: Vec<_> = car_reader.stream().try_collect().await.unwrap(); 48 | assert_eq!(files.len(), 8); 49 | 50 | let cids = [ 51 | "bafyreihyrpefhacm6kkp4ql6j6udakdit7g3dmkzfriqfykhjw6cad5lrm", 52 | "QmNX6Tffavsya4xgBi2VJQnSuqy9GsxongxZZ9uZBqp16d", 53 | "bafkreifw7plhl6mofk6sfvhnfh64qmkq73oeqwl6sloru6rehaoujituke", 54 | "QmWXZxVQ9yZfhQxLD35eDR8LiMRsYtHxYqTFCBbJoiJVys", 55 | "bafkreiebzrnroamgos2adnbpgw5apo3z4iishhbdx77gldnbk57d4zdio4", 56 | "QmdwjhxpxzcMsR3qUuj7vUL8pbA7MgR3GAxWi2GLHjsKCT", 57 | "bafkreidbxzk2ryxwwtqxem4l3xyyjvw35yu4tcct4cqeqxwo47zhxgxqwq", 58 | "bafyreidj5idub6mapiupjwjsyyxhyhedxycv4vihfsicm2vt46o7morwlm", 59 | ]; 60 | 61 | for (expected_cid, (cid, _)) in cids.iter().zip(&files) { 62 | assert_eq!(*cid, expected_cid.parse().unwrap()); 63 | } 64 | 65 | let mut buffer = Vec::new(); 66 | let mut writer = CarWriter::new(header, &mut buffer); 67 | for (cid, data) in &files { 68 | writer.write(*cid, data).await.unwrap(); 69 | } 70 | writer.finish().await.unwrap(); 71 | 72 | let file = fs::read("tests/carv1_basic.car").await.unwrap(); 73 | assert_eq!(file, buffer); 74 | } 75 | -------------------------------------------------------------------------------- /iroh/src/config.rs: -------------------------------------------------------------------------------- 1 | use anyhow::Result; 2 | use config::{ConfigError, Map, Source, Value}; 3 | use iroh_util::insert_into_config_map; 4 | use serde::{Deserialize, Serialize}; 5 | 6 | /// CONFIG_FILE_NAME is the name of the optional config file located in the iroh home directory 7 | pub const CONFIG_FILE_NAME: &str = "cli.config.toml"; 8 | /// ENV_PREFIX should be used along side the config field name to set a config field using 9 | /// environment variables 10 | /// For example, `IROH_CLI_PATH=/path/to/config` would set the value of the `Config.path` field 11 | pub const ENV_PREFIX: &str = "IROH_CLI"; 12 | 13 | /// The configuration for the iroh cli. 
#[derive(PartialEq, Eq, Debug, Deserialize, Serialize, Clone)]
pub struct Config {
    /// The set of services to start if no arguments are given to 'iroh start'
    pub start_default_services: Vec<String>,
}

impl Config {
    /// Creates the default CLI configuration: start the store, p2p and gateway services.
    pub fn new() -> Self {
        Self {
            start_default_services: vec![
                "store".to_string(),
                "p2p".to_string(),
                "gateway".to_string(),
            ],
        }
    }
}

// `Config::new` is the canonical default; delegating `Default` to it keeps the
// type usable with APIs expecting `Default` (and satisfies clippy's
// `new_without_default` lint).
impl Default for Config {
    fn default() -> Self {
        Self::new()
    }
}

impl Source for Config {
    fn clone_into_box(&self) -> Box<dyn Source + Send + Sync> {
        Box::new(self.clone())
    }

    /// Flattens this struct into the `config` crate's key/value map so it can
    /// participate in layered configuration.
    fn collect(&self) -> Result<Map<String, Value>, ConfigError> {
        let mut map: Map<String, Value> = Map::new();
        insert_into_config_map(
            &mut map,
            "start_default_services",
            self.start_default_services.clone(),
        );

        Ok(map)
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_collect() {
        let default = Config::new();

        let mut expect: Map<String, Value> = Map::new();
        expect.insert(
            "start_default_services".to_string(),
            Value::new(
                None,
                vec![
                    "store".to_string(),
                    "p2p".to_string(),
                    "gateway".to_string(),
                ],
            ),
        );

        let got = default.collect().unwrap();
        for key in got.keys() {
            let left = expect.get(key).unwrap();
            let right = got.get(key).unwrap();
            assert_eq!(left, right);
        }
    }

    #[test]
    fn test_build_config_from_struct() {
        let expect = Config::new();
        let got: Config = config::Config::builder()
            .add_source(expect.clone())
            .build()
            .unwrap()
            .try_deserialize()
            .unwrap();

        assert_eq!(expect, got);
    }
}
--------------------------------------------------------------------------------
/iroh-bitswap/src/prefix.rs:
--------------------------------------------------------------------------------
use std::convert::TryFrom;

use cid::{self, Cid, Version};
use multihash::{Code, MultihashDigest};
use unsigned_varint::{decode
as varint_decode, encode as varint_encode};

use crate::error::Error;

/// Prefix represents all metadata of a CID, without the actual content.
#[derive(PartialEq, Eq, Clone, Debug)]
pub struct Prefix {
    /// The version of CID.
    pub version: Version,
    /// The codec of CID.
    pub codec: u64,
    /// The multihash type of CID.
    pub mh_type: Code,
    /// The multihash length of CID.
    pub mh_len: usize,
}

impl Prefix {
    /// Create a new prefix from encoded bytes.
    ///
    /// The expected layout is four consecutive unsigned varints:
    /// version, codec, multihash code, multihash length. Trailing bytes
    /// after the fourth varint are ignored.
    pub fn new(data: &[u8]) -> Result<Prefix, Error> {
        let (raw_version, remain) = varint_decode::u64(data).map_err(Into::<Error>::into)?;
        let version = Version::try_from(raw_version)?;
        let (codec, remain) = varint_decode::u64(remain).map_err(Into::<Error>::into)?;
        let (mh_type, remain) = varint_decode::u64(remain).map_err(Into::<Error>::into)?;
        let (mh_len, _remain) = varint_decode::usize(remain).map_err(Into::<Error>::into)?;

        Ok(Prefix {
            version,
            codec,
            mh_type: Code::try_from(mh_type)?,
            mh_len,
        })
    }

    /// Convert the prefix to encoded bytes (the same four-varint layout that
    /// [`Prefix::new`] parses).
    pub fn to_bytes(&self) -> Vec<u8> {
        // Four varints of up to 10 bytes each; 4 is only a lower bound for the
        // common case of small codes, and `Vec` grows as needed.
        let mut res = Vec::with_capacity(4);

        let mut buf = varint_encode::u64_buffer();
        let version = varint_encode::u64(self.version.into(), &mut buf);
        res.extend_from_slice(version);
        let mut buf = varint_encode::u64_buffer();
        let codec = varint_encode::u64(self.codec, &mut buf);
        res.extend_from_slice(codec);
        let mut buf = varint_encode::u64_buffer();
        let mh_type = varint_encode::u64(self.mh_type.into(), &mut buf);
        res.extend_from_slice(mh_type);
        let mut buf = varint_encode::u64_buffer();
        let mh_len = varint_encode::u64(self.mh_len as u64, &mut buf);
        res.extend_from_slice(mh_len);

        res
    }

    /// Create a CID out of the prefix and some data that will be hashed
    pub fn to_cid(&self, data: &[u8]) -> Result<Cid, cid::Error> {
        let mh = self.mh_type.digest(data);
        Cid::new(self.version, self.codec, mh)
    }
}

impl From<&Cid> for Prefix {
    fn from(cid: &Cid) -> Self {
        Self {
            version: cid.version(),
            codec: cid.codec(),
            // A constructed `Cid` always carries a multihash code known to
            // `multihash::Code`, so this conversion cannot fail in practice.
            // (The message previously said "unknown codec", which was wrong:
            // this is the multihash code, not the codec.)
            mh_type: Code::try_from(cid.hash().code()).expect("unsupported multihash code"),
            mh_len: cid.hash().digest().len(),
        }
    }
}
--------------------------------------------------------------------------------
/iroh-car/src/header.rs:
--------------------------------------------------------------------------------
use cid::Cid;
use ipld::codec::Codec;
use ipld_cbor::DagCborCodec;

use crate::error::Error;

/// A car header.
8 | #[derive(Debug, Clone, PartialEq, Eq)] 9 | #[non_exhaustive] 10 | pub enum CarHeader { 11 | V1(CarHeaderV1), 12 | } 13 | 14 | impl CarHeader { 15 | pub fn new_v1(roots: Vec) -> Self { 16 | Self::V1(roots.into()) 17 | } 18 | 19 | pub fn decode(buffer: &[u8]) -> Result { 20 | let header: CarHeaderV1 = DagCborCodec 21 | .decode(buffer) 22 | .map_err(|e| Error::Parsing(e.to_string()))?; 23 | 24 | if header.roots.is_empty() { 25 | return Err(Error::Parsing("empty CAR file".to_owned())); 26 | } 27 | 28 | if header.version != 1 { 29 | return Err(Error::InvalidFile( 30 | "Only CAR file version 1 is supported".to_string(), 31 | )); 32 | } 33 | 34 | Ok(CarHeader::V1(header)) 35 | } 36 | 37 | pub fn encode(&self) -> Result, Error> { 38 | match self { 39 | CarHeader::V1(ref header) => { 40 | let res = DagCborCodec.encode(header)?; 41 | Ok(res) 42 | } 43 | } 44 | } 45 | 46 | pub fn roots(&self) -> &[Cid] { 47 | match self { 48 | CarHeader::V1(header) => &header.roots, 49 | } 50 | } 51 | 52 | pub fn version(&self) -> u64 { 53 | match self { 54 | CarHeader::V1(_) => 1, 55 | } 56 | } 57 | } 58 | 59 | /// CAR file header version 1. 
60 | #[derive(Debug, Clone, Default, ipld::DagCbor, PartialEq, Eq)] 61 | pub struct CarHeaderV1 { 62 | #[ipld] 63 | pub roots: Vec, 64 | #[ipld] 65 | pub version: u64, 66 | } 67 | 68 | impl CarHeaderV1 { 69 | /// Creates a new CAR file header 70 | pub fn new(roots: Vec, version: u64) -> Self { 71 | Self { roots, version } 72 | } 73 | } 74 | 75 | impl From> for CarHeaderV1 { 76 | fn from(roots: Vec) -> Self { 77 | Self { roots, version: 1 } 78 | } 79 | } 80 | 81 | #[cfg(test)] 82 | mod tests { 83 | use ipld::codec::{Decode, Encode}; 84 | use ipld_cbor::DagCborCodec; 85 | use multihash::MultihashDigest; 86 | 87 | use super::*; 88 | 89 | #[test] 90 | fn symmetric_header_v1() { 91 | let digest = multihash::Code::Blake2b256.digest(b"test"); 92 | let cid = Cid::new_v1(DagCborCodec.into(), digest); 93 | 94 | let header = CarHeaderV1::from(vec![cid]); 95 | 96 | let mut bytes = Vec::new(); 97 | header.encode(DagCborCodec, &mut bytes).unwrap(); 98 | 99 | assert_eq!( 100 | CarHeaderV1::decode(DagCborCodec, &mut std::io::Cursor::new(&bytes)).unwrap(), 101 | header 102 | ); 103 | } 104 | } 105 | -------------------------------------------------------------------------------- /examples/importer/src/main.rs: -------------------------------------------------------------------------------- 1 | use std::{path::PathBuf, time::Instant}; 2 | 3 | use anyhow::{bail, Result}; 4 | use bytes::Bytes; 5 | use clap::Parser; 6 | use futures::{stream::TryStreamExt, StreamExt}; 7 | use indicatif::{ProgressBar, ProgressStyle}; 8 | use iroh_car::CarReader; 9 | use iroh_rpc_client::{Client, Config as RpcClientConfig}; 10 | use par_stream::prelude::*; 11 | 12 | #[derive(Parser, Debug, Clone)] 13 | #[clap(author, version, about, long_about = None)] 14 | struct Args { 15 | #[clap(long)] 16 | path: PathBuf, 17 | #[clap(long)] 18 | limit: Option, 19 | } 20 | 21 | #[tokio::main(flavor = "multi_thread")] 22 | async fn main() -> Result<()> { 23 | let args = Args::parse(); 24 | 25 | println!("Importing from 
{:?} (limit: {:?})", args.path, args.limit); 26 | 27 | let rpc_config = RpcClientConfig::default(); 28 | let rpc = Client::new(rpc_config).await?; 29 | 30 | let car_file = tokio::fs::File::open(&args.path).await?; 31 | let total_size = car_file.metadata().await?.len(); 32 | 33 | let car_reader = CarReader::new(car_file).await?; 34 | let stream = if let Some(limit) = args.limit { 35 | car_reader.stream().take(limit).boxed() 36 | } else { 37 | car_reader.stream().boxed() 38 | }; 39 | 40 | let pb = ProgressBar::new(total_size); 41 | pb.set_style( 42 | ProgressStyle::default_bar() 43 | .template("{spinner:.green} [{elapsed_precise}] [{wide_bar:.cyan/blue}] {bytes}/{total_bytes} ({bytes_per_sec}, {eta})").unwrap() 44 | .progress_chars("#>-") 45 | ); 46 | 47 | let start = Instant::now(); 48 | let pb_clone = pb.clone(); 49 | 50 | let res: Vec<_> = stream 51 | .map_err(anyhow::Error::from) 52 | .try_par_map_unordered(None, move |(cid, data)| { 53 | move || { 54 | let data = Bytes::from(data); 55 | if iroh_util::verify_hash(&cid, &data) == Some(false) { 56 | bail!("invalid hash {:?}", cid); 57 | } 58 | let links = iroh_unixfs::parse_links(&cid, &data).unwrap_or_default(); 59 | Ok((cid, data, links)) 60 | } 61 | }) 62 | .try_par_then_unordered(None, move |(cid, data, links)| { 63 | let rpc = rpc.clone(); 64 | let pb = pb_clone.clone(); 65 | async move { 66 | let l = data.len(); 67 | rpc.try_store()?.put(cid, data, links).await?; 68 | pb.inc(l as _); 69 | Ok(l) 70 | } 71 | }) 72 | .try_collect() 73 | .await?; 74 | 75 | let count = res.len(); 76 | let bytes: usize = res.into_iter().sum(); 77 | pb.finish(); 78 | 79 | println!( 80 | "imported {} elements ({}) in {}s", 81 | count, 82 | bytesize::ByteSize::b(bytes as u64).to_string_as(true), 83 | start.elapsed().as_secs() 84 | ); 85 | 86 | Ok(()) 87 | } 88 | -------------------------------------------------------------------------------- /iroh-car/src/util.rs: 
-------------------------------------------------------------------------------- 1 | use cid::Cid; 2 | use integer_encoding::VarIntAsyncReader; 3 | use tokio::io::{AsyncRead, AsyncReadExt}; 4 | 5 | use super::error::Error; 6 | 7 | /// Maximum size that is used for single node. 8 | pub(crate) const MAX_ALLOC: usize = 4 * 1024 * 1024; 9 | 10 | pub(crate) async fn ld_read(mut reader: R, buf: &mut Vec) -> Result, Error> 11 | where 12 | R: AsyncRead + Send + Unpin, 13 | { 14 | let length: usize = match VarIntAsyncReader::read_varint_async(&mut reader).await { 15 | Ok(len) => len, 16 | Err(e) => { 17 | if e.kind() == std::io::ErrorKind::UnexpectedEof { 18 | return Ok(None); 19 | } 20 | return Err(Error::Parsing(e.to_string())); 21 | } 22 | }; 23 | 24 | if length > MAX_ALLOC { 25 | return Err(Error::LdReadTooLarge(length)); 26 | } 27 | if length > buf.len() { 28 | buf.resize(length, 0); 29 | } 30 | 31 | reader 32 | .read_exact(&mut buf[..length]) 33 | .await 34 | .map_err(|e| Error::Parsing(e.to_string()))?; 35 | 36 | Ok(Some(&buf[..length])) 37 | } 38 | 39 | pub(crate) async fn read_node( 40 | buf_reader: &mut R, 41 | buf: &mut Vec, 42 | ) -> Result)>, Error> 43 | where 44 | R: AsyncRead + Send + Unpin, 45 | { 46 | if let Some(buf) = ld_read(buf_reader, buf).await? 
{ 47 | let mut cursor = std::io::Cursor::new(buf); 48 | let c = Cid::read_bytes(&mut cursor)?; 49 | let pos = cursor.position() as usize; 50 | 51 | return Ok(Some((c, buf[pos..].to_vec()))); 52 | } 53 | Ok(None) 54 | } 55 | 56 | #[cfg(test)] 57 | mod tests { 58 | use integer_encoding::VarIntAsyncWriter; 59 | use tokio::io::{AsyncWrite, AsyncWriteExt}; 60 | 61 | use super::*; 62 | 63 | async fn ld_write<'a, W>(writer: &mut W, bytes: &[u8]) -> Result<(), Error> 64 | where 65 | W: AsyncWrite + Send + Unpin, 66 | { 67 | writer.write_varint_async(bytes.len()).await?; 68 | writer.write_all(bytes).await?; 69 | writer.flush().await?; 70 | Ok(()) 71 | } 72 | 73 | #[tokio::test] 74 | async fn ld_read_write_good() { 75 | let mut buffer = Vec::::new(); 76 | ld_write(&mut buffer, b"test bytes").await.unwrap(); 77 | let reader = std::io::Cursor::new(buffer); 78 | 79 | let mut buffer = vec![1u8; 1024]; 80 | let read = ld_read(reader, &mut buffer).await.unwrap().unwrap(); 81 | assert_eq!(read, b"test bytes"); 82 | } 83 | 84 | #[tokio::test] 85 | async fn ld_read_write_fail() { 86 | let mut buffer = Vec::::new(); 87 | let size = MAX_ALLOC + 1; 88 | ld_write(&mut buffer, &vec![2u8; size]).await.unwrap(); 89 | let reader = std::io::Cursor::new(buffer); 90 | 91 | let mut buffer = vec![1u8; 1024]; 92 | let read = ld_read(reader, &mut buffer).await; 93 | assert!(matches!(read, Err(Error::LdReadTooLarge(_)))); 94 | } 95 | } 96 | -------------------------------------------------------------------------------- /iroh-p2p/src/main.rs: -------------------------------------------------------------------------------- 1 | use anyhow::{anyhow, Context, Result}; 2 | use clap::Parser; 3 | use iroh_p2p::config::{Config, CONFIG_FILE_NAME, ENV_PREFIX}; 4 | use iroh_p2p::ServerConfig; 5 | use iroh_p2p::{cli::Args, metrics, DiskStorage, Keychain, Node}; 6 | use iroh_util::lock::ProgramLock; 7 | use iroh_util::{iroh_config_path, make_config}; 8 | use tokio::task; 9 | use tracing::error; 10 | 11 | /// 
Starts daemon process 12 | fn main() -> Result<()> { 13 | let mut lock = ProgramLock::new("iroh-p2p")?; 14 | lock.acquire_or_exit(); 15 | 16 | let runtime = tokio::runtime::Builder::new_multi_thread() 17 | .max_blocking_threads(2048) 18 | .thread_stack_size(16 * 1024 * 1024) 19 | .enable_all() 20 | .build() 21 | .unwrap(); 22 | 23 | runtime.block_on(async move { 24 | let version = option_env!("IROH_VERSION").unwrap_or(env!("CARGO_PKG_VERSION")); 25 | println!("Starting iroh-p2p, version {version}"); 26 | 27 | let args = Args::parse(); 28 | 29 | // TODO: configurable network 30 | let cfg_path = iroh_config_path(CONFIG_FILE_NAME)?; 31 | let sources = [Some(cfg_path.as_path()), args.cfg.as_deref()]; 32 | let network_config = make_config( 33 | // default 34 | ServerConfig::default(), 35 | // potential config files 36 | &sources, 37 | // env var prefix for this config 38 | ENV_PREFIX, 39 | // map of present command line arguments 40 | args.make_overrides_map(), 41 | ) 42 | .context("invalid config")?; 43 | 44 | let metrics_config = 45 | metrics::metrics_config_with_compile_time_info(network_config.metrics.clone()); 46 | 47 | let metrics_handle = iroh_metrics::MetricsHandle::new(metrics_config) 48 | .await 49 | .map_err(|e| anyhow!("metrics init failed: {:?}", e))?; 50 | 51 | #[cfg(unix)] 52 | { 53 | match iroh_util::increase_fd_limit() { 54 | Ok(soft) => tracing::debug!("NOFILE limit: soft = {}", soft), 55 | Err(err) => error!("Error increasing NOFILE limit: {}", err), 56 | } 57 | } 58 | 59 | let network_config = Config::from(network_config); 60 | let kc = Keychain::::new(network_config.key_store_path.clone()).await?; 61 | let rpc_addr = network_config 62 | .rpc_addr() 63 | .ok_or_else(|| anyhow!("missing p2p rpc addr"))?; 64 | let mut p2p = Node::new(network_config, rpc_addr, kc).await?; 65 | 66 | // Start services 67 | let p2p_task = task::spawn(async move { 68 | if let Err(err) = p2p.run().await { 69 | error!("{:?}", err); 70 | } 71 | }); 72 | 73 | 
iroh_util::block_until_sigint().await; 74 | 75 | // Cancel all async services 76 | p2p_task.abort(); 77 | p2p_task.await.ok(); 78 | 79 | metrics_handle.shutdown(); 80 | Ok(()) 81 | }) 82 | } 83 | -------------------------------------------------------------------------------- /iroh-resolver/tests/unixfs.rs: -------------------------------------------------------------------------------- 1 | use std::{io::Read, path::PathBuf, time::Instant}; 2 | 3 | use anyhow::Result; 4 | use bytes::Bytes; 5 | use iroh_metrics::resolver::OutMetrics; 6 | use iroh_resolver::resolver::{read_to_vec, stream_to_resolver, Path}; 7 | use iroh_unixfs::{ 8 | self, 9 | builder::FileBuilder, 10 | chunker::{self, Chunker}, 11 | }; 12 | 13 | async fn read_fixture(path: impl AsRef) -> Result> { 14 | let path = path.as_ref().to_owned(); 15 | tokio::task::spawn_blocking(move || { 16 | let mut file = std::fs::File::open(path)?; 17 | let mut decompressed = Vec::new(); 18 | let mut decoder = ruzstd::streaming_decoder::StreamingDecoder::new(&mut file)?; 19 | decoder.read_to_end(&mut decompressed)?; 20 | 21 | Ok(decompressed) 22 | }) 23 | .await? 
24 | } 25 | 26 | const FIXTURE_DIR: &str = "fixtures"; 27 | 28 | #[derive(Debug)] 29 | struct Param { 30 | degree: usize, 31 | chunker: Chunker, 32 | } 33 | 34 | #[tokio::test] 35 | #[ignore] 36 | async fn test_dagger_testdata() -> Result<()> { 37 | let sources = [ 38 | "uicro_1B.zst", 39 | "uicro_50B.zst", 40 | "zero_0B.zst", 41 | "repeat_0.04GiB_174.zst", 42 | "repeat_0.04GiB_174_1.zst", 43 | "repeat_0.04GiB_175.zst", 44 | "repeat_0.04GiB_175_1.zst", 45 | "large_repeat_1GiB.zst", 46 | "large_repeat_5GiB.zst", 47 | ]; 48 | 49 | let params = [ 50 | Param { 51 | degree: 174, 52 | chunker: Chunker::Fixed(chunker::Fixed::default()), 53 | }, 54 | Param { 55 | degree: 174, 56 | chunker: Chunker::Rabin(Box::default()), 57 | }, 58 | ]; 59 | 60 | for source in sources { 61 | for param in ¶ms { 62 | println!("== {source:?} =="); 63 | println!("Degree: {}", param.degree); 64 | println!("Chunker: {}", param.chunker); 65 | 66 | let source = PathBuf::from(FIXTURE_DIR).join(source); 67 | let data = read_fixture(&source).await?; 68 | let data = Bytes::from(data); 69 | 70 | let start = Instant::now(); 71 | 72 | let file = FileBuilder::new() 73 | .name(source.to_string_lossy().into_owned()) 74 | .chunker(param.chunker.clone()) 75 | .degree(param.degree) 76 | .content_bytes(data.clone()) 77 | .build() 78 | .await?; 79 | let stream = file.encode().await?; 80 | let (root, resolver) = stream_to_resolver(stream).await?; 81 | let out = resolver.resolve(Path::from_cid(root)).await?; 82 | let t = read_to_vec(out.pretty(resolver, OutMetrics::default(), None)?).await?; 83 | 84 | println!("Root: {root}"); 85 | println!("Len: {}", data.len()); 86 | println!("Elapsed: {}s", start.elapsed().as_secs_f32()); 87 | 88 | // Ensure the data roundtrips 89 | assert_eq!(t, data); 90 | } 91 | } 92 | 93 | Ok(()) 94 | } 95 | -------------------------------------------------------------------------------- /iroh-bitswap/src/server/task_merger.rs: 
-------------------------------------------------------------------------------- 1 | use cid::Cid; 2 | 3 | use crate::peer_task_queue::Task; 4 | 5 | /// Extra data associated with each task in the request queue. 6 | #[derive(Debug, Clone, PartialEq, Eq)] 7 | pub struct TaskData { 8 | /// Tasks can be either want-have or want-block. 9 | pub is_want_block: bool, 10 | /// Wether to immediately send a response if teh block is not found. 11 | pub send_dont_have: bool, 12 | /// The size of the block corresponding to the task. 13 | pub block_size: usize, 14 | /// Wether the block was found. 15 | pub have_block: bool, 16 | } 17 | 18 | #[derive(Default, Debug, Clone, PartialEq, Eq)] 19 | pub struct TaskMerger {} 20 | 21 | impl crate::peer_task_queue::TaskMerger for TaskMerger { 22 | fn has_new_info(&self, task: &Task, existing: &[Task]) -> bool { 23 | let mut have_size = false; 24 | let mut is_want_block = false; 25 | 26 | for entry in existing { 27 | if entry.data.have_block { 28 | have_size = true; 29 | } 30 | if entry.data.is_want_block { 31 | is_want_block = true; 32 | } 33 | } 34 | 35 | // If there is no active want-block and the new task is a want-block 36 | // the new task is better. 37 | let new_task_data = &task.data; 38 | if !is_want_block && new_task_data.is_want_block { 39 | return true; 40 | } 41 | 42 | // If there is no size information for the Cid and the new taks has size 43 | // information, the new task is better. 44 | if !have_size && new_task_data.have_block { 45 | return true; 46 | } 47 | 48 | false 49 | } 50 | 51 | fn merge(&self, task: &Task, existing: &mut Task) { 52 | let new_task = &task.data; 53 | let existing_task = &mut existing.data; 54 | 55 | // If we now have block size information, update the task with the new block size. 
56 | if !existing_task.have_block && new_task.have_block { 57 | existing_task.have_block = new_task.have_block; 58 | existing_task.block_size = new_task.block_size; 59 | } 60 | 61 | // If replacing a want-ahve with a want-block 62 | if !existing_task.is_want_block && new_task.is_want_block { 63 | // Change the type form want-have to want-block. 64 | existing_task.is_want_block = true; 65 | // If the want-have was a DONT_HAVAE, or the want-block has a size 66 | if !existing_task.have_block || new_task.have_block { 67 | // Update the entry size 68 | existing_task.have_block = new_task.have_block; 69 | existing.work = task.work; 70 | } 71 | } 72 | 73 | // If the task is a want-block, make sure the entry size is equal to the block size 74 | // (because we will send the whole block) 75 | if existing_task.is_want_block && existing_task.have_block { 76 | existing.work = existing_task.block_size; 77 | } 78 | } 79 | } 80 | -------------------------------------------------------------------------------- /iroh-embed/src/store.rs: -------------------------------------------------------------------------------- 1 | //! Store services to use in an iroh system. 2 | 3 | use std::path::PathBuf; 4 | 5 | use anyhow::Result; 6 | use iroh_one::mem_store; 7 | use iroh_rpc_types::store::StoreAddr; 8 | use iroh_rpc_types::Addr; 9 | use iroh_store::Config as StoreConfig; 10 | use tokio::task::JoinHandle; 11 | 12 | /// A iroh store backed by an on-disk RocksDB. 13 | /// 14 | /// An iroh system needs a store service for keeping local state and IPFS data. This one 15 | /// uses RocksDB in a directory on disk. 16 | #[derive(Debug)] 17 | pub struct RocksStoreService { 18 | task: JoinHandle<()>, 19 | addr: StoreAddr, 20 | } 21 | 22 | impl RocksStoreService { 23 | /// Starts a new iroh Store service with RocksDB storage. 24 | /// 25 | /// This implicitly starts a task on the tokio runtime to manage the storage node. 
26 | pub async fn new(path: PathBuf) -> Result { 27 | let addr = Addr::new_mem(); 28 | let config = StoreConfig::with_rpc_addr(path, addr.clone()); 29 | let task = mem_store::start(addr.clone(), config).await?; 30 | Ok(Self { task, addr }) 31 | } 32 | 33 | /// Returns the internal RPC address of this store node. 34 | /// 35 | /// This is used by the other iroh services, like the p2p and gateway services, to use 36 | /// the store. 37 | pub fn addr(&self) -> StoreAddr { 38 | self.addr.clone() 39 | } 40 | 41 | /// Stop this store service. 42 | /// 43 | /// This function waits for the store to be fully terminated and only returns once it is 44 | /// no longer running. 45 | // TODO: This should be graceful termination. 46 | pub async fn stop(mut self) -> Result<()> { 47 | // This dummy task will be aborted by Drop. 48 | let fut = futures::future::ready(()); 49 | let dummy_task = tokio::spawn(fut); 50 | let task = std::mem::replace(&mut self.task, dummy_task); 51 | 52 | task.abort(); 53 | 54 | // Because we currently don't do graceful termination we expect a cancelled error. 55 | match task.await { 56 | Ok(()) => Ok(()), 57 | Err(err) if err.is_cancelled() => Ok(()), 58 | Err(err) => Err(err.into()), 59 | } 60 | } 61 | } 62 | 63 | impl Drop for RocksStoreService { 64 | fn drop(&mut self) { 65 | // Abort the task without polling it. It may or may not ever be polled again and 66 | // actually abort. If .stop() has been called though the task is already shut down 67 | // gracefully and not polling it anymore has no significance. 
68 | self.task.abort(); 69 | } 70 | } 71 | 72 | #[cfg(test)] 73 | mod tests { 74 | use std::time::Duration; 75 | 76 | use testdir::testdir; 77 | use tokio::time; 78 | 79 | use super::*; 80 | 81 | #[tokio::test] 82 | async fn test_create_store_stop() { 83 | let dir = testdir!(); 84 | let marker = dir.join("CURRENT"); 85 | 86 | let store = RocksStoreService::new(dir).await.unwrap(); 87 | assert!(marker.exists()); 88 | 89 | let fut = store.stop(); 90 | let ret = time::timeout(Duration::from_millis(500), fut).await; 91 | 92 | assert!(ret.is_ok()); 93 | } 94 | } 95 | -------------------------------------------------------------------------------- /iroh-gateway/src/text.rs: -------------------------------------------------------------------------------- 1 | pub struct IpfsSubdomain<'a> { 2 | pub cid_or_domain: &'a str, 3 | pub scheme: &'a str, 4 | pub hostname: &'a str, 5 | } 6 | 7 | impl<'a> IpfsSubdomain<'a> { 8 | pub(crate) fn try_from_str(value: &'a str) -> Option { 9 | let mut value = value.splitn(3, '.'); 10 | if let (Some(cid_or_domain), Some(schema), Some(hostname)) = 11 | (value.next(), value.next(), value.next()) 12 | { 13 | if schema == "ipns" || schema == "ipfs" { 14 | return Some(IpfsSubdomain { 15 | cid_or_domain, 16 | scheme: schema, 17 | hostname, 18 | }); 19 | } 20 | } 21 | None 22 | } 23 | } 24 | 25 | #[cfg(test)] 26 | mod tests { 27 | use super::*; 28 | 29 | #[test] 30 | fn response_format_try_from() { 31 | assert!(IpfsSubdomain::try_from_str("localhost:8080").is_none()); 32 | assert!(IpfsSubdomain::try_from_str("localhost").is_none()); 33 | assert!(IpfsSubdomain::try_from_str("ipfs.localhost").is_none()); 34 | assert!(IpfsSubdomain::try_from_str("bafy.ipfs.localhost").is_some()); 35 | assert!(IpfsSubdomain::try_from_str("ipfs-eth.ipns.localhost").is_some()); 36 | assert!(IpfsSubdomain::try_from_str("bafy.ipfs.localhost:8080").is_some()); 37 | assert!(IpfsSubdomain::try_from_str("bafy.ipnotfs.localhost").is_none()); 38 | 
assert!(IpfsSubdomain::try_from_str("bafy.ipnotfs.localhost").is_none()); 39 | 40 | let complex_case_1 = IpfsSubdomain::try_from_str("bafy.ipfs.bafy.ipfs.com").unwrap(); 41 | assert_eq!(complex_case_1.cid_or_domain, "bafy"); 42 | assert_eq!(complex_case_1.scheme, "ipfs"); 43 | assert_eq!(complex_case_1.hostname, "bafy.ipfs.com"); 44 | 45 | let complex_case_2 = IpfsSubdomain::try_from_str("bafy.ipfs.ipfs.com").unwrap(); 46 | assert_eq!(complex_case_2.cid_or_domain, "bafy"); 47 | assert_eq!(complex_case_2.scheme, "ipfs"); 48 | assert_eq!(complex_case_2.hostname, "ipfs.com"); 49 | 50 | let complex_case_3 = IpfsSubdomain::try_from_str("bafy.ipfs.ipns.com").unwrap(); 51 | assert_eq!(complex_case_3.cid_or_domain, "bafy"); 52 | assert_eq!(complex_case_3.scheme, "ipfs"); 53 | assert_eq!(complex_case_3.hostname, "ipns.com"); 54 | 55 | let complex_case_4 = IpfsSubdomain::try_from_str("bafy.ipns.ipfs.com").unwrap(); 56 | assert_eq!(complex_case_4.cid_or_domain, "bafy"); 57 | assert_eq!(complex_case_4.scheme, "ipns"); 58 | assert_eq!(complex_case_4.hostname, "ipfs.com"); 59 | 60 | let complex_case_5 = IpfsSubdomain::try_from_str("bafy.ipns.ipns.com").unwrap(); 61 | assert_eq!(complex_case_5.cid_or_domain, "bafy"); 62 | assert_eq!(complex_case_5.scheme, "ipns"); 63 | assert_eq!(complex_case_5.hostname, "ipns.com"); 64 | 65 | let complex_case_6 = IpfsSubdomain::try_from_str("bafy-mafy.ipfs.ipfs.com").unwrap(); 66 | assert_eq!(complex_case_6.cid_or_domain, "bafy-mafy"); 67 | assert_eq!(complex_case_6.scheme, "ipfs"); 68 | assert_eq!(complex_case_6.hostname, "ipfs.com"); 69 | } 70 | } 71 | -------------------------------------------------------------------------------- /iroh-gateway/README.md: -------------------------------------------------------------------------------- 1 | # iroh gateway 2 | 3 | [![crates.io](https://img.shields.io/crates/v/iroh-gateway.svg?style=flat-square)](https://crates.io/crates/iroh-gateway) 4 | [![Released API 
docs](https://img.shields.io/docsrs/iroh-gateway?style=flat-square)](https://docs.rs/iroh-gateway) 5 | [![MIT/Apache-2.0 licensed](https://img.shields.io/crates/l/iroh-gateway?style=flat-square)](../LICENSE-MIT) 6 | [![CI](https://img.shields.io/github/workflow/status/n0-computer/iroh/Continuous%20integration?style=flat-square)](https://github.com/n0-computer/iroh/actions?query=workflow%3A%22Continuous+integration%22) 7 | 8 | A Rust implementation of an [IPFS gateway](https://docs.ipfs.tech/concepts/ipfs-gateway/) based on 9 | [iroh](https://github.com/n0-computer/iroh). An IPFS gateway allows you to 10 | access content on the IPFS network over HTTP. 11 | 12 | ## Running / Building 13 | 14 | `cargo run -- -p 10000` 15 | 16 | ### Options 17 | 18 | - Run with `cargo run -- -h` for details 19 | - `-wcf` Writeable, Cache, Fetch (options to toggle write enable, caching mechanics and fetching from the network); currently exists but is not implemented 20 | - `-p` Port the gateway should listen on 21 | 22 | ## ENV Variables 23 | 24 | - `IROH_INSTANCE_ID` - unique instance identifier, preferably some name than hard id (default: generated lower & snake case name) 25 | - `IROH_ENV` - indicates the service environment (default: `dev`) 26 | 27 | ## Endpoints 28 | 29 | | Endpoint | Flag | Description | Default | 30 | |-----------------------------------|--------------------------------------------|-----------------------------------------------------------------------------------------|-------------| 31 | | `/ipfs/:cid` & `/ipfs/:cid/:path` | `?format={"", "fs", "raw", "car"}` | Specifies the serving format & content-type | `""/fs` | 32 | | | `?filename=DESIRED_FILE_NAME` | Specifies a filename for the attachment | `{cid}.bin` | 33 | | | `?download={true, false}` | Sets content-disposition to attachment, browser prompts to save file instead of loading | `false` | 34 | | | `?force_dir={true, false}` | Lists unixFS directories even if they contain an `index.html` file | `false` | 
35 | | | `?uri=ENCODED_URL` | Query parameter to handle navigator.registerProtocolHandler Web API ie. ipfs:// | `""` | 36 | 37 | 38 | ## License 39 | 40 | 41 | Licensed under either of Apache License, Version 42 | 2.0 or MIT license at your option. 43 | 44 | 45 |
46 | 47 | 48 | Unless you explicitly state otherwise, any contribution intentionally submitted 49 | for inclusion in this crate by you, as defined in the Apache-2.0 license, shall 50 | be dual licensed as above, without any additional terms or conditions. 51 | 52 | -------------------------------------------------------------------------------- /iroh-metrics/src/bitswap.rs: -------------------------------------------------------------------------------- 1 | use std::fmt; 2 | 3 | use prometheus_client::{ 4 | metrics::{counter::Counter, gauge::Gauge}, 5 | registry::Registry, 6 | }; 7 | use tracing::error; 8 | 9 | use crate::{ 10 | core::{HistogramType, MRecorder, MetricType, MetricsRecorder}, 11 | Collector, 12 | }; 13 | 14 | make_metrics! { 15 | Bitswap, 16 | RequestsTotal: Counter: "Total number of requests received by bitswap", 17 | CanceledTotal: Counter: "Total number of requests canceled by bitswap", 18 | SentBlockBytes: Counter: "Number of bytes streamed", 19 | ReceivedBlockBytes: Counter: "Number of bytes received", 20 | MessageBytesOut: Counter: "", 21 | MessageBytesIn: Counter: "", 22 | BlocksIn: Counter: "", 23 | BlocksOut: Counter: "", 24 | ProvidersTotal: Counter: "Number of providers", 25 | AttemptedDials: Counter: "", 26 | Dials: Counter: "", 27 | KnownPeers: Counter: "", 28 | ForgottenPeers: Counter: "", 29 | WantedBlocks: Counter: "", 30 | WantedBlocksReceived: Counter: "", 31 | WantHaveBlocks: Counter: "", 32 | CancelBlocks: Counter: "", 33 | CancelWantBlocks: Counter: "", 34 | ConnectedPeers: Counter: "", 35 | ResponsivePeers: Counter: "", 36 | UnresponsivePeers: Counter: "", 37 | DisconnectedPeers: Counter: "", 38 | MessagesAttempted: Counter: "", 39 | MessagesSent: Counter: "", 40 | MessagesProcessingClient: Counter: "", 41 | MessagesProcessingServer: Counter: "", 42 | MessagesReceived: Counter: "", 43 | EventsBackpressureIn: Counter: "", 44 | EventsBackpressureOut: Counter: "", 45 | PollActionConnectedWants: Counter: "", 46 | 
PollActionConnected: Counter: "", 47 | PollActionNotConnected: Counter: "", 48 | ProtocolUnsupported: Counter: "", 49 | HandlerPollCount: Counter: "", 50 | HandlerPollEventCount: Counter: "", 51 | HandlerConnUpgradeErrors: Counter: "", 52 | InboundSubstreamsCreatedLimit: Counter: "", 53 | OutboundSubstreamsEvent: Counter: "", 54 | OutboundSubstreamsCreatedLimit: Counter: "", 55 | HandlerInboundLoopCount: Counter: "", 56 | HandlerOutboundLoopCount: Counter: "", 57 | SessionsCreated: Counter: "Number of sessions created", 58 | SessionsDestroyed: Counter: "Number of sessions destroyed", 59 | ProviderQueryCreated: Counter: "", 60 | ProviderQuerySuccess: Counter: "", 61 | ProviderQueryError: Counter: "", 62 | EngineActiveTasks: Gauge: "", 63 | EnginePendingTasks: Gauge: "", 64 | 65 | ClientLoopTick: Counter: "", 66 | ServerTaskLoopTick: Counter: "", 67 | ServerProviderTaskLoopTick: Counter: "", 68 | ServerKeyProviderTaskLoopTick: Counter: "", 69 | MessageQueueWorkerLoopTick: Counter: "", 70 | SessionLoopTick: Counter: "", 71 | SessionGetBlockLoopTick: Counter: "", 72 | FindMorePeersLoopTick: Counter: "", 73 | DontHaveTimeoutLoopTick: Counter: "", 74 | SessionWantSenderLoopTick: Counter: "", 75 | EngineLoopTick: Counter: "", 76 | ScoreLedgerLoopTick: Counter: "", 77 | PeerManagerLoopTick: Counter: "", 78 | 79 | MessageQueuesCreated: Counter: "", 80 | MessageQueuesDestroyed: Counter: "", 81 | MessageQueuesStopped: Counter: "", 82 | 83 | NetworkBehaviourActionPollTick: Counter: "", 84 | NetworkPollTick: Counter: "" 85 | } 86 | -------------------------------------------------------------------------------- /iroh-api/src/store.rs: -------------------------------------------------------------------------------- 1 | use std::{pin::Pin, sync::Arc}; 2 | 3 | use anyhow::Result; 4 | use async_stream::stream; 5 | use async_trait::async_trait; 6 | use bytes::Bytes; 7 | use cid::Cid; 8 | use futures::{Stream, StreamExt}; 9 | use iroh_rpc_client::Client; 10 | use iroh_unixfs::Block; 
11 | 12 | /// How many chunks to buffer up when adding content. 13 | const _ADD_PAR: usize = 24; 14 | 15 | #[async_trait] 16 | pub trait Store: 'static + Send + Sync + Clone { 17 | async fn has(&self, &cid: Cid) -> Result; 18 | async fn put(&self, cid: Cid, blob: Bytes, links: Vec) -> Result<()>; 19 | async fn put_many(&self, blocks: Vec) -> Result<()>; 20 | } 21 | 22 | #[async_trait] 23 | impl Store for Client { 24 | async fn has(&self, cid: Cid) -> Result { 25 | self.try_store()?.has(cid).await 26 | } 27 | 28 | async fn put(&self, cid: Cid, blob: Bytes, links: Vec) -> Result<()> { 29 | self.try_store()?.put(cid, blob, links).await 30 | } 31 | 32 | async fn put_many(&self, blocks: Vec) -> Result<()> { 33 | self.try_store()? 34 | .put_many(blocks.into_iter().map(|x| x.into_parts()).collect()) 35 | .await 36 | } 37 | } 38 | 39 | #[async_trait] 40 | impl Store for Arc>> { 41 | async fn has(&self, cid: Cid) -> Result { 42 | Ok(self.lock().await.contains_key(&cid)) 43 | } 44 | async fn put(&self, cid: Cid, blob: Bytes, _links: Vec) -> Result<()> { 45 | self.lock().await.insert(cid, blob); 46 | Ok(()) 47 | } 48 | 49 | async fn put_many(&self, blocks: Vec) -> Result<()> { 50 | let mut this = self.lock().await; 51 | for block in blocks { 52 | this.insert(*block.cid(), block.data().clone()); 53 | } 54 | Ok(()) 55 | } 56 | } 57 | 58 | fn add_blocks_to_store_chunked( 59 | store: S, 60 | mut blocks: Pin> + Send>>, 61 | ) -> impl Stream> { 62 | let mut chunk = Vec::new(); 63 | let mut chunk_size = 0u64; 64 | const MAX_CHUNK_SIZE: u64 = 1024 * 1024; 65 | stream! 
{ 66 | while let Some(block) = blocks.next().await { 67 | let block = block?; 68 | let block_size = block.data().len() as u64 + block.links().len() as u64 * 128; 69 | let cid = *block.cid(); 70 | let raw_data_size = block.raw_data_size().unwrap_or_default(); 71 | tracing::info!("adding chunk of {} bytes", chunk_size); 72 | if chunk_size + block_size > MAX_CHUNK_SIZE { 73 | store.put_many(std::mem::take(&mut chunk)).await?; 74 | chunk_size = 0; 75 | } 76 | chunk.push(block); 77 | chunk_size += block_size; 78 | yield Ok(( 79 | cid, 80 | raw_data_size, 81 | )); 82 | } 83 | // make sure to also send the last chunk! 84 | store.put_many(chunk).await?; 85 | } 86 | } 87 | 88 | pub async fn add_blocks_to_store( 89 | store: Option, 90 | blocks: Pin> + Send>>, 91 | ) -> impl Stream> { 92 | add_blocks_to_store_chunked(store.unwrap(), blocks) 93 | } 94 | -------------------------------------------------------------------------------- /iroh-localops/src/process.rs: -------------------------------------------------------------------------------- 1 | use anyhow::{anyhow, Result}; 2 | #[cfg(any(target_os = "macos", target_os = "linux"))] 3 | use nix::sys::signal::{kill, Signal}; 4 | #[cfg(any(target_os = "macos", target_os = "linux"))] 5 | use nix::unistd::Pid; 6 | use std::path::PathBuf; 7 | #[cfg(any(target_os = "macos", target_os = "linux"))] 8 | use std::process::{Command, Stdio}; 9 | 10 | // TODO(b5): instead of using u32's for Process Identifiers, use a proper Pid type 11 | // something along the lines of: 12 | 13 | // #[cfg(unix)] 14 | // #[derive(Debug, Clone, Copy, PartialEq, Eq)] 15 | // pub struct Pid(nix::unistd::Pid); 16 | 17 | // #[cfg(not(unix))] 18 | // #[derive(Debug, Clone, Copy, PartialEq, Eq)] 19 | // pub struct Pid; // TODO: fill in for each platform when supported 20 | 21 | // // #[cfg(unix)] 22 | // impl From nix::Pid for Pid { 23 | // // .. 
24 | // } 25 | 26 | // impl std::fmt::Display for Pid { 27 | // fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 28 | // write!(f, "{}", self.to_string()) 29 | // } 30 | // } 31 | 32 | pub fn daemonize(bin_path: PathBuf, log_path: PathBuf) -> Result<()> { 33 | daemonize_process(bin_path, log_path) 34 | } 35 | 36 | #[cfg(not(any(target_os = "macos", target_os = "linux", target_os = "windows")))] 37 | fn daemonize_process(_bin_path: PathBuf, _log_path: PathBuf) -> Result<()> { 38 | Err(anyhow!( 39 | "daemonizing processes is not supported on your operating system" 40 | )) 41 | } 42 | 43 | #[cfg(any(target_os = "macos", target_os = "linux"))] 44 | fn daemonize_process(bin_path: PathBuf, log_path: PathBuf) -> Result<()> { 45 | std::fs::create_dir_all(log_path.parent().unwrap())?; 46 | // ¯\_(ツ)_/¯ 47 | let status = Command::new("bash") 48 | .arg("-c") 49 | // TODO(b5): might be nice to capture output in a log file at some point? 50 | .arg(format!( 51 | "nohup {} > {} 2>&1 &", 52 | bin_path.to_str().unwrap(), 53 | log_path.to_str().unwrap(), 54 | )) 55 | .stderr(Stdio::null()) 56 | .stdout(Stdio::null()) 57 | .status()?; 58 | 59 | if !status.success() { 60 | Err(anyhow::anyhow!("couldn't daemonize binary"))?; 61 | } 62 | Ok(()) 63 | } 64 | 65 | #[cfg(target_os = "windows")] 66 | fn daemonize_process(_bin_path: PathBuf, _log_path: PathBuf) -> Result<()> { 67 | Err(anyhow!("daemonizing processes on windows is not supported")) 68 | } 69 | 70 | // TODO(b5) - this level of indirection isn't necessary, factor `stop_process` 71 | // directly into `stop` 72 | // https://github.com/n0-computer/iroh/pull/360#discussion_r1002000769 73 | pub fn stop(pid: u32) -> Result<()> { 74 | stop_process(pid) 75 | } 76 | 77 | #[cfg(not(any(target_os = "macos", target_os = "linux", target_os = "windows")))] 78 | fn stop_process(pid: u32) -> Result<()> { 79 | Err(anyhow!( 80 | "stopping processes is not supported on your operating system" 81 | )) 82 | } 83 | 84 | 
#[cfg(any(target_os = "macos", target_os = "linux"))] 85 | fn stop_process(pid: u32) -> Result<()> { 86 | let id = Pid::from_raw(pid as i32); 87 | kill(id, Signal::SIGINT).map_err(|e| anyhow!("killing process: {}", e)) 88 | } 89 | 90 | #[cfg(target_os = "windows")] 91 | fn stop_process(_pid: u32) -> Result<()> { 92 | Err(anyhow!("stopping processes on windows is not supported")) 93 | } 94 | -------------------------------------------------------------------------------- /iroh-car/src/reader.rs: -------------------------------------------------------------------------------- 1 | use cid::Cid; 2 | use futures::Stream; 3 | use tokio::io::AsyncRead; 4 | 5 | use crate::{ 6 | error::Error, 7 | header::CarHeader, 8 | util::{ld_read, read_node}, 9 | }; 10 | 11 | /// Reads CAR files that are in a BufReader 12 | #[derive(Debug)] 13 | pub struct CarReader { 14 | reader: R, 15 | header: CarHeader, 16 | buffer: Vec, 17 | } 18 | 19 | impl CarReader 20 | where 21 | R: AsyncRead + Send + Unpin, 22 | { 23 | /// Creates a new CarReader and parses the CarHeader 24 | pub async fn new(mut reader: R) -> Result { 25 | let mut buffer = Vec::new(); 26 | 27 | match ld_read(&mut reader, &mut buffer).await? { 28 | Some(buf) => { 29 | let header = CarHeader::decode(buf)?; 30 | 31 | Ok(CarReader { 32 | reader, 33 | header, 34 | buffer, 35 | }) 36 | } 37 | None => Err(Error::Parsing( 38 | "failed to parse uvarint for header".to_string(), 39 | )), 40 | } 41 | } 42 | 43 | /// Returns the header of this car file. 
44 | pub fn header(&self) -> &CarHeader { 45 | &self.header 46 | } 47 | 48 | /// Returns the next IPLD Block in the buffer 49 | pub async fn next_block(&mut self) -> Result)>, Error> { 50 | read_node(&mut self.reader, &mut self.buffer).await 51 | } 52 | 53 | pub fn stream(self) -> impl Stream), Error>> { 54 | futures::stream::try_unfold(self, |mut this| async move { 55 | let maybe_block = read_node(&mut this.reader, &mut this.buffer).await?; 56 | Ok(maybe_block.map(|b| (b, this))) 57 | }) 58 | } 59 | } 60 | 61 | #[cfg(test)] 62 | mod tests { 63 | use std::io::Cursor; 64 | 65 | use cid::Cid; 66 | use futures::TryStreamExt; 67 | use ipld_cbor::DagCborCodec; 68 | use multihash::MultihashDigest; 69 | 70 | use crate::{header::CarHeaderV1, writer::CarWriter}; 71 | 72 | use super::*; 73 | 74 | #[tokio::test] 75 | async fn car_write_read() { 76 | let digest_test = multihash::Code::Blake2b256.digest(b"test"); 77 | let cid_test = Cid::new_v1(DagCborCodec.into(), digest_test); 78 | 79 | let digest_foo = multihash::Code::Blake2b256.digest(b"foo"); 80 | let cid_foo = Cid::new_v1(DagCborCodec.into(), digest_foo); 81 | 82 | let header = CarHeader::V1(CarHeaderV1::from(vec![cid_foo])); 83 | 84 | let mut buffer = Vec::new(); 85 | let mut writer = CarWriter::new(header, &mut buffer); 86 | writer.write(cid_test, b"test").await.unwrap(); 87 | writer.write(cid_foo, b"foo").await.unwrap(); 88 | writer.finish().await.unwrap(); 89 | 90 | let reader = Cursor::new(&buffer); 91 | let car_reader = CarReader::new(reader).await.unwrap(); 92 | let files: Vec<_> = car_reader.stream().try_collect().await.unwrap(); 93 | 94 | assert_eq!(files.len(), 2); 95 | assert_eq!(files[0].0, cid_test); 96 | assert_eq!(files[0].1, b"test"); 97 | assert_eq!(files[1].0, cid_foo); 98 | assert_eq!(files[1].1, b"foo"); 99 | } 100 | } 101 | -------------------------------------------------------------------------------- /iroh-rpc-client/src/store.rs: 
-------------------------------------------------------------------------------- 1 | use anyhow::Result; 2 | use async_stream::stream; 3 | use bytes::Bytes; 4 | use cid::Cid; 5 | use futures::{Stream, StreamExt}; 6 | use iroh_rpc_types::{store::*, VersionRequest, WatchRequest}; 7 | 8 | use crate::open_client; 9 | use crate::{StatusType, HEALTH_POLL_WAIT}; 10 | 11 | #[derive(Debug, Clone)] 12 | pub struct StoreClient { 13 | client: quic_rpc::RpcClient, 14 | } 15 | 16 | impl StoreClient { 17 | pub async fn new(addr: StoreAddr) -> anyhow::Result { 18 | let client = open_client(addr).await?; 19 | Ok(Self { client }) 20 | } 21 | 22 | #[tracing::instrument(skip(self))] 23 | pub async fn version(&self) -> Result { 24 | let res = self.client.rpc(VersionRequest).await?; 25 | Ok(res.version) 26 | } 27 | 28 | #[tracing::instrument(skip(self, blob))] 29 | pub async fn put(&self, cid: Cid, blob: Bytes, links: Vec) -> Result<()> { 30 | self.client.rpc(PutRequest { cid, blob, links }).await??; 31 | Ok(()) 32 | } 33 | 34 | #[tracing::instrument(skip(self, blocks))] 35 | pub async fn put_many(&self, blocks: Vec<(Cid, Bytes, Vec)>) -> Result<()> { 36 | let blocks = blocks 37 | .into_iter() 38 | .map(|(cid, blob, links)| PutRequest { cid, blob, links }) 39 | .collect(); 40 | self.client.rpc(PutManyRequest { blocks }).await??; 41 | Ok(()) 42 | } 43 | 44 | #[tracing::instrument(skip(self))] 45 | pub async fn get(&self, cid: Cid) -> Result> { 46 | let res = self.client.rpc(GetRequest { cid }).await??; 47 | Ok(res.data) 48 | } 49 | 50 | #[tracing::instrument(skip(self))] 51 | pub async fn has(&self, cid: Cid) -> Result { 52 | let res = self.client.rpc(HasRequest { cid }).await??; 53 | Ok(res.has) 54 | } 55 | 56 | #[tracing::instrument(skip(self))] 57 | pub async fn get_links(&self, cid: Cid) -> Result>> { 58 | let res = self.client.rpc(GetLinksRequest { cid }).await??; 59 | Ok(res.links) 60 | } 61 | 62 | #[tracing::instrument(skip(self))] 63 | pub async fn get_size(&self, cid: Cid) -> 
Result> { 64 | let res = self.client.rpc(GetSizeRequest { cid }).await??; 65 | Ok(res.size) 66 | } 67 | 68 | #[tracing::instrument(skip(self))] 69 | pub async fn check(&self) -> (StatusType, String) { 70 | match self.version().await { 71 | Ok(version) => (StatusType::Serving, version), 72 | Err(_) => (StatusType::Down, String::new()), 73 | } 74 | } 75 | 76 | #[tracing::instrument(skip(self))] 77 | pub async fn watch(&self) -> impl Stream { 78 | let client = self.client.clone(); 79 | stream! { 80 | loop { 81 | let res = client.server_streaming(WatchRequest).await; 82 | if let Ok(mut res) = res { 83 | while let Some(Ok(version)) = res.next().await { 84 | yield (StatusType::Serving, version.version); 85 | } 86 | } 87 | yield (StatusType::Down, String::new()); 88 | tokio::time::sleep(HEALTH_POLL_WAIT).await; 89 | } 90 | } 91 | } 92 | } 93 | -------------------------------------------------------------------------------- /iroh-store/benches/store.rs: -------------------------------------------------------------------------------- 1 | use std::time::Instant; 2 | 3 | use cid::multihash::{Code, MultihashDigest}; 4 | use criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criterion}; 5 | use iroh_store::{Config, Store}; 6 | use tokio::runtime::Runtime; 7 | 8 | const RAW: u64 = 0x55; 9 | 10 | pub fn put_benchmark(c: &mut Criterion) { 11 | let mut group = c.benchmark_group("store_put"); 12 | for value_size in [32, 128, 512, 1024].iter() { 13 | let value = vec![8u8; *value_size]; 14 | let hash = Code::Sha2_256.digest(&value); 15 | let key = cid::Cid::new_v1(RAW, hash); 16 | 17 | group.throughput(criterion::Throughput::Bytes(*value_size as u64)); 18 | group.bench_with_input( 19 | BenchmarkId::new("value_size", *value_size as u64), 20 | &(key, value), 21 | |b, (key, value)| { 22 | let executor = Runtime::new().unwrap(); 23 | let dir = tempfile::tempdir().unwrap(); 24 | let config = Config::new(dir.path().into()); 25 | let store = executor.block_on(async { 
Store::create(config).await.unwrap() }); 26 | let store_ref = &store; 27 | b.to_async(&executor) 28 | .iter(|| async move { store_ref.put(*key, black_box(value), []).unwrap() }); 29 | }, 30 | ); 31 | } 32 | group.finish(); 33 | } 34 | 35 | pub fn get_benchmark(c: &mut Criterion) { 36 | let mut group = c.benchmark_group("store_get"); 37 | for value_size in [32, 128, 512, 1024].iter() { 38 | group.throughput(criterion::Throughput::Bytes(*value_size as u64)); 39 | group.bench_with_input( 40 | BenchmarkId::new("value_size", *value_size as u64), 41 | &(), 42 | |b, _| { 43 | let executor = Runtime::new().unwrap(); 44 | let dir = tempfile::tempdir().unwrap(); 45 | let config = Config::new(dir.path().into()); 46 | let store = executor.block_on(async { Store::create(config).await.unwrap() }); 47 | let store_ref = &store; 48 | let keys = executor.block_on(async { 49 | let mut keys = Vec::new(); 50 | for i in 0..1000 { 51 | let value = vec![i as u8; *value_size]; 52 | let hash = Code::Sha2_256.digest(&value); 53 | let key = cid::Cid::new_v1(RAW, hash); 54 | keys.push(key); 55 | store_ref.put(key, &value, []).unwrap(); 56 | } 57 | keys 58 | }); 59 | 60 | let keys_ref = &keys[..]; 61 | b.to_async(&executor).iter_custom(|iters| async move { 62 | let l = keys_ref.len(); 63 | 64 | let start = Instant::now(); 65 | for i in 0..iters { 66 | let key = &keys_ref[(i as usize) % l]; 67 | let res = store_ref.get(key).unwrap().unwrap(); 68 | black_box(res); 69 | } 70 | start.elapsed() 71 | }); 72 | }, 73 | ); 74 | } 75 | group.finish(); 76 | } 77 | 78 | criterion_group!(benches, put_benchmark, get_benchmark); 79 | criterion_main!(benches); 80 | -------------------------------------------------------------------------------- /iroh-rpc-client/src/lib.rs: -------------------------------------------------------------------------------- 1 | pub mod client; 2 | pub mod config; 3 | pub mod gateway; 4 | pub mod network; 5 | pub mod status; 6 | pub mod store; 7 | pub use self::config::Config; 8 | 
pub use client::Client; 9 | use iroh_rpc_types::{gateway::GatewayService, p2p::P2pService, store::StoreService, Addr}; 10 | pub use network::{Lookup, P2pClient}; 11 | use quic_rpc::{ 12 | transport::{combined, http2, CombinedChannelTypes, Http2ChannelTypes, MemChannelTypes}, 13 | RpcClient, RpcServer, Service, 14 | }; 15 | pub use status::{ClientStatus, ServiceStatus, ServiceType, StatusType, HEALTH_POLL_WAIT}; 16 | pub use store::StoreClient; 17 | 18 | /// The types of channels used by the client and server. 19 | pub type ChannelTypes = CombinedChannelTypes; 20 | 21 | /// Error when handling an RPC call on the client side. 22 | pub type ClientError = quic_rpc::client::RpcClientError; 23 | 24 | /// Error when handling an RPC call on the server side. 25 | pub type ServerError = quic_rpc::server::RpcServerError; 26 | 27 | /// A request sink and response stream for a single RPC call on the client side. 28 | #[allow(type_alias_bounds)] 29 | pub type ClientSocket = 30 | (C::SendSink, C::RecvStream); 31 | 32 | /// A response sink and request stream for a single RPC call on the server side. 
33 | #[allow(type_alias_bounds)] 34 | pub type ServerSocket = 35 | (C::SendSink, C::RecvStream); 36 | 37 | pub type StoreServer = RpcServer; 38 | pub type GatewayServer = RpcServer; 39 | pub type P2pServer = RpcServer; 40 | 41 | pub async fn create_server( 42 | addr: Addr, 43 | ) -> anyhow::Result> { 44 | // make a channel matching the channel types for this crate 45 | match addr { 46 | Addr::Mem(channel, _) => { 47 | let channel = combined::ServerChannel::new(None, Some(channel)); 48 | let server = RpcServer::new(channel); 49 | Ok(server) 50 | } 51 | Addr::IrpcLookup(_addr) => { 52 | todo!() 53 | // Ok(Some(RpcServer::new(combined::Channel::new(Some(addr), None)))) 54 | } 55 | Addr::Irpc(addr) => { 56 | let channel = quic_rpc::transport::http2::ServerChannel::serve(&addr)?; 57 | let channel = combined::ServerChannel::new(Some(channel), None); 58 | let server = RpcServer::new(channel); 59 | Ok(server) 60 | } 61 | } 62 | } 63 | 64 | pub async fn open_client(addr: Addr) -> anyhow::Result> { 65 | // make a channel matching the channel types for this crate 66 | match addr { 67 | Addr::Mem(_, client) => Ok(RpcClient::::new( 68 | combined::ClientChannel::new(None, Some(client)), 69 | )), 70 | Addr::Irpc(uri) => { 71 | let uri = format!("http://{uri}").parse()?; 72 | let channel = http2::ClientChannel::new(uri); 73 | let channel = combined::ClientChannel::new(Some(channel), None); 74 | Ok(RpcClient::::new(channel)) 75 | } 76 | Addr::IrpcLookup(_addr) => { 77 | todo!() 78 | } 79 | } 80 | } 81 | -------------------------------------------------------------------------------- /iroh-unixfs/src/hamt/hash_bits.rs: -------------------------------------------------------------------------------- 1 | use std::cmp::Ordering; 2 | 3 | use anyhow::{bail, ensure, Result}; 4 | 5 | /// Helper struct which indexes and allows returning bits from a hashed key 6 | #[derive(Debug, Clone, Copy)] 7 | pub struct HashBits<'a, const S: usize> { 8 | b: &'a [u8; S], 9 | pub consumed: u32, 10 | } 11 
| 12 | #[inline] 13 | fn mkmask(n: u32) -> u32 { 14 | ((1u64 << n) - 1) as u32 15 | } 16 | 17 | impl<'a, const S: usize> HashBits<'a, S> { 18 | pub fn new(hash_buffer: &'a [u8; S]) -> HashBits<'a, S> { 19 | Self::new_at_index(hash_buffer, 0) 20 | } 21 | 22 | /// Constructs hash bits with custom consumed index 23 | pub fn new_at_index(hash_buffer: &'a [u8; S], consumed: u32) -> HashBits<'a, S> { 24 | Self { 25 | b: hash_buffer, 26 | consumed, 27 | } 28 | } 29 | 30 | /// Returns next `i` bits of the hash and returns the value as an integer and returns 31 | /// Error when maximum depth is reached 32 | pub fn next(&mut self, i: u32) -> Result { 33 | ensure!(i <= 8, "invalid hash bit length"); 34 | if (self.consumed + i) as usize > self.b.len() * 8 { 35 | bail!("maxium depth reached"); 36 | } 37 | Ok(self.next_bits(i)) 38 | } 39 | 40 | fn next_bits(&mut self, i: u32) -> u32 { 41 | let curbi = self.consumed / 8; 42 | let leftb = 8 - (self.consumed % 8); 43 | 44 | let curb = self.b[curbi as usize] as u32; 45 | match i.cmp(&leftb) { 46 | Ordering::Equal => { 47 | // bits to consume is equal to the bits remaining in the currently indexed byte 48 | let out = mkmask(i) & curb; 49 | self.consumed += i; 50 | out 51 | } 52 | Ordering::Less => { 53 | // Consuming less than the remaining bits in the current byte 54 | let a = curb & mkmask(leftb); 55 | let b = a & !mkmask(leftb - i); 56 | let c = b >> (leftb - i); 57 | self.consumed += i; 58 | c 59 | } 60 | Ordering::Greater => { 61 | // Consumes remaining bits and remaining bits from a recursive call 62 | let mut out = (mkmask(leftb) & curb) as u64; 63 | out <<= i - leftb; 64 | self.consumed += leftb; 65 | out += self.next_bits(i - leftb) as u64; 66 | out as u32 67 | } 68 | } 69 | } 70 | } 71 | 72 | #[cfg(test)] 73 | mod tests { 74 | use super::*; 75 | 76 | #[test] 77 | fn test_bitfield() { 78 | let mut key: [u8; 32] = Default::default(); 79 | key[0] = 0b10001000; 80 | key[1] = 0b10101010; 81 | key[2] = 0b10111111; 82 | key[3] = 
0b11111111; 83 | let mut hb = HashBits::new(&key); 84 | // Test eq cmp 85 | assert_eq!(hb.next(8).unwrap(), 0b10001000); 86 | // Test lt cmp 87 | assert_eq!(hb.next(5).unwrap(), 0b10101); 88 | // Test gt cmp 89 | assert_eq!(hb.next(5).unwrap(), 0b01010); 90 | assert_eq!(hb.next(6).unwrap(), 0b111111); 91 | assert_eq!(hb.next(8).unwrap(), 0b11111111); 92 | assert!(hb.next(9).is_err()); 93 | for _ in 0..28 { 94 | // Iterate through rest of key to test depth 95 | hb.next(8).unwrap(); 96 | } 97 | assert!(hb.next(1).is_err()); 98 | } 99 | } 100 | -------------------------------------------------------------------------------- /iroh-api/src/config.rs: -------------------------------------------------------------------------------- 1 | use config::{ConfigError, Map, Source, Value}; 2 | use iroh_metrics::config::Config as MetricsConfig; 3 | use iroh_rpc_client::Config as RpcClientConfig; 4 | use iroh_unixfs::indexer::IndexerUrl; 5 | use iroh_util::insert_into_config_map; 6 | use serde::{Deserialize, Serialize}; 7 | 8 | /// CONFIG_FILE_NAME is the name of the optional config file located in the iroh home directory 9 | pub const CONFIG_FILE_NAME: &str = "ctl.config.toml"; 10 | /// ENV_PREFIX should be used along side the config field name to set a config field using 11 | /// environment variables 12 | pub const ENV_PREFIX: &str = "IROH_CTL"; 13 | 14 | /// Configuration for [`iroh-api`]. 
15 | #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] 16 | pub struct Config { 17 | pub rpc_client: RpcClientConfig, 18 | pub metrics: MetricsConfig, 19 | pub http_resolvers: Option>, 20 | pub indexer_endpoint: Option, 21 | } 22 | 23 | impl Default for Config { 24 | fn default() -> Self { 25 | Self { 26 | rpc_client: RpcClientConfig::default_network(), 27 | metrics: Default::default(), 28 | http_resolvers: None, 29 | indexer_endpoint: Some(IndexerUrl::default()), 30 | } 31 | } 32 | } 33 | 34 | impl Source for Config { 35 | fn clone_into_box(&self) -> Box { 36 | Box::new(self.clone()) 37 | } 38 | fn collect(&self) -> Result, ConfigError> { 39 | let mut map: Map = Map::new(); 40 | insert_into_config_map(&mut map, "rpc_client", self.rpc_client.collect()?); 41 | insert_into_config_map(&mut map, "metrics", self.metrics.collect()?); 42 | if let Some(http_resolvers) = &self.http_resolvers { 43 | insert_into_config_map(&mut map, "http_resolvers", http_resolvers.clone()); 44 | } 45 | if let Some(indexer_endpoint) = &self.indexer_endpoint { 46 | insert_into_config_map(&mut map, "indexer_endpoint", indexer_endpoint.clone()); 47 | } 48 | 49 | Ok(map) 50 | } 51 | } 52 | 53 | #[cfg(test)] 54 | mod tests { 55 | use super::*; 56 | use config::Config as ConfigBuilder; 57 | 58 | #[test] 59 | fn test_collect() { 60 | let default = Config::default(); 61 | let mut expect: Map = Map::new(); 62 | expect.insert( 63 | "rpc_client".to_string(), 64 | Value::new(None, default.rpc_client.collect().unwrap()), 65 | ); 66 | expect.insert( 67 | "metrics".to_string(), 68 | Value::new(None, default.metrics.collect().unwrap()), 69 | ); 70 | expect.insert( 71 | "indexer_endpoint".to_string(), 72 | Value::new(None, default.indexer_endpoint.clone()), 73 | ); 74 | 75 | expect.insert( 76 | "http_resolvers".to_string(), 77 | Value::new(None, default.http_resolvers.clone()), 78 | ); 79 | 80 | let got = default.collect().unwrap(); 81 | 82 | for key in got.keys() { 83 | let left = 
expect.get(key).unwrap(); 84 | let right = got.get(key).unwrap(); 85 | assert_eq!(left, right); 86 | } 87 | } 88 | 89 | #[test] 90 | fn test_build_config_from_struct() { 91 | let expect = Config::default(); 92 | let got: Config = ConfigBuilder::builder() 93 | .add_source(expect.clone()) 94 | .build() 95 | .unwrap() 96 | .try_deserialize() 97 | .unwrap(); 98 | 99 | assert_eq!(expect, got); 100 | } 101 | } 102 | --------------------------------------------------------------------------------