├── .gitignore ├── src ├── store │ ├── test.rs │ ├── util │ │ ├── observer.rs │ │ ├── partial_mem_storage.rs │ │ ├── size_info.rs │ │ ├── sparse_mem_file.rs │ │ └── mem_or_file.rs │ ├── mod.rs │ └── fs │ │ ├── util.rs │ │ ├── meta │ │ ├── tables.rs │ │ └── proto.rs │ │ ├── options.rs │ │ ├── delete_set.rs │ │ └── entry_state.rs ├── format.rs ├── metrics.rs ├── util │ ├── channel.rs │ └── temp_tag.rs ├── lib.rs ├── test.rs ├── net_protocol.rs ├── hashseq.rs ├── get │ └── error.rs ├── api │ └── tags.rs ├── ticket.rs └── api.rs ├── .cargo └── config.toml ├── .config └── nextest.toml ├── .github ├── workflows │ ├── commit.yaml │ ├── cleanup.yaml │ ├── beta.yaml │ ├── docs.yaml │ ├── flaky.yaml │ ├── tests.yaml │ └── ci.yaml ├── dependabot.yaml └── pull_request_template.md ├── proptest-regressions └── protocol │ └── range_spec.txt ├── Makefile.toml ├── deny.toml ├── LICENSE-MIT ├── examples ├── common │ └── mod.rs ├── get-blob.rs ├── transfer.rs ├── mdns-discovery.rs ├── transfer-collection.rs ├── expiring-tags.rs ├── compression.rs └── random_store.rs ├── .img └── iroh_wordmark.svg ├── code_of_conduct.md ├── cliff.toml ├── Cargo.toml ├── README.md ├── tests ├── blobs.rs └── tags.rs └── LICENSE-APACHE /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | iroh.config.toml 3 | .vscode/* 4 | -------------------------------------------------------------------------------- /src/store/test.rs: -------------------------------------------------------------------------------- 1 | //! Test harness for store implementations. 2 | -------------------------------------------------------------------------------- /.cargo/config.toml: -------------------------------------------------------------------------------- 1 | [target.wasm32-unknown-unknown] 2 | runner = "wasm-bindgen-test-runner" 3 | rustflags = ['--cfg', 'getrandom_backend="wasm_js"'] 4 | -------------------------------------------------------------------------------- /src/store/util/observer.rs: -------------------------------------------------------------------------------- 1 | use std::fmt::Debug; 2 | 3 | // A commutative combine trait for updates 4 | pub trait Combine: Debug { 5 | fn combine(self, other: Self) -> Self; 6 | } 7 | 8 | #[allow(dead_code)] 9 | pub trait CombineInPlace: Combine { 10 | fn combine_with(&mut self, other: Self) -> Self; 11 | fn is_neutral(&self) -> bool; 12 | } 13 | -------------------------------------------------------------------------------- /.config/nextest.toml: -------------------------------------------------------------------------------- 1 | [test-groups] 2 | run-in-isolation = { max-threads = 32 } 3 | # these are tests that must not run with other tests concurrently. All tests in 4 | # this group can take up at most 32 threads among them, but each one requiring 5 | # 16 threads also. The effect should be that tests run isolated. 
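# For example, a test reachable at a module path such as
# `store::fs::tests::run_in_isolation::gc_smoke` (an illustrative name, not an
# actual test in this crate) matches the `test(::run_in_isolation::)` filter
# below and is routed into this group.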
6 | 7 | [[profile.ci.overrides]] 8 | filter = 'test(::run_in_isolation::)' 9 | test-group = 'run-in-isolation' 10 | threads-required = 32 11 | -------------------------------------------------------------------------------- /.github/workflows/commit.yaml: -------------------------------------------------------------------------------- 1 | name: Commits 2 | 3 | on: 4 | pull_request: 5 | branches: [main] 6 | types: [opened, edited, synchronize] 7 | 8 | env: 9 | IROH_FORCE_STAGING_RELAYS: "1" 10 | 11 | jobs: 12 | check-for-cc: 13 | runs-on: ubuntu-latest 14 | steps: 15 | - name: check-for-cc 16 | id: check-for-cc 17 | uses: agenthunt/conventional-commit-checker-action@v2.0.1 18 | with: 19 | pr-title-regex: "^(.+)(?:(([^)s]+)))?!?: (.+)" 20 | -------------------------------------------------------------------------------- /.github/dependabot.yaml: -------------------------------------------------------------------------------- 1 | # Keep GitHub Actions up to date with GitHub's Dependabot... 2 | # https://docs.github.com/en/code-security/dependabot/working-with-dependabot/keeping-your-actions-up-to-date-with-dependabot 3 | # https://docs.github.com/en/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file#package-ecosystem 4 | version: 2 5 | updates: 6 | - package-ecosystem: github-actions 7 | directory: / 8 | groups: 9 | github-actions: 10 | patterns: 11 | - "*" # Group all Actions updates into a single larger pull request 12 | schedule: 13 | interval: weekly 14 | -------------------------------------------------------------------------------- /proptest-regressions/protocol/range_spec.txt: -------------------------------------------------------------------------------- 1 | # Seeds for failure cases proptest has generated in the past. It is 2 | # automatically read and these particular cases re-run before any 3 | # novel cases are generated. 4 | # 5 | # It is recommended to check this file in to source control so that 6 | # everyone who runs the test benefits from these saved cases. 7 | cc 5ff4de8531a81c637b4d202c97b724a41a989bc6894464e84db5ac2a519c08a9 # shrinks to ranges = [RangeSet{1..2}] 8 | cc 50cb338763aa276705bb970d57d3d87e834f31a7e57bba810f46690c6d1e9955 # shrinks to ranges = [RangeSet{7..98}, RangeSet{7..98}] 9 | cc 8579821a8bde7872fed2cfa38e8a5923706b9915f3920e9c2d101a06bc789309 # shrinks to ranges = [] 10 | -------------------------------------------------------------------------------- /src/store/mod.rs: -------------------------------------------------------------------------------- 1 | //! Store implementations 2 | //! 3 | //! Use the [`mem`] store for sharing a small amount of mutable data, 4 | //! the [`readonly_mem`] store for sharing static data, and the [`fs`] store 5 | //! for when you want to efficiently share more than the available memory and 6 | //! have access to a writeable filesystem. 
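//!
//! A minimal sketch of picking a store (assuming an async context and the
//! `fs-store` feature; the path is a placeholder):
//!
//! ```rust,ignore
//! use iroh_blobs::store::{fs::FsStore, mem::MemStore};
//!
//! // small amounts of mutable data: keep everything in memory
//! let mem_store = MemStore::new();
//!
//! // more data than fits in memory, on a writeable filesystem
//! let fs_store = FsStore::load("/path/to/blobs").await?;
//! ```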
7 | use bao_tree::BlockSize; 8 | #[cfg(feature = "fs-store")] 9 | pub mod fs; 10 | mod gc; 11 | pub mod mem; 12 | pub mod readonly_mem; 13 | mod test; 14 | pub(crate) mod util; 15 | 16 | /// Block size used by iroh, 2^4*1024 = 16KiB 17 | pub const IROH_BLOCK_SIZE: BlockSize = BlockSize::from_chunk_log(4); 18 | 19 | pub use gc::{GcConfig, ProtectCb, ProtectOutcome}; 20 | -------------------------------------------------------------------------------- /.github/pull_request_template.md: -------------------------------------------------------------------------------- 1 | ## Description 2 | 3 | 4 | 5 | ## Breaking Changes 6 | 7 | 8 | 9 | ## Notes & open questions 10 | 11 | 12 | 13 | ## Change checklist 14 | 15 | - [ ] Self-review. 16 | - [ ] Documentation updates following the [style guide](https://rust-lang.github.io/rfcs/1574-more-api-documentation-conventions.html#appendix-a-full-conventions-text), if relevant. 17 | - [ ] Tests if relevant. 18 | - [ ] All breaking changes documented. 19 | -------------------------------------------------------------------------------- /Makefile.toml: -------------------------------------------------------------------------------- 1 | # Use cargo-make to run tasks here: https://crates.io/crates/cargo-make 2 | 3 | [tasks.format] 4 | workspace = false 5 | command = "cargo" 6 | args = [ 7 | "fmt", 8 | "--all", 9 | "--", 10 | "--config", 11 | "unstable_features=true", 12 | "--config", 13 | "imports_granularity=Crate,group_imports=StdExternalCrate,reorder_imports=true", 14 | ] 15 | 16 | [tasks.format-check] 17 | workspace = false 18 | command = "cargo" 19 | args = [ 20 | "fmt", 21 | "--all", 22 | "--check", 23 | "--", 24 | "--config", 25 | "unstable_features=true", 26 | "--config", 27 | "imports_granularity=Crate,group_imports=StdExternalCrate,reorder_imports=true", 28 | ] 29 | -------------------------------------------------------------------------------- /src/format.rs: -------------------------------------------------------------------------------- 1 | //! Defines data formats for HashSeq. 2 | //! 3 | //! The exact details how to use a HashSeq for specific purposes is up to the 4 | //! user. However, the following approach is used by iroh formats: 5 | //! 6 | //! The first child blob is a metadata blob. It starts with a header, followed 7 | //! by serialized metadata. We mostly use [postcard] for serialization. The 8 | //! metadata either implicitly or explicitly refers to the other blobs in the 9 | //! HashSeq by index. 10 | //! 11 | //! In a very simple case, the metadata just an array of items, where each item 12 | //! is the metadata for the corresponding blob. The metadata array will have 13 | //! n-1 items, where n is the number of blobs in the HashSeq. 14 | //! 15 | //! 
[postcard]: https://docs.rs/postcard/latest/postcard/ 16 | pub mod collection; 17 | -------------------------------------------------------------------------------- /deny.toml: -------------------------------------------------------------------------------- 1 | [advisories] 2 | ignore = [ 3 | "RUSTSEC-2024-0370", 4 | "RUSTSEC-2024-0384", 5 | "RUSTSEC-2024-0436", 6 | "RUSTSEC-2023-0089", 7 | ] 8 | 9 | [bans] 10 | deny = [ 11 | "aws-lc", 12 | "aws-lc-rs", 13 | "aws-lc-sys", 14 | "native-tls", 15 | "openssl", 16 | ] 17 | multiple-versions = "allow" 18 | 19 | [licenses] 20 | allow = [ 21 | "Apache-2.0", 22 | "Apache-2.0 WITH LLVM-exception", 23 | "BSD-2-Clause", 24 | "BSD-3-Clause", 25 | "BSL-1.0", 26 | "ISC", 27 | "MIT", 28 | "Zlib", 29 | "MPL-2.0", 30 | "Unicode-3.0", 31 | "Unlicense", 32 | "CDLA-Permissive-2.0", 33 | ] 34 | 35 | [[licenses.clarify]] 36 | expression = "MIT AND ISC AND OpenSSL" 37 | name = "ring" 38 | 39 | [[licenses.clarify.license-files]] 40 | hash = 3171872035 41 | path = "LICENSE" 42 | -------------------------------------------------------------------------------- /LICENSE-MIT: -------------------------------------------------------------------------------- 1 | Copyright 2023 N0, INC. 2 | 3 | Permission is hereby granted, free of charge, to any 4 | person obtaining a copy of this software and associated 5 | documentation files (the "Software"), to deal in the 6 | Software without restriction, including without 7 | limitation the rights to use, copy, modify, merge, 8 | publish, distribute, sublicense, and/or sell copies of 9 | the Software, and to permit persons to whom the Software 10 | is furnished to do so, subject to the following 11 | conditions: 12 | 13 | The above copyright notice and this permission notice 14 | shall be included in all copies or substantial portions 15 | of the Software. 16 | 17 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF 18 | ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED 19 | TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A 20 | PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT 21 | SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY 22 | CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 23 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR 24 | IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 25 | DEALINGS IN THE SOFTWARE. 26 | -------------------------------------------------------------------------------- /.github/workflows/cleanup.yaml: -------------------------------------------------------------------------------- 1 | # Run tests using the beta Rust compiler 2 | 3 | name: Cleanup 4 | 5 | on: 6 | schedule: 7 | # 06:50 UTC every Monday 8 | - cron: '50 6 * * 1' 9 | workflow_dispatch: 10 | 11 | concurrency: 12 | group: beta-${{ github.workflow }}-${{ github.ref }} 13 | cancel-in-progress: true 14 | 15 | env: 16 | IROH_FORCE_STAGING_RELAYS: "1" 17 | 18 | jobs: 19 | clean_docs_branch: 20 | permissions: 21 | issues: write 22 | contents: write 23 | runs-on: ubuntu-latest 24 | steps: 25 | - name: Checkout 26 | uses: actions/checkout@v6 27 | with: 28 | ref: generated-docs-preview 29 | - name: Clean docs branch 30 | run: | 31 | cd pr/ 32 | # keep the last 25 prs 33 | dirs=$(ls -1d [0-9]* | sort -n) 34 | total_dirs=$(echo "$dirs" | wc -l) 35 | dirs_to_remove=$(echo "$dirs" | head -n $(($total_dirs - 25))) 36 | if [ -n "$dirs_to_remove" ]; then 37 | echo "$dirs_to_remove" | xargs rm -rf 38 | fi 39 | git add . 
40 | git commit -m "Cleanup old docs" 41 | git push 42 | 43 | 44 | 45 | 46 | -------------------------------------------------------------------------------- /.github/workflows/beta.yaml: -------------------------------------------------------------------------------- 1 | # Run tests using the beta Rust compiler 2 | 3 | name: Beta Rust 4 | 5 | on: 6 | schedule: 7 | # 06:50 UTC every Monday 8 | - cron: '50 6 * * 1' 9 | workflow_dispatch: 10 | 11 | concurrency: 12 | group: beta-${{ github.workflow }}-${{ github.ref }} 13 | cancel-in-progress: true 14 | 15 | env: 16 | IROH_FORCE_STAGING_RELAYS: "1" 17 | 18 | jobs: 19 | tests: 20 | uses: './.github/workflows/tests.yaml' 21 | with: 22 | rust-version: beta 23 | notify: 24 | needs: tests 25 | if: ${{ always() }} 26 | runs-on: ubuntu-latest 27 | steps: 28 | - name: Extract test results 29 | run: | 30 | printf '${{ toJSON(needs) }}\n' 31 | result=$(echo '${{ toJSON(needs) }}' | jq -r .tests.result) 32 | echo TESTS_RESULT=$result 33 | echo "TESTS_RESULT=$result" >>"$GITHUB_ENV" 34 | - name: Notify discord on failure 35 | uses: n0-computer/discord-webhook-notify@v1 36 | if: ${{ env.TESTS_RESULT == 'failure' }} 37 | with: 38 | severity: error 39 | details: | 40 | Rustc beta tests failed in **${{ github.repository }}** 41 | See https://github.com/${{ github.repository }}/actions/workflows/beta.yaml 42 | webhookUrl: ${{ secrets.DISCORD_N0_GITHUB_CHANNEL_WEBHOOK_URL }} 43 | 44 | -------------------------------------------------------------------------------- /examples/common/mod.rs: -------------------------------------------------------------------------------- 1 | #![allow(dead_code)] 2 | use anyhow::Result; 3 | use iroh::SecretKey; 4 | use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt, EnvFilter}; 5 | 6 | /// Gets a secret key from the IROH_SECRET environment variable or generates a new random one. 7 | /// If the environment variable is set, it must be a valid string representation of a secret key. 8 | pub fn get_or_generate_secret_key() -> Result { 9 | use std::{env, str::FromStr}; 10 | 11 | use anyhow::Context; 12 | if let Ok(secret) = env::var("IROH_SECRET") { 13 | // Parse the secret key from string 14 | SecretKey::from_str(&secret).context("Invalid secret key format") 15 | } else { 16 | // Generate a new random key 17 | let secret_key = SecretKey::generate(&mut rand::rng()); 18 | println!( 19 | "Generated new secret key: {}", 20 | hex::encode(secret_key.to_bytes()) 21 | ); 22 | println!("To reuse this key, set the IROH_SECRET environment variable to this value"); 23 | Ok(secret_key) 24 | } 25 | } 26 | 27 | // set the RUST_LOG env var to one of {debug,info,warn} to see logging info 28 | pub fn setup_logging() { 29 | tracing_subscriber::registry() 30 | .with(tracing_subscriber::fmt::layer().with_writer(std::io::stderr)) 31 | .with(EnvFilter::from_default_env()) 32 | .try_init() 33 | .ok(); 34 | } 35 | -------------------------------------------------------------------------------- /.img/iroh_wordmark.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /src/metrics.rs: -------------------------------------------------------------------------------- 1 | //! 
Metrics for iroh-blobs 2 | 3 | use iroh_metrics::{Counter, MetricsGroup}; 4 | 5 | /// Enum of metrics for the module 6 | #[allow(missing_docs)] 7 | #[allow(dead_code)] 8 | #[derive(Debug, Default, MetricsGroup)] 9 | #[metrics(name = "iroh-blobs")] 10 | pub struct Metrics { 11 | /// Total number of content bytes downloaded 12 | pub download_bytes_total: Counter, 13 | /// Total time in ms spent downloading content bytes 14 | pub download_time_total: Counter, 15 | /// Total number of successful downloads 16 | pub downloads_success: Counter, 17 | /// Total number of downloads failed with error 18 | pub downloads_error: Counter, 19 | /// Total number of downloads failed with not found 20 | pub downloads_notfound: Counter, 21 | /// Number of times the main downloader actor loop ticked 22 | pub downloader_tick_main: Counter, 23 | /// Number of times the downloader actor ticked for a connection ready 24 | pub downloader_tick_connection_ready: Counter, 25 | /// Number of times the downloader actor ticked for a message received 26 | pub downloader_tick_message_received: Counter, 27 | /// Number of times the downloader actor ticked for a transfer completed 28 | pub downloader_tick_transfer_completed: Counter, 29 | /// Number of times the downloader actor ticked for a transfer failed 30 | pub downloader_tick_transfer_failed: Counter, 31 | /// Number of times the downloader actor ticked for a retry node 32 | pub downloader_tick_retry_node: Counter, 33 | /// Number of times the downloader actor ticked for a goodbye node 34 | pub downloader_tick_goodbye_node: Counter, 35 | } 36 | -------------------------------------------------------------------------------- /src/util/channel.rs: -------------------------------------------------------------------------------- 1 | #[cfg(feature = "fs-store")] 2 | pub mod oneshot { 3 | use std::{ 4 | future::Future, 5 | pin::Pin, 6 | task::{Context, Poll}, 7 | }; 8 | 9 | pub fn channel() -> (Sender, Receiver) { 10 | let (tx, rx) = tokio::sync::oneshot::channel::(); 11 | (Sender::Tokio(tx), Receiver::Tokio(rx)) 12 | } 13 | 14 | #[derive(Debug)] 15 | pub enum Sender { 16 | Tokio(tokio::sync::oneshot::Sender), 17 | } 18 | 19 | impl From> for irpc::channel::oneshot::Sender { 20 | fn from(sender: Sender) -> Self { 21 | match sender { 22 | Sender::Tokio(tx) => tx.into(), 23 | } 24 | } 25 | } 26 | 27 | impl Sender { 28 | pub fn send(self, value: T) { 29 | match self { 30 | Self::Tokio(tx) => tx.send(value).ok(), 31 | }; 32 | } 33 | } 34 | 35 | pub enum Receiver { 36 | Tokio(tokio::sync::oneshot::Receiver), 37 | } 38 | 39 | impl Future for Receiver { 40 | type Output = std::result::Result; 41 | 42 | fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { 43 | match self.as_mut().get_mut() { 44 | Self::Tokio(rx) => { 45 | if rx.is_terminated() { 46 | // don't panic when polling a terminated receiver 47 | Poll::Pending 48 | } else { 49 | Future::poll(Pin::new(rx), cx) 50 | } 51 | } 52 | } 53 | } 54 | } 55 | } 56 | -------------------------------------------------------------------------------- /code_of_conduct.md: -------------------------------------------------------------------------------- 1 | # Code of Conduct 2 | 3 | Online or off, Number Zero is a harassment-free environment for everyone, regardless of gender, gender identity and expression, sexual orientation, disability, physical appearance, body size, race, age or religion or technical skill level. We do not tolerate harassment of participants in any form. 
4 | 5 | Harassment includes verbal comments that reinforce social structures of domination related to gender, gender identity and expression, sexual orientation, disability, physical appearance, body size, race, age, religion, sexual images in public spaces, deliberate intimidation, stalking, following, harassing photography or recording, sustained disruption of talks or other events, inappropriate physical contact, and unwelcome sexual attention. Participants asked to stop any harassing behavior are expected to comply immediately. 6 | 7 | If a participant engages in harassing behaviour, the organizers may take any action they deem appropriate, including warning the offender or expulsion from events and online forums. 8 | 9 | If you are being harassed, notice that someone else is being harassed, or have any other concerns, please contact a member of the organizing team immediately. 10 | 11 | At offline events, organizers will identify themselves, and will help participants contact venue security or local law enforcement, provide escorts, or otherwise assist those experiencing harassment to feel safe for the duration of the event. We value your participation! 12 | 13 | This document is based on a similar code from [EDGI](https://envirodatagov.org/) and [Civic Tech Toronto](http://civictech.ca/about-us/), itself derived from the [Recurse Center’s Social Rules](https://www.recurse.com/manual#sec-environment), and the [anti-harassment policy from the Geek Feminism Wiki](http://geekfeminism.wikia.com/wiki/Conference_anti-harassment/Policy). 14 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | #![doc = include_str!("../README.md")] 2 | //! # Module docs 3 | //! 4 | //! The crate is designed to be used from the [iroh] crate. 5 | //! 6 | //! It implements a [protocol] for streaming content-addressed data transfer using 7 | //! [BLAKE3] verified streaming. 8 | //! 9 | //! It also provides a [store] module for storage of blobs and outboards, 10 | //! as well as a [persistent](crate::store::fs) and a [memory](crate::store::mem) 11 | //! store implementation. 12 | //! 13 | //! To implement a server, the [provider] module provides helpers for handling 14 | //! connections and individual requests given a store. 15 | //! 16 | //! To perform get requests, the [get] module provides utilities to perform 17 | //! requests and store the result in a store, as well as a low level state 18 | //! machine for executing requests. 19 | //! 20 | //! The client API is available in the [api] module. You can get a client 21 | //! either from one of the [store] implementations, or from the [BlobsProtocol] 22 | //! via a 23 | //! 24 | //! The [downloader](api::downloader) module provides a component to download blobs from 25 | //! multiple sources and store them in a store. 26 | //! 27 | //! # Features: 28 | //! 29 | //! - `fs-store`: Enables the filesystem based store implementation. This comes with a few additional dependencies such as `redb` and `reflink-copy`. 30 | //! - `metrics`: Enables prometheus metrics for stores and the protocol. 31 | //! 32 | //! [BLAKE3]: https://github.com/BLAKE3-team/BLAKE3-specs/blob/master/blake3.pdf 33 | //! 
[iroh]: https://docs.rs/iroh 34 | mod hash; 35 | pub mod store; 36 | pub use hash::{BlobFormat, Hash, HashAndFormat}; 37 | pub mod api; 38 | 39 | pub mod format; 40 | pub mod get; 41 | pub mod hashseq; 42 | mod metrics; 43 | mod net_protocol; 44 | pub use net_protocol::BlobsProtocol; 45 | pub mod protocol; 46 | pub mod provider; 47 | pub mod ticket; 48 | 49 | #[doc(hidden)] 50 | pub mod test; 51 | pub mod util; 52 | 53 | #[cfg(test)] 54 | #[cfg(feature = "fs-store")] 55 | mod tests; 56 | 57 | pub use protocol::ALPN; 58 | -------------------------------------------------------------------------------- /src/store/util/partial_mem_storage.rs: -------------------------------------------------------------------------------- 1 | use std::io; 2 | 3 | use bao_tree::{ 4 | io::{sync::WriteAt, BaoContentItem}, 5 | BaoTree, 6 | }; 7 | 8 | use super::{size_info::SizeInfo, SparseMemFile}; 9 | use crate::{api::blobs::Bitfield, store::IROH_BLOCK_SIZE}; 10 | 11 | /// An incomplete entry, with all the logic to keep track of the state of the entry 12 | /// and for observing changes. 13 | #[derive(Debug, Default)] 14 | pub struct PartialMemStorage { 15 | pub(crate) data: SparseMemFile, 16 | pub(crate) outboard: SparseMemFile, 17 | pub(crate) size: SizeInfo, 18 | pub(crate) bitfield: Bitfield, 19 | } 20 | 21 | impl PartialMemStorage { 22 | pub fn current_size(&self) -> u64 { 23 | self.bitfield.size() 24 | } 25 | 26 | pub fn bitfield(&self) -> &Bitfield { 27 | &self.bitfield 28 | } 29 | 30 | pub fn write_batch(&mut self, size: u64, batch: &[BaoContentItem]) -> io::Result<()> { 31 | let tree = BaoTree::new(size, IROH_BLOCK_SIZE); 32 | for item in batch { 33 | match item { 34 | BaoContentItem::Parent(parent) => { 35 | if let Some(offset) = tree.pre_order_offset(parent.node) { 36 | let o0 = offset 37 | .checked_mul(64) 38 | .expect("u64 overflow multiplying to hash pair offset"); 39 | let outboard = &mut self.outboard; 40 | let mut buf = [0u8; 64]; 41 | buf[..32].copy_from_slice(parent.pair.0.as_bytes()); 42 | buf[32..].copy_from_slice(parent.pair.1.as_bytes()); 43 | outboard.write_all_at(o0, &buf)?; 44 | } 45 | } 46 | BaoContentItem::Leaf(leaf) => { 47 | self.size.write(leaf.offset, size); 48 | self.data.write_all_at(leaf.offset, leaf.data.as_ref())?; 49 | } 50 | } 51 | } 52 | Ok(()) 53 | } 54 | } 55 | -------------------------------------------------------------------------------- /src/store/util/size_info.rs: -------------------------------------------------------------------------------- 1 | use std::io; 2 | 3 | use bao_tree::io::sync::WriteAt; 4 | 5 | use crate::store::IROH_BLOCK_SIZE; 6 | 7 | /// Keep track of the most precise size we know of. 8 | /// 9 | /// When in memory, we don't have to write the size for every chunk to a separate 10 | /// slot, but can just keep the best one. 11 | #[derive(Debug, Default)] 12 | pub struct SizeInfo { 13 | pub offset: u64, 14 | pub size: u64, 15 | } 16 | 17 | #[allow(dead_code)] 18 | impl SizeInfo { 19 | /// Create a new size info for a complete file of size `size`. 20 | pub(crate) fn complete(size: u64) -> Self { 21 | let mask = (1 << IROH_BLOCK_SIZE.chunk_log()) - 1; 22 | // offset of the last bao chunk in a file of size `size` 23 | let last_chunk_offset = size & mask; 24 | Self { 25 | offset: last_chunk_offset, 26 | size, 27 | } 28 | } 29 | 30 | /// Write a size at the given offset. The size at the highest offset is going to be kept. 
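    ///
    /// A minimal sketch of the intended semantics (values are illustrative):
    ///
    /// ```ignore
    /// let mut info = SizeInfo::default();
    /// info.write(0, 100); // provisional size derived from the first chunk
    /// info.write(16384, 100_000); // higher offset wins, so this size is kept
    /// assert_eq!(info.current_size(), 100_000);
    /// ```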
31 | pub fn write(&mut self, offset: u64, size: u64) { 32 | // >= instead of > because we want to be able to update size 0, the initial value. 33 | if offset >= self.offset { 34 | self.offset = offset; 35 | self.size = size; 36 | } 37 | } 38 | 39 | /// The current size, representing the most correct size we know. 40 | pub fn current_size(&self) -> u64 { 41 | self.size 42 | } 43 | 44 | /// Persist into a file where each chunk has its own slot. 45 | pub fn persist(&self, mut target: impl WriteAt) -> io::Result<()> { 46 | let size_offset = (self.offset >> IROH_BLOCK_SIZE.chunk_log()) << 3; 47 | target.write_all_at(size_offset, self.size.to_le_bytes().as_slice())?; 48 | Ok(()) 49 | } 50 | 51 | /// Convert to a vec in slot format. 52 | #[allow(dead_code)] 53 | pub fn to_vec(&self) -> Vec { 54 | let mut res = Vec::new(); 55 | self.persist(&mut res).expect("io error writing to vec"); 56 | res 57 | } 58 | } 59 | -------------------------------------------------------------------------------- /src/store/fs/util.rs: -------------------------------------------------------------------------------- 1 | use std::future::Future; 2 | 3 | use tokio::{select, sync::mpsc}; 4 | pub(crate) mod entity_manager; 5 | 6 | /// A wrapper for a tokio mpsc receiver that allows peeking at the next message. 7 | #[derive(Debug)] 8 | pub struct PeekableReceiver { 9 | msg: Option, 10 | recv: mpsc::Receiver, 11 | } 12 | 13 | #[allow(dead_code)] 14 | impl PeekableReceiver { 15 | pub fn new(recv: mpsc::Receiver) -> Self { 16 | Self { msg: None, recv } 17 | } 18 | 19 | /// Receive the next message. 20 | /// 21 | /// Will block if there are no messages. 22 | /// Returns None only if there are no more messages (sender is dropped). 23 | /// 24 | /// Cancel safe because the only async operation is the recv() call, which is cancel safe. 25 | pub async fn recv(&mut self) -> Option { 26 | if let Some(msg) = self.msg.take() { 27 | return Some(msg); 28 | } 29 | self.recv.recv().await 30 | } 31 | 32 | /// Receive the next message, but only if it passes the filter. 33 | /// 34 | /// Cancel safe because the only async operation is the [Self::recv] call, which is cancel safe. 35 | pub async fn extract( 36 | &mut self, 37 | f: impl Fn(T) -> std::result::Result, 38 | timeout: impl Future + Unpin, 39 | ) -> Option { 40 | let msg = select! { 41 | x = self.recv() => x?, 42 | _ = timeout => return None, 43 | }; 44 | match f(msg) { 45 | Ok(u) => Some(u), 46 | Err(msg) => { 47 | self.msg = Some(msg); 48 | None 49 | } 50 | } 51 | } 52 | 53 | /// Push back a message. This will only work if there is room for it. 54 | /// Otherwise, it will fail and return the message. 
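    ///
    /// A minimal sketch (channel capacity and values are illustrative):
    ///
    /// ```ignore
    /// let (_tx, rx) = tokio::sync::mpsc::channel::<u32>(4);
    /// let mut recv = PeekableReceiver::new(rx);
    /// assert!(recv.push_back(1).is_ok()); // the single peek slot was free
    /// assert_eq!(recv.push_back(2), Err(2)); // slot occupied, message handed back
    /// ```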
55 | pub fn push_back(&mut self, msg: T) -> std::result::Result<(), T> { 56 | if self.msg.is_none() { 57 | self.msg = Some(msg); 58 | Ok(()) 59 | } else { 60 | Err(msg) 61 | } 62 | } 63 | } 64 | -------------------------------------------------------------------------------- /src/test.rs: -------------------------------------------------------------------------------- 1 | use std::future::IntoFuture; 2 | 3 | use n0_future::{stream, StreamExt}; 4 | use rand::{RngCore, SeedableRng}; 5 | 6 | use crate::{ 7 | api::{blobs::AddBytesOptions, tags::TagInfo, RequestResult, Store}, 8 | hashseq::HashSeq, 9 | BlobFormat, 10 | }; 11 | 12 | pub async fn create_random_blobs( 13 | store: &Store, 14 | num_blobs: usize, 15 | blob_size: impl Fn(usize, &mut R) -> usize, 16 | mut rand: R, 17 | ) -> n0_error::Result> { 18 | // generate sizes and seeds, non-parrallelized so it is deterministic 19 | let sizes = (0..num_blobs) 20 | .map(|n| (blob_size(n, &mut rand), rand.random::())) 21 | .collect::>(); 22 | // generate random data and add it to the store 23 | let infos = stream::iter(sizes) 24 | .then(|(size, seed)| { 25 | let mut rand = rand::rngs::StdRng::seed_from_u64(seed); 26 | let mut data = vec![0u8; size]; 27 | rand.fill_bytes(&mut data); 28 | store.add_bytes(data).into_future() 29 | }) 30 | .collect::>() 31 | .await; 32 | let infos = infos.into_iter().collect::>>()?; 33 | Ok(infos) 34 | } 35 | 36 | pub async fn add_hash_sequences( 37 | store: &Store, 38 | tags: &[TagInfo], 39 | num_seqs: usize, 40 | seq_size: impl Fn(usize, &mut R) -> usize, 41 | mut rand: R, 42 | ) -> n0_error::Result> { 43 | let infos = stream::iter(0..num_seqs) 44 | .then(|n| { 45 | let size = seq_size(n, &mut rand); 46 | let hs = (0..size) 47 | .map(|_| { 48 | let j = rand.random_range(0..tags.len()); 49 | tags[j].hash 50 | }) 51 | .collect::(); 52 | store 53 | .add_bytes_with_opts(AddBytesOptions { 54 | data: hs.into(), 55 | format: BlobFormat::HashSeq, 56 | }) 57 | .into_future() 58 | }) 59 | .collect::>() 60 | .await; 61 | let infos = infos.into_iter().collect::>>()?; 62 | Ok(infos) 63 | } 64 | -------------------------------------------------------------------------------- /cliff.toml: -------------------------------------------------------------------------------- 1 | [changelog] 2 | # changelog header 3 | header = """ 4 | # Changelog\n 5 | All notable changes to iroh-blobs will be documented in this file.\n 6 | """ 7 | 8 | body = """ 9 | {% if version %}\ 10 | {% if previous.version %}\ 11 | ## [{{ version | trim_start_matches(pat="v") }}](/compare/{{ previous.version }}..{{ version }}) - {{ timestamp | date(format="%Y-%m-%d") }} 12 | {% else %}\ 13 | ## [{{ version | trim_start_matches(pat="v") }}] - {{ timestamp | date(format="%Y-%m-%d") }} 14 | {% endif %}\ 15 | {% else %}\ 16 | ## [unreleased] 17 | {% endif %}\ 18 | 19 | {% macro commit(commit) -%} 20 | - {% if commit.scope %}*({{ commit.scope }})* {% endif %}{% if commit.breaking %}[**breaking**] {% endif %}\ 21 | {{ commit.message | upper_first }} - ([{{ commit.id | truncate(length=7, end="") }}](/commit/{{ commit.id }}))\ 22 | {% endmacro -%} 23 | 24 | {% for group, commits in commits | group_by(attribute="group") %} 25 | ### {{ group | striptags | trim | upper_first }} 26 | {% for commit in commits 27 | | filter(attribute="scope") 28 | | sort(attribute="scope") %} 29 | {{ self::commit(commit=commit) }} 30 | {%- endfor -%} 31 | {% raw %}\n{% endraw %}\ 32 | {%- for commit in commits %} 33 | {%- if not commit.scope -%} 34 | {{ self::commit(commit=commit) }} 35 | {% endif 
-%} 36 | {% endfor -%} 37 | {% endfor %}\n 38 | """ 39 | 40 | footer = "" 41 | postprocessors = [ 42 | { pattern = '', replace = "https://github.com/n0-computer/iroh-blobs" }, 43 | { pattern = "\\(#([0-9]+)\\)", replace = "([#${1}](https://github.com/n0-computer/iroh-blobs/issues/${1}))"} 44 | ] 45 | 46 | 47 | [git] 48 | # regex for parsing and grouping commits 49 | commit_parsers = [ 50 | { message = "^feat", group = "⛰️ Features" }, 51 | { message = "^fix", group = "🐛 Bug Fixes" }, 52 | { message = "^doc", group = "📚 Documentation" }, 53 | { message = "^perf", group = "⚡ Performance" }, 54 | { message = "^refactor", group = "🚜 Refactor" }, 55 | { message = "^style", group = "🎨 Styling" }, 56 | { message = "^test", group = "🧪 Testing" }, 57 | { message = "^chore\\(release\\)", skip = true }, 58 | { message = "^chore\\(deps\\)", skip = true }, 59 | { message = "^chore\\(pr\\)", skip = true }, 60 | { message = "^chore\\(pull\\)", skip = true }, 61 | { message = "^chore|ci", group = "⚙️ Miscellaneous Tasks" }, 62 | { body = ".*security", group = "🛡️ Security" }, 63 | { message = "^revert", group = "◀️ Revert" }, 64 | ] 65 | -------------------------------------------------------------------------------- /.github/workflows/docs.yaml: -------------------------------------------------------------------------------- 1 | name: Docs Preview 2 | 3 | on: 4 | pull_request: 5 | workflow_dispatch: 6 | inputs: 7 | pr_number: 8 | required: true 9 | type: string 10 | 11 | # ensure job runs sequentially so pushing to the preview branch doesn't conflict 12 | concurrency: 13 | group: ci-docs-preview 14 | 15 | env: 16 | IROH_FORCE_STAGING_RELAYS: "1" 17 | 18 | jobs: 19 | preview_docs: 20 | permissions: write-all 21 | timeout-minutes: 30 22 | name: Docs preview 23 | if: ${{ (github.event_name == 'pull_request' || github.event_name == 'workflow_dispatch' ) && !github.event.pull_request.head.repo.fork }} 24 | runs-on: ubuntu-latest 25 | env: 26 | RUSTC_WRAPPER: "sccache" 27 | SCCACHE_GHA_ENABLED: "on" 28 | SCCACHE_CACHE_SIZE: "50G" 29 | PREVIEW_PATH: pr/${{ github.event.pull_request.number || inputs.pr_number }}/docs 30 | 31 | steps: 32 | - uses: actions/checkout@v6 33 | - uses: dtolnay/rust-toolchain@master 34 | with: 35 | toolchain: nightly-2025-09-28 36 | - name: Install sccache 37 | uses: mozilla-actions/sccache-action@v0.0.9 38 | 39 | - name: Generate Docs 40 | run: cargo doc --workspace --all-features --no-deps 41 | env: 42 | RUSTDOCFLAGS: --cfg iroh_docsrs 43 | 44 | - name: Deploy Docs to Preview Branch 45 | uses: peaceiris/actions-gh-pages@v4 46 | with: 47 | github_token: ${{ secrets.GITHUB_TOKEN }} 48 | publish_dir: ./target/doc/ 49 | destination_dir: ${{ env.PREVIEW_PATH }} 50 | publish_branch: generated-docs-preview 51 | 52 | - name: Find Docs Comment 53 | uses: peter-evans/find-comment@v4 54 | id: fc 55 | with: 56 | issue-number: ${{ github.event.pull_request.number || inputs.pr_number }} 57 | comment-author: 'github-actions[bot]' 58 | body-includes: Documentation for this PR has been generated 59 | 60 | - name: Get current timestamp 61 | id: get_timestamp 62 | run: echo "TIMESTAMP=$(date -u +'%Y-%m-%dT%H:%M:%SZ')" >> $GITHUB_ENV 63 | 64 | - name: Create or Update Docs Comment 65 | uses: peter-evans/create-or-update-comment@v5 66 | with: 67 | issue-number: ${{ github.event.pull_request.number || inputs.pr_number }} 68 | comment-id: ${{ steps.fc.outputs.comment-id }} 69 | body: | 70 | Documentation for this PR has been generated and is available at: https://${{ github.repository_owner }}.github.io/${{ 
github.event.repository.name }}/${{ env.PREVIEW_PATH }}/iroh_blobs/ 71 | 72 | Last updated: ${{ env.TIMESTAMP }} 73 | edit-mode: replace 74 | -------------------------------------------------------------------------------- /examples/get-blob.rs: -------------------------------------------------------------------------------- 1 | /// Example how to request a blob from a remote node without using a store. 2 | mod common; 3 | use bao_tree::io::BaoContentItem; 4 | use clap::Parser; 5 | use common::setup_logging; 6 | use iroh::discovery::pkarr::PkarrResolver; 7 | use iroh_blobs::{get::request::GetBlobItem, ticket::BlobTicket, BlobFormat}; 8 | use n0_future::StreamExt; 9 | use tokio::io::AsyncWriteExt; 10 | 11 | #[derive(Debug, Parser)] 12 | #[command(version, about)] 13 | pub struct Cli { 14 | /// Ticket describing the content to fetch and the node to fetch it from 15 | /// 16 | /// This example only supports raw blobs. 17 | ticket: BlobTicket, 18 | /// True to print data as it arrives, false to complete the download and then 19 | /// print the data. Defaults to true. 20 | /// 21 | /// Note that setting progress to false can lead to an out-of-memory error 22 | /// for very large blobs. 23 | #[arg(long, default_value = "true")] 24 | progress: bool, 25 | } 26 | 27 | #[tokio::main] 28 | async fn main() -> anyhow::Result<()> { 29 | setup_logging(); 30 | let cli = Cli::parse(); 31 | let ticket = cli.ticket; 32 | let endpoint = iroh::Endpoint::empty_builder(iroh::RelayMode::Default) 33 | .discovery(PkarrResolver::n0_dns()) 34 | .bind() 35 | .await?; 36 | anyhow::ensure!( 37 | ticket.format() == BlobFormat::Raw, 38 | "This example only supports raw blobs." 39 | ); 40 | let connection = endpoint.connect(ticket.addr().id, iroh_blobs::ALPN).await?; 41 | let mut progress = iroh_blobs::get::request::get_blob(connection, ticket.hash()); 42 | let stats = if cli.progress { 43 | loop { 44 | match progress.next().await { 45 | Some(GetBlobItem::Item(item)) => match item { 46 | BaoContentItem::Leaf(leaf) => { 47 | tokio::io::stdout().write_all(&leaf.data).await?; 48 | } 49 | BaoContentItem::Parent(parent) => { 50 | tracing::info!("Parent: {parent:?}"); 51 | } 52 | }, 53 | Some(GetBlobItem::Done(stats)) => { 54 | break stats; 55 | } 56 | Some(GetBlobItem::Error(err)) => { 57 | anyhow::bail!("Error while streaming blob: {err}"); 58 | } 59 | None => { 60 | anyhow::bail!("Stream ended unexpectedly."); 61 | } 62 | } 63 | } 64 | } else { 65 | let (bytes, stats) = progress.bytes_and_stats().await?; 66 | tokio::io::stdout().write_all(&bytes).await?; 67 | stats 68 | }; 69 | tracing::info!("Stream done with stats: {stats:?}"); 70 | Ok(()) 71 | } 72 | -------------------------------------------------------------------------------- /src/net_protocol.rs: -------------------------------------------------------------------------------- 1 | //! Adaptation of `iroh-blobs` as an [`iroh`] [`ProtocolHandler`]. 2 | //! 3 | //! This is the easiest way to share data from a [`crate::api::Store`] over iroh connections. 4 | //! 5 | //! # Example 6 | //! 7 | //! ```rust 8 | //! # async fn example() -> n0_error::Result<()> { 9 | //! use iroh::{protocol::Router, Endpoint}; 10 | //! use iroh_blobs::{store, ticket::BlobTicket, BlobsProtocol}; 11 | //! 12 | //! // create a store 13 | //! let store = store::fs::FsStore::load("blobs").await?; 14 | //! 15 | //! // add some data 16 | //! let t = store.add_slice(b"hello world").await?; 17 | //! 18 | //! // create an iroh endpoint 19 | //! let endpoint = Endpoint::bind().await?; 20 | //! 
endpoint.online().await; 21 | //! let addr = endpoint.addr(); 22 | //! 23 | //! // create a blobs protocol handler 24 | //! let blobs = BlobsProtocol::new(&store, None); 25 | //! 26 | //! // create a router and add the blobs protocol handler 27 | //! let router = Router::builder(endpoint) 28 | //! .accept(iroh_blobs::ALPN, blobs) 29 | //! .spawn(); 30 | //! 31 | //! // this data is now globally available using the ticket 32 | //! let ticket = BlobTicket::new(addr, t.hash, t.format); 33 | //! println!("ticket: {}", ticket); 34 | //! 35 | //! // wait for control-c to exit 36 | //! tokio::signal::ctrl_c().await?; 37 | //! # Ok(()) 38 | //! # } 39 | //! ``` 40 | 41 | use std::{fmt::Debug, ops::Deref, sync::Arc}; 42 | 43 | use iroh::{ 44 | endpoint::Connection, 45 | protocol::{AcceptError, ProtocolHandler}, 46 | }; 47 | use tracing::error; 48 | 49 | use crate::{api::Store, provider::events::EventSender}; 50 | 51 | #[derive(Debug)] 52 | pub(crate) struct BlobsInner { 53 | store: Store, 54 | events: EventSender, 55 | } 56 | 57 | /// A protocol handler for the blobs protocol. 58 | #[derive(Debug, Clone)] 59 | pub struct BlobsProtocol { 60 | inner: Arc, 61 | } 62 | 63 | impl Deref for BlobsProtocol { 64 | type Target = Store; 65 | 66 | fn deref(&self) -> &Self::Target { 67 | &self.inner.store 68 | } 69 | } 70 | 71 | impl BlobsProtocol { 72 | pub fn new(store: &Store, events: Option) -> Self { 73 | Self { 74 | inner: Arc::new(BlobsInner { 75 | store: store.clone(), 76 | events: events.unwrap_or(EventSender::DEFAULT), 77 | }), 78 | } 79 | } 80 | 81 | pub fn store(&self) -> &Store { 82 | &self.inner.store 83 | } 84 | } 85 | 86 | impl ProtocolHandler for BlobsProtocol { 87 | async fn accept(&self, conn: Connection) -> std::result::Result<(), AcceptError> { 88 | let store = self.store().clone(); 89 | let events = self.inner.events.clone(); 90 | crate::provider::handle_connection(conn, store, events).await; 91 | Ok(()) 92 | } 93 | 94 | async fn shutdown(&self) { 95 | if let Err(cause) = self.store().shutdown().await { 96 | error!("error shutting down store: {:?}", cause); 97 | } 98 | } 99 | } 100 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "iroh-blobs" 3 | version = "0.97.0" 4 | edition = "2021" 5 | description = "content-addressed blobs for iroh" 6 | license = "MIT OR Apache-2.0" 7 | authors = ["dignifiedquire ", "n0 team"] 8 | repository = "https://github.com/n0-computer/iroh-blobs" 9 | keywords = ["hashing", "quic", "blake3", "streaming"] 10 | 11 | # Sadly this also needs to be updated in .github/workflows/ci.yml 12 | rust-version = "1.85" 13 | 14 | [dependencies] 15 | bao-tree = { version = "0.16", features = ["experimental-mixed", "tokio_fsm", "validate", "serde"], default-features = false } 16 | bytes = { version = "1", features = ["serde"] } 17 | derive_more = { version = "2.0.1", features = ["from", "try_from", "into", "debug", "display", "deref", "deref_mut"] } 18 | futures-lite = "2.6.0" 19 | quinn = { package = "iroh-quinn", version = "0.14.0", optional = true } 20 | n0-future = "0.3.0" 21 | range-collections = { version = "0.4.6", features = ["serde"] } 22 | smallvec = { version = "1", features = ["serde", "const_new"] } 23 | tokio = { version = "1.43.0", default-features = false, features = ["sync"] } 24 | tracing = "0.1.41" 25 | iroh-io = "0.6.1" 26 | rand = "0.9.2" 27 | hex = "0.4.3" 28 | serde = "1.0.217" 29 | postcard = { version = 
"1.1.1", features = ["experimental-derive", "use-std"] } 30 | data-encoding = "2.8.0" 31 | ref-cast = "1.0.24" 32 | arrayvec = "0.7.6" 33 | iroh = { version = "0.95", default-features = false } 34 | self_cell = "1.1.0" 35 | genawaiter = { version = "0.99.1", features = ["futures03"] } 36 | iroh-base = "0.95" 37 | iroh-tickets = "0.2" 38 | irpc = { version = "0.11.0", features = ["spans", "stream", "derive", "varint-util"], default-features = false } 39 | iroh-metrics = { version = "0.37" } 40 | redb = { version = "2.6.3", optional = true } 41 | reflink-copy = { version = "0.1.24", optional = true } 42 | n0-error = "0.1.2" 43 | nested_enum_utils = "0.2.3" 44 | 45 | # non-wasm-in-browser dependencies 46 | [target.'cfg(not(all(target_family = "wasm", target_os = "unknown")))'.dependencies] 47 | chrono = { version = "0.4.39" } 48 | 49 | # wasm-in-browser dependencies 50 | [target.'cfg(all(target_family = "wasm", target_os = "unknown"))'.dependencies] 51 | chrono = {version = "0.4.39", default-features = false, features = ["js-sys", "wasmbind", "std"] } 52 | 53 | [dev-dependencies] 54 | clap = { version = "4.5.31", features = ["derive"] } 55 | hex = "0.4.3" 56 | iroh-test = "0.31.0" 57 | proptest = "1.6.0" 58 | serde_json = "1.0.138" 59 | serde_test = "1.0.177" 60 | tempfile = "3.17.1" 61 | test-strategy = "0.4.0" 62 | testresult = "0.4.1" 63 | tracing-subscriber = { version = "0.3.20", features = ["fmt"] } 64 | tracing-test = "0.2.5" 65 | walkdir = "2.5.0" 66 | atomic_refcell = "0.1.13" 67 | iroh = { version = "0.95", features = ["discovery-local-network"]} 68 | async-compression = { version = "0.4.30", features = ["lz4", "tokio"] } 69 | concat_const = "0.2.0" 70 | anyhow = "1.0.100" 71 | 72 | [build-dependencies] 73 | cfg_aliases = "0.2.1" 74 | 75 | [features] 76 | hide-proto-docs = [] 77 | metrics = [] 78 | default = ["hide-proto-docs", "fs-store", "rpc"] 79 | fs-store = ["dep:redb", "dep:reflink-copy", "bao-tree/fs"] 80 | rpc = ["dep:quinn", "irpc/rpc", "irpc/quinn_endpoint_setup"] 81 | 82 | [[example]] 83 | name = "expiring-tags" 84 | required-features = ["fs-store"] 85 | 86 | [[example]] 87 | name = "random_store" 88 | required-features = ["fs-store"] 89 | -------------------------------------------------------------------------------- /src/hashseq.rs: -------------------------------------------------------------------------------- 1 | //! Helpers for blobs that contain a sequence of hashes. 2 | use std::fmt::Debug; 3 | 4 | use bytes::Bytes; 5 | use n0_error::{anyerr, AnyError}; 6 | 7 | use crate::Hash; 8 | 9 | /// A sequence of links, backed by a [`Bytes`] object. 
10 | #[derive(Clone, derive_more::Into)] 11 | pub struct HashSeq(Bytes); 12 | 13 | impl Debug for HashSeq { 14 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 15 | f.debug_list().entries(self.iter()).finish() 16 | } 17 | } 18 | 19 | impl<'a> FromIterator<&'a Hash> for HashSeq { 20 | fn from_iter>(iter: T) -> Self { 21 | iter.into_iter().copied().collect() 22 | } 23 | } 24 | 25 | impl FromIterator for HashSeq { 26 | fn from_iter>(iter: T) -> Self { 27 | let iter = iter.into_iter(); 28 | let (lower, _upper) = iter.size_hint(); 29 | let mut bytes = Vec::with_capacity(lower * 32); 30 | for hash in iter { 31 | bytes.extend_from_slice(hash.as_ref()); 32 | } 33 | Self(bytes.into()) 34 | } 35 | } 36 | 37 | impl TryFrom for HashSeq { 38 | type Error = AnyError; 39 | 40 | fn try_from(bytes: Bytes) -> Result { 41 | Self::new(bytes).ok_or_else(|| anyerr!("invalid hash sequence")) 42 | } 43 | } 44 | 45 | impl IntoIterator for HashSeq { 46 | type Item = Hash; 47 | type IntoIter = HashSeqIter; 48 | 49 | fn into_iter(self) -> Self::IntoIter { 50 | HashSeqIter(self) 51 | } 52 | } 53 | 54 | impl HashSeq { 55 | /// Create a new sequence of hashes. 56 | pub fn new(bytes: Bytes) -> Option { 57 | if bytes.len() % 32 == 0 { 58 | Some(Self(bytes)) 59 | } else { 60 | None 61 | } 62 | } 63 | 64 | /// Iterate over the hashes in this sequence. 65 | pub fn iter(&self) -> impl Iterator + '_ { 66 | self.0.chunks_exact(32).map(|chunk| { 67 | let hash: [u8; 32] = chunk.try_into().unwrap(); 68 | hash.into() 69 | }) 70 | } 71 | 72 | /// Get the number of hashes in this sequence. 73 | pub fn len(&self) -> usize { 74 | self.0.len() / 32 75 | } 76 | 77 | /// Check if this sequence is empty. 78 | pub fn is_empty(&self) -> bool { 79 | self.0.is_empty() 80 | } 81 | 82 | /// Get the hash at the given index. 83 | pub fn get(&self, index: usize) -> Option { 84 | if index < self.len() { 85 | let hash: [u8; 32] = self.0[index * 32..(index + 1) * 32].try_into().unwrap(); 86 | Some(hash.into()) 87 | } else { 88 | None 89 | } 90 | } 91 | 92 | /// Get and remove the first hash in this sequence. 93 | pub fn pop_front(&mut self) -> Option { 94 | if self.is_empty() { 95 | None 96 | } else { 97 | let hash = self.get(0).unwrap(); 98 | self.0 = self.0.slice(32..); 99 | Some(hash) 100 | } 101 | } 102 | 103 | /// Get the underlying bytes. 104 | pub fn into_inner(self) -> Bytes { 105 | self.0 106 | } 107 | } 108 | 109 | /// Iterator over the hashes in a [`HashSeq`]. 
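///
/// A minimal sketch (assuming an existing `HashSeq` named `seq`):
///
/// ```ignore
/// let hashes: Vec<Hash> = seq.into_iter().collect();
/// ```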
110 | #[derive(Debug, Clone)] 111 | pub struct HashSeqIter(HashSeq); 112 | 113 | impl Iterator for HashSeqIter { 114 | type Item = Hash; 115 | 116 | fn next(&mut self) -> Option { 117 | self.0.pop_front() 118 | } 119 | } 120 | -------------------------------------------------------------------------------- /examples/transfer.rs: -------------------------------------------------------------------------------- 1 | use std::path::PathBuf; 2 | 3 | use iroh::{protocol::Router, Endpoint}; 4 | use iroh_blobs::{store::mem::MemStore, ticket::BlobTicket, BlobsProtocol}; 5 | 6 | #[tokio::main] 7 | async fn main() -> anyhow::Result<()> { 8 | // Create an endpoint, it allows creating and accepting 9 | // connections in the iroh p2p world 10 | let endpoint = Endpoint::bind().await?; 11 | 12 | // We initialize an in-memory backing store for iroh-blobs 13 | let store = MemStore::new(); 14 | // Then we initialize a struct that can accept blobs requests over iroh connections 15 | let blobs = BlobsProtocol::new(&store, None); 16 | 17 | // Grab all passed in arguments, the first one is the binary itself, so we skip it. 18 | let args: Vec = std::env::args().skip(1).collect(); 19 | // Convert to &str, so we can pattern-match easily: 20 | let arg_refs: Vec<&str> = args.iter().map(String::as_str).collect(); 21 | 22 | match arg_refs.as_slice() { 23 | ["send", filename] => { 24 | let filename: PathBuf = filename.parse()?; 25 | let abs_path = std::path::absolute(&filename)?; 26 | 27 | println!("Hashing file."); 28 | 29 | // When we import a blob, we get back a "tag" that refers to said blob in the store 30 | // and allows us to control when/if it gets garbage-collected 31 | let tag = store.blobs().add_path(abs_path).await?; 32 | 33 | let node_id = endpoint.id(); 34 | let ticket = BlobTicket::new(node_id.into(), tag.hash, tag.format); 35 | 36 | println!("File hashed. Fetch this file by running:"); 37 | println!( 38 | "cargo run --example transfer -- receive {ticket} {}", 39 | filename.display() 40 | ); 41 | 42 | // For sending files we build a router that accepts blobs connections & routes them 43 | // to the blobs protocol. 
44 | let router = Router::builder(endpoint) 45 | .accept(iroh_blobs::ALPN, blobs) 46 | .spawn(); 47 | 48 | tokio::signal::ctrl_c().await?; 49 | 50 | // Gracefully shut down the node 51 | println!("Shutting down."); 52 | router.shutdown().await?; 53 | } 54 | ["receive", ticket, filename] => { 55 | let filename: PathBuf = filename.parse()?; 56 | let abs_path = std::path::absolute(filename)?; 57 | let ticket: BlobTicket = ticket.parse()?; 58 | 59 | // For receiving files, we create a "downloader" that allows us to fetch files 60 | // from other nodes via iroh connections 61 | let downloader = store.downloader(&endpoint); 62 | 63 | println!("Starting download."); 64 | 65 | downloader 66 | .download(ticket.hash(), Some(ticket.addr().id)) 67 | .await?; 68 | 69 | println!("Finished download."); 70 | println!("Copying to destination."); 71 | 72 | store.blobs().export(ticket.hash(), abs_path).await?; 73 | 74 | println!("Finished copying."); 75 | 76 | // Gracefully shut down the node 77 | println!("Shutting down."); 78 | endpoint.close().await; 79 | } 80 | _ => { 81 | println!("Couldn't parse command line arguments: {args:?}"); 82 | println!("Usage:"); 83 | println!(" # to send:"); 84 | println!(" cargo run --example transfer -- send [FILE]"); 85 | println!(" # this will print a ticket."); 86 | println!(); 87 | println!(" # to receive:"); 88 | println!(" cargo run --example transfer -- receive [TICKET] [FILE]"); 89 | } 90 | } 91 | 92 | Ok(()) 93 | } 94 | -------------------------------------------------------------------------------- /src/get/error.rs: -------------------------------------------------------------------------------- 1 | //! Error returned from get operations 2 | use std::io; 3 | 4 | use iroh::endpoint::{ConnectionError, ReadError, VarInt, WriteError}; 5 | use n0_error::{stack_error, AnyError}; 6 | 7 | use crate::get::fsm::{ 8 | AtBlobHeaderNextError, AtClosingNextError, ConnectedNextError, DecodeError, InitialNextError, 9 | }; 10 | 11 | /// Failures for a get operation 12 | #[stack_error(derive, add_meta)] 13 | pub enum GetError { 14 | #[error(transparent)] 15 | InitialNext { 16 | #[error(from)] 17 | source: InitialNextError, 18 | }, 19 | #[error(transparent)] 20 | ConnectedNext { 21 | #[error(from)] 22 | source: ConnectedNextError, 23 | }, 24 | #[error(transparent)] 25 | AtBlobHeaderNext { 26 | #[error(from)] 27 | source: AtBlobHeaderNextError, 28 | }, 29 | #[error(transparent)] 30 | Decode { 31 | #[error(from)] 32 | source: DecodeError, 33 | }, 34 | #[error(transparent)] 35 | IrpcSend { 36 | #[error(from)] 37 | source: irpc::channel::SendError, 38 | }, 39 | #[error(transparent)] 40 | AtClosingNext { 41 | #[error(from)] 42 | source: AtClosingNextError, 43 | }, 44 | #[error("local failure")] 45 | LocalFailure { source: AnyError }, 46 | #[error("bad request")] 47 | BadRequest { source: AnyError }, 48 | } 49 | 50 | impl GetError { 51 | pub fn iroh_error_code(&self) -> Option { 52 | if let Some(ReadError::Reset(code)) = self 53 | .remote_read() 54 | .and_then(|source| source.get_ref()) 55 | .and_then(|e| e.downcast_ref::()) 56 | { 57 | Some(*code) 58 | } else if let Some(WriteError::Stopped(code)) = self 59 | .remote_write() 60 | .and_then(|source| source.get_ref()) 61 | .and_then(|e| e.downcast_ref::()) 62 | { 63 | Some(*code) 64 | } else if let Some(ConnectionError::ApplicationClosed(ac)) = self 65 | .open() 66 | .and_then(|source| source.get_ref()) 67 | .and_then(|e| e.downcast_ref::()) 68 | { 69 | Some(ac.error_code) 70 | } else { 71 | None 72 | } 73 | } 74 | 75 | pub fn 
remote_write(&self) -> Option<&io::Error> { 76 | match self { 77 | Self::ConnectedNext { 78 | source: ConnectedNextError::Write { source, .. }, 79 | .. 80 | } => Some(source), 81 | _ => None, 82 | } 83 | } 84 | 85 | pub fn open(&self) -> Option<&io::Error> { 86 | match self { 87 | Self::InitialNext { 88 | source: InitialNextError::Open { source, .. }, 89 | .. 90 | } => Some(source), 91 | _ => None, 92 | } 93 | } 94 | 95 | pub fn remote_read(&self) -> Option<&io::Error> { 96 | match self { 97 | Self::AtBlobHeaderNext { 98 | source: AtBlobHeaderNextError::Read { source, .. }, 99 | .. 100 | } => Some(source), 101 | Self::Decode { 102 | source: DecodeError::Read { source, .. }, 103 | .. 104 | } => Some(source), 105 | Self::AtClosingNext { 106 | source: AtClosingNextError::Read { source, .. }, 107 | .. 108 | } => Some(source), 109 | _ => None, 110 | } 111 | } 112 | 113 | pub fn local_write(&self) -> Option<&io::Error> { 114 | match self { 115 | Self::Decode { 116 | source: DecodeError::Write { source, .. }, 117 | .. 118 | } => Some(source), 119 | _ => None, 120 | } 121 | } 122 | } 123 | 124 | pub type GetResult = std::result::Result; 125 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # iroh-blobs 2 | 3 | **NOTE: this version of iroh-blobs is not yet considered production quality. For now, if you need production quality, use iroh-blobs 0.35** 4 | 5 | This crate provides blob and blob sequence transfer support for iroh. It implements a simple request-response protocol based on BLAKE3 verified streaming. 6 | 7 | A request describes data in terms of BLAKE3 hashes and byte ranges. It is possible to request blobs or ranges of blobs, as well as entire sequences of blobs in one request. 8 | 9 | The requester opens a QUIC stream to the provider and sends the request. The provider answers with the requested data, encoded as [BLAKE3](https://github.com/BLAKE3-team/BLAKE3-specs/blob/master/blake3.pdf) verified streams, on the same QUIC stream. 10 | 11 | This crate is used together with [iroh](https://crates.io/crates/iroh). Connection establishment is left up to the user or higher level APIs. 12 | 13 | ## Concepts 14 | 15 | - **Blob:** a sequence of bytes of arbitrary size, without any metadata. 16 | 17 | - **Link:** a 32 byte BLAKE3 hash of a blob. 18 | 19 | - **HashSeq:** a blob that contains a sequence of links. Its size is a multiple of 32. 20 | 21 | - **Provider:** The side that provides data and answers requests. Providers wait for incoming requests from Requests. 22 | 23 | - **Requester:** The side that asks for data. It is initiating requests to one or many providers. 24 | 25 | A node can be a provider and a requester at the same time. 26 | 27 | ## Getting started 28 | 29 | The `iroh-blobs` protocol was designed to be used in conjunction with `iroh`. [Iroh](https://docs.rs/iroh) is a networking library for making direct connections, these connections are what power the data transfers in `iroh-blobs`. 30 | 31 | Iroh provides a [`Router`](https://docs.rs/iroh/latest/iroh/protocol/struct.Router.html) that takes an [`Endpoint`](https://docs.rs/iroh/latest/iroh/endpoint/struct.Endpoint.html) and any protocols needed for the application. Similar to a router in webserver library, it runs a loop accepting incoming connections and routes them to the specific protocol handler, based on `ALPN`. 
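On the fetching side, the same store types expose a downloader that pulls a blob from a provider, given its hash and node address. Below is a hedged sketch of that direction (it assumes an already-bound `Endpoint` named `endpoint` and a ticket string printed by a provider; error handling is elided), followed by the full provider setup.

```rust,ignore
use iroh_blobs::{store::mem::MemStore, ticket::BlobTicket};

// parse the ticket printed by the providing node
let ticket: BlobTicket = ticket_str.parse()?;

// fetch the blob into a local store from the node named in the ticket
let store = MemStore::new();
let downloader = store.downloader(&endpoint);
downloader
    .download(ticket.hash(), Some(ticket.addr().id))
    .await?;
```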
32 | 33 | Here is a basic example of how to set up `iroh-blobs` with `iroh`: 34 | 35 | ```rust,no_run 36 | use iroh::{protocol::Router, Endpoint}; 37 | use iroh_blobs::{store::mem::MemStore, BlobsProtocol, ticket::BlobTicket}; 38 | 39 | #[tokio::main] 40 | async fn main() -> anyhow::Result<()> { 41 | // create an iroh endpoint that includes the standard discovery mechanisms 42 | // we've built at number0 43 | let endpoint = Endpoint::bind().await?; 44 | 45 | // create a protocol handler using an in-memory blob store. 46 | let store = MemStore::new(); 47 | let tag = store.add_slice(b"Hello world").await?; 48 | 49 | let _ = endpoint.online().await; 50 | let addr = endpoint.addr(); 51 | let ticket = BlobTicket::new(addr, tag.hash, tag.format); 52 | 53 | // build the router 54 | let blobs = BlobsProtocol::new(&store, None); 55 | let router = Router::builder(endpoint) 56 | .accept(iroh_blobs::ALPN, blobs) 57 | .spawn(); 58 | 59 | println!("We are now serving {}", ticket); 60 | 61 | // wait for control-c 62 | tokio::signal::ctrl_c().await; 63 | 64 | // clean shutdown of router and store 65 | router.shutdown().await?; 66 | Ok(()) 67 | } 68 | ``` 69 | 70 | ## Examples 71 | 72 | Examples that use `iroh-blobs` can be found in [this repo](https://github.com/n0-computer/iroh-blobs/tree/main/examples). 73 | 74 | # License 75 | 76 | This project is licensed under either of 77 | 78 | * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or 79 | ) 80 | * MIT license ([LICENSE-MIT](LICENSE-MIT) or 81 | ) 82 | 83 | at your option. 84 | 85 | ### Contribution 86 | 87 | Unless you explicitly state otherwise, any contribution intentionally submitted 88 | for inclusion in this project by you, as defined in the Apache-2.0 license, 89 | shall be dual licensed as above, without any additional terms or conditions. 90 | -------------------------------------------------------------------------------- /src/store/fs/meta/tables.rs: -------------------------------------------------------------------------------- 1 | //! Table definitions and accessors for the redb database. 2 | use redb::{ReadableTable, TableDefinition, TableError}; 3 | 4 | use super::EntryState; 5 | use crate::{api::Tag, store::fs::delete_set::FileTransaction, Hash, HashAndFormat}; 6 | 7 | pub(super) const BLOBS_TABLE: TableDefinition = TableDefinition::new("blobs-0"); 8 | 9 | pub(super) const TAGS_TABLE: TableDefinition = TableDefinition::new("tags-0"); 10 | 11 | pub(super) const INLINE_DATA_TABLE: TableDefinition = 12 | TableDefinition::new("inline-data-0"); 13 | 14 | pub(super) const INLINE_OUTBOARD_TABLE: TableDefinition = 15 | TableDefinition::new("inline-outboard-0"); 16 | 17 | /// A trait similar to [`redb::ReadableTable`] but for all tables that make up 18 | /// the blob store. This can be used in places where either a readonly or 19 | /// mutable table is needed. 20 | pub(super) trait ReadableTables { 21 | fn blobs(&self) -> &impl ReadableTable; 22 | fn tags(&self) -> &impl ReadableTable; 23 | fn inline_data(&self) -> &impl ReadableTable; 24 | fn inline_outboard(&self) -> &impl ReadableTable; 25 | } 26 | 27 | /// A struct similar to [`redb::Table`] but for all tables that make up the 28 | /// blob store. 
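///
/// A minimal sketch of how the tables are opened inside a write transaction
/// (`tx` and `ftx` are assumed to already exist; see `Tables::new` below):
///
/// ```ignore
/// // `tx` is a redb::WriteTransaction, `ftx` a FileTransaction from the delete set
/// let mut tables = Tables::new(&tx, &ftx)?;
/// let entry = tables.blobs.get(hash)?;
/// ```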
29 | pub(super) struct Tables<'a> { 30 | pub blobs: redb::Table<'a, Hash, EntryState>, 31 | pub tags: redb::Table<'a, Tag, HashAndFormat>, 32 | pub inline_data: redb::Table<'a, Hash, &'static [u8]>, 33 | pub inline_outboard: redb::Table<'a, Hash, &'static [u8]>, 34 | pub ftx: &'a FileTransaction<'a>, 35 | } 36 | 37 | impl<'txn> Tables<'txn> { 38 | pub fn new( 39 | tx: &'txn redb::WriteTransaction, 40 | ds: &'txn FileTransaction<'txn>, 41 | ) -> std::result::Result { 42 | Ok(Self { 43 | blobs: tx.open_table(BLOBS_TABLE)?, 44 | tags: tx.open_table(TAGS_TABLE)?, 45 | inline_data: tx.open_table(INLINE_DATA_TABLE)?, 46 | inline_outboard: tx.open_table(INLINE_OUTBOARD_TABLE)?, 47 | ftx: ds, 48 | }) 49 | } 50 | } 51 | 52 | impl ReadableTables for Tables<'_> { 53 | fn blobs(&self) -> &impl ReadableTable { 54 | &self.blobs 55 | } 56 | fn tags(&self) -> &impl ReadableTable { 57 | &self.tags 58 | } 59 | fn inline_data(&self) -> &impl ReadableTable { 60 | &self.inline_data 61 | } 62 | fn inline_outboard(&self) -> &impl ReadableTable { 63 | &self.inline_outboard 64 | } 65 | } 66 | 67 | /// A struct similar to [`redb::ReadOnlyTable`] but for all tables that make up 68 | /// the blob store. 69 | #[derive(Debug)] 70 | pub(crate) struct ReadOnlyTables { 71 | pub blobs: redb::ReadOnlyTable, 72 | pub tags: redb::ReadOnlyTable, 73 | pub inline_data: redb::ReadOnlyTable, 74 | pub inline_outboard: redb::ReadOnlyTable, 75 | } 76 | 77 | impl ReadOnlyTables { 78 | pub fn new(tx: &redb::ReadTransaction) -> std::result::Result { 79 | Ok(Self { 80 | blobs: tx.open_table(BLOBS_TABLE)?, 81 | tags: tx.open_table(TAGS_TABLE)?, 82 | inline_data: tx.open_table(INLINE_DATA_TABLE)?, 83 | inline_outboard: tx.open_table(INLINE_OUTBOARD_TABLE)?, 84 | }) 85 | } 86 | } 87 | 88 | impl ReadableTables for ReadOnlyTables { 89 | fn blobs(&self) -> &impl ReadableTable { 90 | &self.blobs 91 | } 92 | fn tags(&self) -> &impl ReadableTable { 93 | &self.tags 94 | } 95 | fn inline_data(&self) -> &impl ReadableTable { 96 | &self.inline_data 97 | } 98 | fn inline_outboard(&self) -> &impl ReadableTable { 99 | &self.inline_outboard 100 | } 101 | } 102 | -------------------------------------------------------------------------------- /src/store/util/sparse_mem_file.rs: -------------------------------------------------------------------------------- 1 | use std::{io, ops::Deref}; 2 | 3 | use bao_tree::io::{ 4 | mixed::ReadBytesAt, 5 | sync::{ReadAt, Size, WriteAt}, 6 | }; 7 | use bytes::Bytes; 8 | use range_collections::{range_set::RangeSetRange, RangeSet2}; 9 | 10 | /// A file that is sparse in memory 11 | /// 12 | /// It is not actually using sparse storage to make reading faster, so it will 13 | /// not conserve memory. It is just a way to remember the gaps so we can 14 | /// write it to a file in a sparse way later. 
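///
/// A minimal usage sketch (not compiled as a doctest):
///
/// ```rust,ignore
/// use bao_tree::io::sync::WriteAt;
///
/// let mut file = SparseMemFile::new();
/// // write two non-contiguous chunks; the gap in between is remembered
/// file.write_at(0, b"hello").unwrap();
/// file.write_at(100, b"world").unwrap();
/// let (data, ranges) = file.into_parts();
/// assert_eq!(data.len(), 105); // gaps are filled with zeros in memory
/// // `ranges` still records that 5..100 was never actually written
/// ```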
15 | #[derive(derive_more::Debug)] 16 | pub struct SparseMemFile { 17 | /// The data, with gaps filled with zeros 18 | #[debug("{} bytes", data.len())] 19 | data: Vec, 20 | /// The ranges that are not zeros, so we can distinguish between zeros and gaps 21 | ranges: RangeSet2, 22 | } 23 | 24 | impl Default for SparseMemFile { 25 | fn default() -> Self { 26 | Self::new() 27 | } 28 | } 29 | 30 | impl From> for SparseMemFile { 31 | fn from(data: Vec) -> Self { 32 | let ranges = RangeSet2::from(0..data.len()); 33 | Self { data, ranges } 34 | } 35 | } 36 | 37 | impl TryFrom for Vec { 38 | type Error = io::Error; 39 | 40 | fn try_from(value: SparseMemFile) -> Result { 41 | let (data, ranges) = value.into_parts(); 42 | if ranges == RangeSet2::from(0..data.len()) { 43 | Ok(data) 44 | } else { 45 | Err(io::Error::new( 46 | io::ErrorKind::InvalidData, 47 | "SparseMemFile has gaps", 48 | )) 49 | } 50 | } 51 | } 52 | 53 | impl SparseMemFile { 54 | /// Create a new, empty SparseMemFile 55 | pub fn new() -> Self { 56 | Self { 57 | data: Vec::new(), 58 | ranges: RangeSet2::empty(), 59 | } 60 | } 61 | 62 | /// Get the data and the valid ranges 63 | pub fn into_parts(self) -> (Vec, RangeSet2) { 64 | (self.data, self.ranges) 65 | } 66 | 67 | /// Persist the SparseMemFile to a WriteAt 68 | /// 69 | /// This will not persist the gaps, only the data that was written. 70 | pub fn persist(&self, mut target: impl WriteAt) -> io::Result<()> { 71 | let size = self.data.len(); 72 | for range in self.ranges.iter() { 73 | let range = match range { 74 | RangeSetRange::Range(range) => *range.start..*range.end, 75 | RangeSetRange::RangeFrom(range) => *range.start..size, 76 | }; 77 | let start = range.start.try_into().unwrap(); 78 | let buf = &self.data[range]; 79 | target.write_at(start, buf)?; 80 | } 81 | Ok(()) 82 | } 83 | } 84 | 85 | impl AsRef<[u8]> for SparseMemFile { 86 | fn as_ref(&self) -> &[u8] { 87 | &self.data 88 | } 89 | } 90 | 91 | impl Deref for SparseMemFile { 92 | type Target = [u8]; 93 | 94 | fn deref(&self) -> &Self::Target { 95 | &self.data 96 | } 97 | } 98 | 99 | impl ReadAt for SparseMemFile { 100 | fn read_at(&self, offset: u64, buf: &mut [u8]) -> io::Result { 101 | self.data.read_at(offset, buf) 102 | } 103 | } 104 | 105 | impl ReadBytesAt for SparseMemFile { 106 | fn read_bytes_at(&self, offset: u64, size: usize) -> io::Result { 107 | self.data.read_bytes_at(offset, size) 108 | } 109 | } 110 | 111 | impl WriteAt for SparseMemFile { 112 | fn write_at(&mut self, offset: u64, buf: &[u8]) -> io::Result { 113 | let start: usize = offset.try_into().map_err(|_| io::ErrorKind::InvalidInput)?; 114 | let end = start 115 | .checked_add(buf.len()) 116 | .ok_or(io::ErrorKind::InvalidInput)?; 117 | let n = self.data.write_at(offset, buf)?; 118 | self.ranges |= RangeSet2::from(start..end); 119 | Ok(n) 120 | } 121 | 122 | fn flush(&mut self) -> io::Result<()> { 123 | Ok(()) 124 | } 125 | } 126 | 127 | impl Size for SparseMemFile { 128 | fn size(&self) -> io::Result> { 129 | Ok(Some(self.data.len() as u64)) 130 | } 131 | } 132 | -------------------------------------------------------------------------------- /src/store/util/mem_or_file.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | fmt::Debug, 3 | fs::File, 4 | io::{self, Read, Seek}, 5 | }; 6 | 7 | use bao_tree::io::{ 8 | mixed::ReadBytesAt, 9 | sync::{ReadAt, Size}, 10 | }; 11 | use bytes::Bytes; 12 | 13 | use super::SliceInfoExt; 14 | 15 | /// A wrapper for a file with a fixed size. 
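///
/// A minimal sketch, assuming an existing data file on disk:
///
/// ```rust,ignore
/// use std::fs::File;
///
/// let file = File::open("some.data")?;
/// let size = file.metadata()?.len();
/// // remember the size so `Size::size` does not have to stat the file again
/// let data = FixedSize::new(file, size);
/// ```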
16 | #[derive(Debug)] 17 | pub struct FixedSize { 18 | file: T, 19 | pub size: u64, 20 | } 21 | 22 | impl FixedSize { 23 | pub fn new(file: T, size: u64) -> Self { 24 | Self { file, size } 25 | } 26 | } 27 | 28 | impl FixedSize { 29 | pub fn try_clone(&self) -> io::Result { 30 | Ok(Self::new(self.file.try_clone()?, self.size)) 31 | } 32 | } 33 | 34 | impl ReadAt for FixedSize { 35 | fn read_at(&self, offset: u64, buf: &mut [u8]) -> io::Result { 36 | self.file.read_at(offset, buf) 37 | } 38 | } 39 | 40 | impl ReadBytesAt for FixedSize { 41 | fn read_bytes_at(&self, offset: u64, size: usize) -> io::Result { 42 | self.file.read_bytes_at(offset, size) 43 | } 44 | } 45 | 46 | impl Size for FixedSize { 47 | fn size(&self) -> io::Result> { 48 | Ok(Some(self.size)) 49 | } 50 | } 51 | 52 | /// This is a general purpose Either, just like Result, except that the two cases 53 | /// are Mem for something that is in memory, and File for something that is somewhere 54 | /// external and only available via io. 55 | #[derive(Debug)] 56 | pub enum MemOrFile { 57 | /// We got it all in memory 58 | Mem(M), 59 | /// A file 60 | File(F), 61 | } 62 | 63 | impl, F: Debug> MemOrFile { 64 | pub fn fmt_short(&self) -> String { 65 | match self { 66 | Self::Mem(mem) => format!("Mem(size={},addr={})", mem.as_ref().len(), mem.addr_short()), 67 | Self::File(_) => "File".to_string(), 68 | } 69 | } 70 | } 71 | 72 | impl MemOrFile> { 73 | pub fn size(&self) -> u64 { 74 | match self { 75 | MemOrFile::Mem(mem) => mem.len() as u64, 76 | MemOrFile::File(file) => file.size, 77 | } 78 | } 79 | } 80 | 81 | impl Read for MemOrFile { 82 | fn read(&mut self, buf: &mut [u8]) -> io::Result { 83 | match self { 84 | MemOrFile::Mem(mem) => mem.read(buf), 85 | MemOrFile::File(file) => file.read(buf), 86 | } 87 | } 88 | } 89 | 90 | impl Seek for MemOrFile { 91 | fn seek(&mut self, pos: io::SeekFrom) -> io::Result { 92 | match self { 93 | MemOrFile::Mem(mem) => mem.seek(pos), 94 | MemOrFile::File(file) => file.seek(pos), 95 | } 96 | } 97 | } 98 | 99 | impl, B: ReadAt> ReadAt for MemOrFile { 100 | fn read_at(&self, offset: u64, buf: &mut [u8]) -> io::Result { 101 | match self { 102 | MemOrFile::Mem(mem) => mem.as_ref().read_at(offset, buf), 103 | MemOrFile::File(file) => file.read_at(offset, buf), 104 | } 105 | } 106 | } 107 | 108 | impl ReadBytesAt for MemOrFile { 109 | fn read_bytes_at(&self, offset: u64, size: usize) -> io::Result { 110 | match self { 111 | MemOrFile::Mem(mem) => mem.read_bytes_at(offset, size), 112 | MemOrFile::File(file) => file.read_bytes_at(offset, size), 113 | } 114 | } 115 | } 116 | 117 | impl Size for MemOrFile { 118 | fn size(&self) -> io::Result> { 119 | match self { 120 | MemOrFile::Mem(mem) => mem.size(), 121 | MemOrFile::File(file) => file.size(), 122 | } 123 | } 124 | } 125 | 126 | impl Default for MemOrFile { 127 | fn default() -> Self { 128 | MemOrFile::Mem(Default::default()) 129 | } 130 | } 131 | 132 | impl MemOrFile { 133 | /// Create an empty MemOrFile, using a Bytes for the Mem part 134 | pub fn empty() -> Self { 135 | MemOrFile::default() 136 | } 137 | } 138 | 139 | impl MemOrFile { 140 | /// True if this is a Mem 141 | pub fn is_mem(&self) -> bool { 142 | matches!(self, MemOrFile::Mem(_)) 143 | } 144 | } 145 | -------------------------------------------------------------------------------- /.github/workflows/flaky.yaml: -------------------------------------------------------------------------------- 1 | # Run all tests, including flaky test. 2 | # 3 | # The default CI workflow ignores flaky tests. 
This workflow will run 4 | # all tests, including ignored ones. 5 | # 6 | # To use this workflow you can either: 7 | # 8 | # - Label a PR with "flaky-test", the normal CI workflow will not run 9 | # any jobs but the jobs here will be run. Note that to merge the PR 10 | # you'll need to remove the label eventually because the normal CI 11 | # jobs are required by branch protection. 12 | # 13 | # - Manually trigger the workflow, you may choose a branch for this to 14 | # run on. 15 | # 16 | # Additionally this jobs runs once a day on a schedule. 17 | # 18 | # Currently doctests are not run by this workflow. 19 | 20 | name: Flaky CI 21 | 22 | on: 23 | pull_request: 24 | types: [ 'labeled', 'unlabeled', 'opened', 'synchronize', 'reopened' ] 25 | schedule: 26 | # 06:30 UTC every day 27 | - cron: '30 6 * * *' 28 | workflow_dispatch: 29 | inputs: 30 | branch: 31 | description: 'Branch to run on, defaults to main' 32 | required: true 33 | default: 'main' 34 | type: string 35 | 36 | concurrency: 37 | group: flaky-${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} 38 | cancel-in-progress: true 39 | 40 | env: 41 | IROH_FORCE_STAGING_RELAYS: "1" 42 | 43 | jobs: 44 | tests: 45 | if: "contains(github.event.pull_request.labels.*.name, 'flaky-test') || github.event_name == 'workflow_dispatch' || github.event_name == 'schedule'" 46 | uses: './.github/workflows/tests.yaml' 47 | with: 48 | flaky: true 49 | git-ref: ${{ inputs.branch }} 50 | notify: 51 | needs: tests 52 | if: ${{ always() }} 53 | runs-on: ubuntu-latest 54 | steps: 55 | - name: Extract test results 56 | run: | 57 | printf '${{ toJSON(needs) }}\n' 58 | result=$(echo '${{ toJSON(needs) }}' | jq -r .tests.result) 59 | echo TESTS_RESULT=$result 60 | echo "TESTS_RESULT=$result" >>"$GITHUB_ENV" 61 | - name: download nextest reports 62 | uses: actions/download-artifact@v6 63 | with: 64 | pattern: libtest_run_${{ github.run_number }}-${{ github.run_attempt }}-* 65 | merge-multiple: true 66 | path: nextest-results 67 | - name: create summary report 68 | id: make_summary 69 | run: | 70 | # prevent the glob expression in the loop to match on itself when the dir is empty 71 | shopt -s nullglob 72 | # to deal with multiline outputs it's recommended to use a random EOF, the syntax is based on 73 | # https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#multiline-strings 74 | EOF=aP51VriWCxNJ1JjvmO9i 75 | echo "summary<<$EOF" >> $GITHUB_OUTPUT 76 | echo "Flaky tests failure:" >> $GITHUB_OUTPUT 77 | echo " " >> $GITHUB_OUTPUT 78 | for report in nextest-results/*.json; do 79 | # remove the name prefix and extension, and split the parts 80 | name=$(echo ${report:16:-5} | tr _ ' ') 81 | echo $name 82 | echo "- **$name**" >> $GITHUB_OUTPUT 83 | # select the failed tests 84 | # the tests have this format "crate::module$test_name", the sed expressions remove the quotes and replace $ for :: 85 | failure=$(jq --slurp '.[] | select(.["type"] == "test" and .["event"] == "failed" ) | .["name"]' $report | sed -e 's/^"//g' -e 's/\$/::/' -e 's/"//') 86 | echo "$failure" 87 | echo "$failure" >> $GITHUB_OUTPUT 88 | done 89 | echo "" >> $GITHUB_OUTPUT 90 | echo "See https://github.com/${{ github.repository }}/actions/workflows/flaky.yaml" >> $GITHUB_OUTPUT 91 | echo "$EOF" >> $GITHUB_OUTPUT 92 | - name: Notify discord on failure 93 | uses: n0-computer/discord-webhook-notify@v1 94 | if: ${{ env.TESTS_RESULT == 'failure' || env.TESTS_RESULT == 'success' }} 95 | with: 96 | text: "Flaky tests in **${{ github.repository 
}}**:" 97 | severity: ${{ env.TESTS_RESULT == 'failure' && 'warn' || 'info' }} 98 | details: ${{ env.TESTS_RESULT == 'failure' && steps.make_summary.outputs.summary || 'No flaky failures!' }} 99 | webhookUrl: ${{ secrets.DISCORD_N0_GITHUB_CHANNEL_WEBHOOK_URL }} 100 | -------------------------------------------------------------------------------- /tests/blobs.rs: -------------------------------------------------------------------------------- 1 | #![cfg(feature = "fs-store")] 2 | use std::{ 3 | net::{Ipv4Addr, SocketAddr, SocketAddrV4}, 4 | ops::Deref, 5 | path::Path, 6 | }; 7 | 8 | use iroh_blobs::{ 9 | api::{ 10 | blobs::{AddProgressItem, Blobs}, 11 | Store, 12 | }, 13 | store::{fs::FsStore, mem::MemStore}, 14 | Hash, 15 | }; 16 | use n0_future::StreamExt; 17 | use testresult::TestResult; 18 | 19 | /// Interesting sizes for testing. 20 | pub const INTERESTING_SIZES: [usize; 8] = [ 21 | 0, // annoying corner case - always present, handled by the api 22 | 1, // less than 1 chunk, data inline, outboard not needed 23 | 1024, // exactly 1 chunk, data inline, outboard not needed 24 | 1024 * 16 - 1, // less than 1 chunk group, data inline, outboard not needed 25 | 1024 * 16, // exactly 1 chunk group, data inline, outboard not needed 26 | 1024 * 16 + 1, // data file, outboard inline (just 1 hash pair) 27 | 1024 * 1024, // data file, outboard inline (many hash pairs) 28 | 1024 * 1024 * 8, // data file, outboard file 29 | ]; 30 | 31 | async fn blobs_smoke(path: &Path, blobs: &Blobs) -> TestResult<()> { 32 | // test importing and exporting bytes 33 | { 34 | let expected = b"hello".to_vec(); 35 | let expected_hash = Hash::new(&expected); 36 | let tt = blobs.add_bytes(expected.clone()).await?; 37 | let hash = tt.hash; 38 | assert_eq!(hash, expected_hash); 39 | let actual = blobs.get_bytes(hash).await?; 40 | assert_eq!(actual, expected); 41 | } 42 | 43 | // test importing and exporting a file 44 | { 45 | let expected = b"somestuffinafile".to_vec(); 46 | let temp1 = path.join("test1"); 47 | std::fs::write(&temp1, &expected)?; 48 | let tt = blobs.add_path(temp1).await?; 49 | let hash = tt.hash; 50 | let expected_hash = Hash::new(&expected); 51 | assert_eq!(hash, expected_hash); 52 | 53 | let temp2 = path.join("test2"); 54 | blobs.export(hash, &temp2).await?; 55 | let actual = std::fs::read(&temp2)?; 56 | assert_eq!(actual, expected); 57 | } 58 | 59 | // test importing a large file with progress 60 | { 61 | let expected = vec![0u8; 1024 * 1024]; 62 | let temp1 = path.join("test3"); 63 | std::fs::write(&temp1, &expected)?; 64 | let mut stream = blobs.add_path(temp1).stream().await; 65 | let mut res = None; 66 | while let Some(item) = stream.next().await { 67 | if let AddProgressItem::Done(tt) = item { 68 | res = Some(tt); 69 | break; 70 | } 71 | } 72 | let actual_hash = res.as_ref().map(|x| x.hash()); 73 | let expected_hash = Hash::new(&expected); 74 | assert_eq!(actual_hash, Some(expected_hash)); 75 | } 76 | 77 | { 78 | let hashes = blobs.list().hashes().await?; 79 | assert_eq!(hashes.len(), 3); 80 | } 81 | Ok(()) 82 | } 83 | 84 | #[tokio::test] 85 | async fn blobs_smoke_fs() -> TestResult { 86 | tracing_subscriber::fmt::try_init().ok(); 87 | let td = tempfile::tempdir()?; 88 | let store = FsStore::load(td.path().join("a")).await?; 89 | blobs_smoke(td.path(), store.blobs()).await?; 90 | store.shutdown().await?; 91 | Ok(()) 92 | } 93 | 94 | #[tokio::test] 95 | async fn blobs_smoke_mem() -> TestResult { 96 | tracing_subscriber::fmt::try_init().ok(); 97 | let td = tempfile::tempdir()?; 98 | let store = 
MemStore::new(); 99 | blobs_smoke(td.path(), store.blobs()).await?; 100 | store.shutdown().await?; 101 | Ok(()) 102 | } 103 | 104 | #[tokio::test] 105 | async fn blobs_smoke_fs_rpc() -> TestResult { 106 | tracing_subscriber::fmt::try_init().ok(); 107 | let unspecified = SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::LOCALHOST, 0)); 108 | let (server, cert) = irpc::util::make_server_endpoint(unspecified)?; 109 | let client = irpc::util::make_client_endpoint(unspecified, &[cert.as_ref()])?; 110 | let td = tempfile::tempdir()?; 111 | let store = FsStore::load(td.path().join("a")).await?; 112 | n0_future::task::spawn(store.deref().clone().listen(server.clone())); 113 | let api = Store::connect(client, server.local_addr()?); 114 | blobs_smoke(td.path(), api.blobs()).await?; 115 | api.shutdown().await?; 116 | Ok(()) 117 | } 118 | -------------------------------------------------------------------------------- /src/store/fs/options.rs: -------------------------------------------------------------------------------- 1 | //! Options for configuring the file store. 2 | use std::path::{Path, PathBuf}; 3 | 4 | use n0_future::time::Duration; 5 | 6 | use super::{meta::raw_outboard_size, temp_name}; 7 | use crate::{store::gc::GcConfig, Hash}; 8 | 9 | /// Options for directories used by the file store. 10 | #[derive(Debug, Clone)] 11 | pub struct PathOptions { 12 | /// Path to the directory where data and outboard files are stored. 13 | pub data_path: PathBuf, 14 | /// Path to the directory where temp files are stored. 15 | /// This *must* be on the same device as `data_path`, since we need to 16 | /// atomically move temp files into place. 17 | pub temp_path: PathBuf, 18 | } 19 | 20 | impl PathOptions { 21 | pub fn new(root: &Path) -> Self { 22 | Self { 23 | data_path: root.join("data"), 24 | temp_path: root.join("temp"), 25 | } 26 | } 27 | 28 | pub fn data_path(&self, hash: &Hash) -> PathBuf { 29 | self.data_path.join(format!("{}.data", hash.to_hex())) 30 | } 31 | 32 | pub fn outboard_path(&self, hash: &Hash) -> PathBuf { 33 | self.data_path.join(format!("{}.obao4", hash.to_hex())) 34 | } 35 | 36 | pub fn sizes_path(&self, hash: &Hash) -> PathBuf { 37 | self.data_path.join(format!("{}.sizes4", hash.to_hex())) 38 | } 39 | 40 | pub fn bitfield_path(&self, hash: &Hash) -> PathBuf { 41 | self.data_path.join(format!("{}.bitfield", hash.to_hex())) 42 | } 43 | 44 | pub fn temp_file_name(&self) -> PathBuf { 45 | self.temp_path.join(temp_name()) 46 | } 47 | } 48 | 49 | /// Options for inlining small complete data or outboards. 50 | #[derive(Debug, Clone)] 51 | pub struct InlineOptions { 52 | /// Maximum data size to inline. 53 | pub max_data_inlined: u64, 54 | /// Maximum outboard size to inline. 55 | pub max_outboard_inlined: u64, 56 | } 57 | 58 | impl InlineOptions { 59 | /// Do not inline anything, ever. 60 | pub const NO_INLINE: Self = Self { 61 | max_data_inlined: 0, 62 | max_outboard_inlined: 0, 63 | }; 64 | /// Always inline everything 65 | pub const ALWAYS_INLINE: Self = Self { 66 | max_data_inlined: u64::MAX, 67 | max_outboard_inlined: u64::MAX, 68 | }; 69 | } 70 | 71 | impl Default for InlineOptions { 72 | fn default() -> Self { 73 | Self { 74 | max_data_inlined: 1024 * 16, 75 | max_outboard_inlined: 1024 * 16, 76 | } 77 | } 78 | } 79 | 80 | /// Options for transaction batching. 81 | #[derive(Debug, Clone)] 82 | pub struct BatchOptions { 83 | /// Maximum number of actor messages to batch before creating a new read transaction. 
84 | pub max_read_batch: usize, 85 | /// Maximum duration to wait before committing a read transaction. 86 | pub max_read_duration: Duration, 87 | /// Maximum number of actor messages to batch before committing write transaction. 88 | pub max_write_batch: usize, 89 | /// Maximum duration to wait before committing a write transaction. 90 | pub max_write_duration: Duration, 91 | } 92 | 93 | impl Default for BatchOptions { 94 | fn default() -> Self { 95 | Self { 96 | max_read_batch: 10000, 97 | max_read_duration: Duration::from_secs(1), 98 | max_write_batch: 1000, 99 | max_write_duration: Duration::from_millis(500), 100 | } 101 | } 102 | } 103 | 104 | /// Options for the file store. 105 | #[derive(Debug, Clone)] 106 | pub struct Options { 107 | /// Path options. 108 | pub path: PathOptions, 109 | /// Inline storage options. 110 | pub inline: InlineOptions, 111 | /// Transaction batching options. 112 | pub batch: BatchOptions, 113 | /// Gc configuration. 114 | pub gc: Option, 115 | } 116 | 117 | impl Options { 118 | /// Create new optinos with the given root path and everything else default. 119 | pub fn new(root: &Path) -> Self { 120 | Self { 121 | path: PathOptions::new(root), 122 | inline: InlineOptions::default(), 123 | batch: BatchOptions::default(), 124 | gc: None, 125 | } 126 | } 127 | 128 | // check if the data will be inlined, based on the size of the data 129 | pub fn is_inlined_data(&self, data_size: u64) -> bool { 130 | data_size <= self.inline.max_data_inlined 131 | } 132 | 133 | // check if the outboard will be inlined, based on the size of the *outboard* 134 | pub fn is_inlined_outboard(&self, outboard_size: u64) -> bool { 135 | outboard_size <= self.inline.max_outboard_inlined 136 | } 137 | 138 | // check if both the data and outboard will be inlined, based on the size of the data 139 | pub fn is_inlined_all(&self, data_size: u64) -> bool { 140 | let outboard_size = raw_outboard_size(data_size); 141 | self.is_inlined_data(data_size) && self.is_inlined_outboard(outboard_size) 142 | } 143 | } 144 | -------------------------------------------------------------------------------- /tests/tags.rs: -------------------------------------------------------------------------------- 1 | #![cfg(feature = "fs-store")] 2 | use std::{ 3 | net::{Ipv4Addr, SocketAddr, SocketAddrV4}, 4 | ops::Deref, 5 | }; 6 | 7 | use iroh_blobs::{ 8 | api::{ 9 | self, 10 | tags::{TagInfo, Tags}, 11 | Store, 12 | }, 13 | store::{fs::FsStore, mem::MemStore}, 14 | BlobFormat, Hash, HashAndFormat, 15 | }; 16 | use n0_future::{Stream, StreamExt}; 17 | use testresult::TestResult; 18 | 19 | async fn to_vec(stream: impl Stream>) -> api::Result> { 20 | let res = stream.collect::>().await; 21 | res.into_iter().collect::>>() 22 | } 23 | 24 | fn expected(tags: impl IntoIterator) -> Vec { 25 | tags.into_iter() 26 | .map(|tag| TagInfo::new(tag, Hash::new(tag))) 27 | .collect() 28 | } 29 | 30 | async fn set(tags: &Tags, names: impl IntoIterator) -> TestResult<()> { 31 | for name in names { 32 | tags.set(name, Hash::new(name)).await?; 33 | } 34 | Ok(()) 35 | } 36 | 37 | async fn tags_smoke(tags: &Tags) -> TestResult<()> { 38 | set(tags, ["a", "b", "c", "d", "e"]).await?; 39 | let stream = tags.list().await?; 40 | let res = to_vec(stream).await?; 41 | assert_eq!(res, expected(["a", "b", "c", "d", "e"])); 42 | 43 | let stream = tags.list_range("b".."d").await?; 44 | let res = to_vec(stream).await?; 45 | assert_eq!(res, expected(["b", "c"])); 46 | 47 | let stream = tags.list_range("b"..).await?; 48 | let res = 
to_vec(stream).await?; 49 | assert_eq!(res, expected(["b", "c", "d", "e"])); 50 | 51 | let stream = tags.list_range(.."d").await?; 52 | let res = to_vec(stream).await?; 53 | assert_eq!(res, expected(["a", "b", "c"])); 54 | 55 | let stream = tags.list_range(..="d").await?; 56 | let res = to_vec(stream).await?; 57 | assert_eq!(res, expected(["a", "b", "c", "d"])); 58 | 59 | tags.delete_range("b"..).await?; 60 | let stream = tags.list().await?; 61 | let res = to_vec(stream).await?; 62 | assert_eq!(res, expected(["a"])); 63 | 64 | tags.delete_range(..="a").await?; 65 | let stream = tags.list().await?; 66 | let res = to_vec(stream).await?; 67 | assert_eq!(res, expected([])); 68 | 69 | set(tags, ["a", "aa", "aaa", "aab", "b"]).await?; 70 | 71 | let stream = tags.list_prefix("aa").await?; 72 | let res = to_vec(stream).await?; 73 | assert_eq!(res, expected(["aa", "aaa", "aab"])); 74 | 75 | tags.delete_prefix("aa").await?; 76 | let stream = tags.list().await?; 77 | let res = to_vec(stream).await?; 78 | assert_eq!(res, expected(["a", "b"])); 79 | 80 | tags.delete_prefix("").await?; 81 | let stream = tags.list().await?; 82 | let res = to_vec(stream).await?; 83 | assert_eq!(res, expected([])); 84 | 85 | set(tags, ["a", "b", "c"]).await?; 86 | 87 | assert_eq!( 88 | tags.get("b").await?, 89 | Some(TagInfo::new("b", Hash::new("b"))) 90 | ); 91 | 92 | tags.delete("b").await?; 93 | let stream = tags.list().await?; 94 | let res = to_vec(stream).await?; 95 | assert_eq!(res, expected(["a", "c"])); 96 | 97 | assert_eq!(tags.get("b").await?, None); 98 | 99 | tags.delete_all().await?; 100 | 101 | tags.set("a", HashAndFormat::hash_seq(Hash::new("a"))) 102 | .await?; 103 | tags.set("b", HashAndFormat::raw(Hash::new("b"))).await?; 104 | let stream = tags.list_hash_seq().await?; 105 | let res = to_vec(stream).await?; 106 | assert_eq!( 107 | res, 108 | vec![TagInfo { 109 | name: "a".into(), 110 | hash: Hash::new("a"), 111 | format: BlobFormat::HashSeq, 112 | }] 113 | ); 114 | 115 | tags.delete_all().await?; 116 | set(tags, ["c"]).await?; 117 | tags.rename("c", "f").await?; 118 | let stream = tags.list().await?; 119 | let res = to_vec(stream).await?; 120 | assert_eq!( 121 | res, 122 | vec![TagInfo { 123 | name: "f".into(), 124 | hash: Hash::new("c"), 125 | format: BlobFormat::Raw, 126 | }] 127 | ); 128 | 129 | let res = tags.rename("y", "z").await; 130 | assert!(res.is_err()); 131 | Ok(()) 132 | } 133 | 134 | #[tokio::test] 135 | async fn tags_smoke_mem() -> TestResult<()> { 136 | tracing_subscriber::fmt::try_init().ok(); 137 | let store = MemStore::new(); 138 | tags_smoke(store.tags()).await 139 | } 140 | 141 | #[tokio::test] 142 | async fn tags_smoke_fs() -> TestResult<()> { 143 | tracing_subscriber::fmt::try_init().ok(); 144 | let td = tempfile::tempdir()?; 145 | let store = FsStore::load(td.path().join("a")).await?; 146 | tags_smoke(store.tags()).await 147 | } 148 | 149 | #[tokio::test] 150 | async fn tags_smoke_fs_rpc() -> TestResult<()> { 151 | tracing_subscriber::fmt::try_init().ok(); 152 | let unspecified = SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::LOCALHOST, 0)); 153 | let (server, cert) = irpc::util::make_server_endpoint(unspecified)?; 154 | let client = irpc::util::make_client_endpoint(unspecified, &[cert.as_ref()])?; 155 | let td = tempfile::tempdir()?; 156 | let store = FsStore::load(td.path().join("a")).await?; 157 | n0_future::task::spawn(store.deref().clone().listen(server.clone())); 158 | let api = Store::connect(client, server.local_addr()?); 159 | tags_smoke(api.tags()).await?; 160 | 
api.shutdown().await?; 161 | Ok(()) 162 | } 163 | -------------------------------------------------------------------------------- /src/store/fs/delete_set.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | collections::BTreeSet, 3 | sync::{Arc, Mutex}, 4 | }; 5 | 6 | use tracing::warn; 7 | 8 | use super::options::PathOptions; 9 | use crate::Hash; 10 | 11 | #[derive(Debug, PartialEq, Eq, PartialOrd, Ord)] 12 | pub(super) enum BaoFilePart { 13 | Outboard, 14 | Data, 15 | Sizes, 16 | Bitfield, 17 | } 18 | 19 | /// Creates a pair of a protect handle and a delete handle. 20 | /// 21 | /// The protect handle can be used to protect files from deletion. 22 | /// The delete handle can be used to create transactions in which files can be marked for deletion. 23 | pub(super) fn pair(options: Arc) -> (ProtectHandle, DeleteHandle) { 24 | let ds = Arc::new(Mutex::new(DeleteSet::default())); 25 | (ProtectHandle(ds.clone()), DeleteHandle::new(ds, options)) 26 | } 27 | 28 | /// Helper to keep track of files to delete after a transaction is committed. 29 | #[derive(Debug, Default)] 30 | struct DeleteSet(BTreeSet<(Hash, BaoFilePart)>); 31 | 32 | impl DeleteSet { 33 | /// Mark a file as to be deleted after the transaction is committed. 34 | fn delete(&mut self, hash: Hash, parts: impl IntoIterator) { 35 | for part in parts { 36 | self.0.insert((hash, part)); 37 | } 38 | } 39 | 40 | /// Mark a file as to be kept after the transaction is committed. 41 | /// 42 | /// This will cancel any previous delete for the same file in the same transaction. 43 | fn protect(&mut self, hash: Hash, parts: impl IntoIterator) { 44 | for part in parts { 45 | self.0.remove(&(hash, part)); 46 | } 47 | } 48 | 49 | /// Apply the delete set and clear it. 50 | /// 51 | /// This will delete all files marked for deletion and then clear the set. 52 | /// Errors will just be logged. 53 | fn commit(&mut self, options: &PathOptions) { 54 | for (hash, to_delete) in &self.0 { 55 | tracing::debug!("deleting {:?} for {hash}", to_delete); 56 | let path = match to_delete { 57 | BaoFilePart::Data => options.data_path(hash), 58 | BaoFilePart::Outboard => options.outboard_path(hash), 59 | BaoFilePart::Sizes => options.sizes_path(hash), 60 | BaoFilePart::Bitfield => options.bitfield_path(hash), 61 | }; 62 | if let Err(cause) = std::fs::remove_file(&path) { 63 | // Ignore NotFound errors, if the file is already gone that's fine. 64 | if cause.kind() != std::io::ErrorKind::NotFound { 65 | warn!( 66 | "failed to delete {:?} {}: {}", 67 | to_delete, 68 | path.display(), 69 | cause 70 | ); 71 | } 72 | } 73 | } 74 | self.0.clear(); 75 | } 76 | 77 | fn clear(&mut self) { 78 | self.0.clear(); 79 | } 80 | 81 | fn is_empty(&self) -> bool { 82 | self.0.is_empty() 83 | } 84 | } 85 | 86 | #[derive(Debug, Clone)] 87 | pub(super) struct ProtectHandle(Arc>); 88 | 89 | /// Protect handle, to be used concurrently with transactions to mark files for keeping. 90 | impl ProtectHandle { 91 | /// Inside or outside a transaction, mark files as to be kept 92 | /// 93 | /// If we are not inside a transaction, this will do nothing. 94 | pub(super) fn protect(&self, hash: Hash, parts: impl IntoIterator) { 95 | let mut guard = self.0.lock().unwrap(); 96 | guard.protect(hash, parts); 97 | } 98 | } 99 | 100 | /// A delete handle. The only thing you can do with this is to open transactions that keep track of files to delete. 
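///
/// A minimal sketch of the intended flow, assuming `options: Arc<PathOptions>`
/// and a `hash` are available:
///
/// ```rust,ignore
/// let (protect, mut delete) = pair(options);
/// let tx = delete.begin_write();
/// // mark the data and outboard files of `hash` for deletion
/// tx.delete(hash, [BaoFilePart::Data, BaoFilePart::Outboard]);
/// // a concurrent reader can rescue a file before the transaction commits
/// protect.protect(hash, [BaoFilePart::Data]);
/// // only the parts still marked (here: the outboard) are actually removed
/// tx.commit();
/// ```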
101 | #[derive(Debug)] 102 | pub(super) struct DeleteHandle { 103 | ds: Arc>, 104 | options: Arc, 105 | } 106 | 107 | impl DeleteHandle { 108 | fn new(ds: Arc>, options: Arc) -> Self { 109 | Self { ds, options } 110 | } 111 | 112 | /// Open a file transaction. You can open only one transaction at a time. 113 | pub(super) fn begin_write(&mut self) -> FileTransaction<'_> { 114 | FileTransaction::new(self) 115 | } 116 | } 117 | 118 | /// A file transaction. Inside a transaction, you can mark files for deletion. 119 | /// 120 | /// Dropping a transaction will clear the delete set. Committing a transaction will apply the delete set by actually deleting the files. 121 | #[derive(Debug)] 122 | pub(super) struct FileTransaction<'a>(&'a DeleteHandle); 123 | 124 | impl<'a> FileTransaction<'a> { 125 | fn new(inner: &'a DeleteHandle) -> Self { 126 | let guard = inner.ds.lock().unwrap(); 127 | debug_assert!(guard.is_empty()); 128 | drop(guard); 129 | Self(inner) 130 | } 131 | 132 | /// Mark files as to be deleted 133 | pub fn delete(&self, hash: Hash, parts: impl IntoIterator) { 134 | let mut guard = self.0.ds.lock().unwrap(); 135 | guard.delete(hash, parts); 136 | } 137 | 138 | /// Apply the delete set and clear it. 139 | pub fn commit(self) { 140 | let mut guard = self.0.ds.lock().unwrap(); 141 | guard.commit(&self.0.options); 142 | } 143 | } 144 | 145 | impl Drop for FileTransaction<'_> { 146 | fn drop(&mut self) { 147 | self.0.ds.lock().unwrap().clear(); 148 | } 149 | } 150 | -------------------------------------------------------------------------------- /examples/mdns-discovery.rs: -------------------------------------------------------------------------------- 1 | //! Example that runs an iroh node with local node discovery and no relay server. 2 | //! 3 | //! You can think of this as a local version of [sendme](https://www.iroh.computer/sendme) 4 | //! that only works for individual files. 5 | //! 6 | //! **This example is using a non-default feature of iroh, so you need to run it with the 7 | //! examples feature enabled.** 8 | //! 9 | //! Run the follow command to run the "accept" side, that hosts the content: 10 | //! $ cargo run --example mdns-discovery --features examples -- accept [FILE_PATH] 11 | //! Wait for output that looks like the following: 12 | //! $ cargo run --example mdns-discovery --features examples -- connect [NODE_ID] [HASH] -o [FILE_PATH] 13 | //! Run that command on another machine in the same local network, replacing [FILE_PATH] to the path on which you want to save the transferred content. 14 | use std::path::{Path, PathBuf}; 15 | 16 | use anyhow::{ensure, Result}; 17 | use clap::{Parser, Subcommand}; 18 | use iroh::{ 19 | discovery::mdns::MdnsDiscovery, protocol::Router, Endpoint, PublicKey, RelayMode, SecretKey, 20 | }; 21 | use iroh_blobs::{store::mem::MemStore, BlobsProtocol, Hash}; 22 | 23 | mod common; 24 | use common::{get_or_generate_secret_key, setup_logging}; 25 | 26 | #[derive(Debug, Parser)] 27 | #[command(version, about)] 28 | pub struct Cli { 29 | #[clap(subcommand)] 30 | command: Commands, 31 | } 32 | 33 | #[derive(Subcommand, Clone, Debug)] 34 | pub enum Commands { 35 | /// Launch an iroh node and provide the content at the given path 36 | Accept { 37 | /// path to the file you want to provide 38 | path: PathBuf, 39 | }, 40 | /// Get the node_id and hash string from a node running accept in the local network 41 | /// Download the content from that node. 
42 | Connect { 43 | /// Endpoint ID of a node on the local network 44 | endpoint_id: PublicKey, 45 | /// Hash of content you want to download from the node 46 | hash: Hash, 47 | /// save the content to a file 48 | #[clap(long, short)] 49 | out: Option, 50 | }, 51 | } 52 | 53 | async fn accept(path: &Path) -> Result<()> { 54 | if !path.is_file() { 55 | println!("Content must be a file."); 56 | return Ok(()); 57 | } 58 | 59 | let key = get_or_generate_secret_key()?; 60 | 61 | println!("Starting iroh node with mdns discovery..."); 62 | // create a new node 63 | let endpoint = Endpoint::empty_builder(RelayMode::Default) 64 | .secret_key(key) 65 | .discovery(MdnsDiscovery::builder()) 66 | .relay_mode(RelayMode::Disabled) 67 | .bind() 68 | .await?; 69 | let builder = Router::builder(endpoint.clone()); 70 | let store = MemStore::new(); 71 | let blobs = BlobsProtocol::new(&store, None); 72 | let builder = builder.accept(iroh_blobs::ALPN, blobs.clone()); 73 | let node = builder.spawn(); 74 | 75 | if !path.is_file() { 76 | println!("Content must be a file."); 77 | node.shutdown().await?; 78 | return Ok(()); 79 | } 80 | let absolute = path.canonicalize()?; 81 | println!("Adding {} as {}...", path.display(), absolute.display()); 82 | let tag = store.add_path(absolute).await?; 83 | println!("To fetch the blob:\n\tcargo run --example mdns-discovery --features examples -- connect {} {} -o [FILE_PATH]", node.endpoint().id(), tag.hash); 84 | tokio::signal::ctrl_c().await?; 85 | node.shutdown().await?; 86 | Ok(()) 87 | } 88 | 89 | async fn connect(node_id: PublicKey, hash: Hash, out: Option) -> Result<()> { 90 | let key = SecretKey::generate(&mut rand::rng()); 91 | // todo: disable discovery publishing once https://github.com/n0-computer/iroh/issues/3401 is implemented 92 | let discovery = MdnsDiscovery::builder(); 93 | 94 | println!("Starting iroh node with mdns discovery..."); 95 | // create a new node 96 | let endpoint = Endpoint::empty_builder(RelayMode::Disabled) 97 | .secret_key(key) 98 | .discovery(discovery) 99 | .bind() 100 | .await?; 101 | let store = MemStore::new(); 102 | 103 | println!("NodeID: {}", endpoint.id()); 104 | let conn = endpoint.connect(node_id, iroh_blobs::ALPN).await?; 105 | let stats = store.remote().fetch(conn, hash).await?; 106 | println!( 107 | "Fetched {} bytes for hash {}", 108 | stats.payload_bytes_read, hash 109 | ); 110 | if let Some(path) = out { 111 | let absolute = std::env::current_dir()?.join(&path); 112 | ensure!(!absolute.is_dir(), "output must not be a directory"); 113 | println!( 114 | "exporting {hash} to {} -> {}", 115 | path.display(), 116 | absolute.display() 117 | ); 118 | let size = store.export(hash, absolute).await?; 119 | println!("Exported {size} bytes"); 120 | } 121 | 122 | endpoint.close().await; 123 | // Shutdown the store. This is not needed for the mem store, but would be 124 | // necessary for a persistent store to allow it to write any pending data to disk. 
125 | store.shutdown().await?; 126 | Ok(()) 127 | } 128 | 129 | #[tokio::main] 130 | async fn main() -> anyhow::Result<()> { 131 | setup_logging(); 132 | let cli = Cli::parse(); 133 | 134 | match &cli.command { 135 | Commands::Accept { path } => { 136 | accept(path).await?; 137 | } 138 | Commands::Connect { 139 | endpoint_id, 140 | hash, 141 | out, 142 | } => { 143 | connect(*endpoint_id, *hash, out.clone()).await?; 144 | } 145 | } 146 | Ok(()) 147 | } 148 | -------------------------------------------------------------------------------- /examples/transfer-collection.rs: -------------------------------------------------------------------------------- 1 | //! Example that shows how to create a collection, and transfer it to another 2 | //! node. It also shows patterns for defining a "Node" struct in higher-level 3 | //! code that abstracts over these operations with an API that feels closer to 4 | //! what an application would use. 5 | //! 6 | //! Run the entire example in one command: 7 | //! $ cargo run --example transfer-collection 8 | use std::collections::HashMap; 9 | 10 | use anyhow::{Context, Result}; 11 | use iroh::{ 12 | discovery::static_provider::StaticProvider, protocol::Router, Endpoint, EndpointAddr, RelayMode, 13 | }; 14 | use iroh_blobs::{ 15 | api::{downloader::Shuffled, Store, TempTag}, 16 | format::collection::Collection, 17 | store::mem::MemStore, 18 | BlobsProtocol, Hash, HashAndFormat, 19 | }; 20 | 21 | /// Node is something you'd define in your application. It can contain whatever 22 | /// shared state you'd want to couple with network operations. 23 | struct Node { 24 | store: Store, 25 | /// Router with the blobs protocol registered, to accept blobs requests. 26 | /// We can always get the endpoint with router.endpoint() 27 | router: Router, 28 | } 29 | 30 | impl Node { 31 | async fn new(disc: &StaticProvider) -> Result { 32 | let endpoint = Endpoint::empty_builder(RelayMode::Default) 33 | .discovery(disc.clone()) 34 | .bind() 35 | .await?; 36 | 37 | let store = MemStore::new(); 38 | 39 | // this BlobsProtocol accepts connections from other nodes and serves blobs from the store 40 | // we pass None to skip subscribing to request events 41 | let blobs = BlobsProtocol::new(&store, None); 42 | // Routers group one or more protocols together to accept connections from other nodes, 43 | // here we're only using one, but could add more in a real world use case as needed 44 | let router = Router::builder(endpoint) 45 | .accept(iroh_blobs::ALPN, blobs) 46 | .spawn(); 47 | 48 | Ok(Self { 49 | store: store.into(), 50 | router, 51 | }) 52 | } 53 | 54 | // get address of this node. Has the side effect of waiting for the node 55 | // to be online & ready to accept connections 56 | async fn node_addr(&self) -> Result { 57 | self.router.endpoint().online().await; 58 | let addr = self.router.endpoint().addr(); 59 | Ok(addr) 60 | } 61 | 62 | async fn list_hashes(&self) -> Result> { 63 | self.store 64 | .blobs() 65 | .list() 66 | .hashes() 67 | .await 68 | .context("Failed to list hashes") 69 | } 70 | 71 | /// creates a collection from a given set of named blobs, adds it to the local store 72 | /// and returns the hash of the collection. 
73 | async fn create_collection(&self, named_blobs: Vec<(&str, Vec)>) -> Result { 74 | let mut collection_items: HashMap<&str, TempTag> = HashMap::new(); 75 | 76 | let tx = self.store.batch().await?; 77 | for (name, data) in named_blobs { 78 | let tmp_tag = tx.add_bytes(data).await?; 79 | collection_items.insert(name, tmp_tag); 80 | } 81 | 82 | let collection_items = collection_items 83 | .iter() 84 | .map(|(name, tag)| (name.to_string(), tag.hash())) 85 | .collect::>(); 86 | 87 | let collection = Collection::from_iter(collection_items); 88 | 89 | let tt = collection.store(&self.store).await?; 90 | self.store.tags().create(tt.hash_and_format()).await?; 91 | Ok(tt.hash()) 92 | } 93 | 94 | /// retrieve an entire collection from a given hash and provider 95 | async fn get_collection(&self, hash: Hash, provider: EndpointAddr) -> Result<()> { 96 | let req = HashAndFormat::hash_seq(hash); 97 | let addrs = Shuffled::new(vec![provider.id]); 98 | self.store 99 | .downloader(self.router.endpoint()) 100 | .download(req, addrs) 101 | .await?; 102 | Ok(()) 103 | } 104 | } 105 | 106 | #[tokio::main] 107 | async fn main() -> anyhow::Result<()> { 108 | // create a local provider for nodes to discover each other. 109 | // outside of a development environment, production apps would 110 | // use `Endpoint::bind()` or a similar method 111 | let disc = StaticProvider::new(); 112 | 113 | // create a sending node 114 | let send_node = Node::new(&disc).await?; 115 | let send_node_addr = send_node.node_addr().await?; 116 | // add a collection with three files 117 | let hash = send_node 118 | .create_collection(vec![ 119 | ("a.txt", b"this is file a".into()), 120 | ("b.txt", b"this is file b".into()), 121 | ("c.txt", b"this is file c".into()), 122 | ]) 123 | .await?; 124 | 125 | // create the receiving node 126 | let recv_node = Node::new(&disc).await?; 127 | 128 | // add the send node to the discovery provider so the recv node can find it 129 | disc.add_endpoint_info(send_node_addr.clone()); 130 | // fetch the collection and all contents 131 | recv_node.get_collection(hash, send_node_addr).await?; 132 | 133 | // when listing hashes, you'll see 5 hashes in total: 134 | // - one hash for each of the three files 135 | // - hash of the collection's metadata (this is where the "a.txt" filenames live) 136 | // - the hash of the entire collection which is just the above 4 hashes concatenated, then hashed 137 | let send_hashes = send_node.list_hashes().await?; 138 | let recv_hashes = recv_node.list_hashes().await?; 139 | assert_eq!(send_hashes.len(), recv_hashes.len()); 140 | 141 | println!("Transfer complete!"); 142 | Ok(()) 143 | } 144 | -------------------------------------------------------------------------------- /src/store/fs/meta/proto.rs: -------------------------------------------------------------------------------- 1 | //! Protocol for the metadata database. 2 | use std::fmt; 3 | 4 | use bytes::Bytes; 5 | use nested_enum_utils::enum_conversions; 6 | use tracing::Span; 7 | 8 | use super::{ActorResult, ReadOnlyTables}; 9 | use crate::{ 10 | api::proto::{ 11 | BlobStatusMsg, ClearProtectedMsg, DeleteBlobsMsg, ProcessExitRequest, ShutdownMsg, 12 | SyncDbMsg, 13 | }, 14 | store::{fs::entry_state::EntryState, util::DD}, 15 | util::channel::oneshot, 16 | Hash, 17 | }; 18 | 19 | /// Get the entry state for a hash. 20 | /// 21 | /// This will read from the blobs table and enrich the result with the content 22 | /// of the inline data and inline outboard tables if necessary. 
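///
/// A rough sketch of how the API side might issue this command; the channel
/// helper and the actor sender shown here are illustrative, not exact:
///
/// ```rust,ignore
/// let (tx, rx) = oneshot::channel();
/// let cmd: Command = Get { hash, tx, span: tracing::Span::current() }.into();
/// actor_sender.send(cmd).await?;
/// let result = rx.await?;
/// ```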
23 | pub struct Get { 24 | pub hash: Hash, 25 | pub tx: oneshot::Sender, 26 | pub span: Span, 27 | } 28 | 29 | impl fmt::Debug for Get { 30 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 31 | f.debug_struct("Get") 32 | .field("hash", &DD(self.hash.to_hex())) 33 | .finish_non_exhaustive() 34 | } 35 | } 36 | 37 | #[derive(Debug)] 38 | pub struct GetResult { 39 | pub state: ActorResult>>, 40 | } 41 | 42 | /// Get the entry state for a hash. 43 | /// 44 | /// This will read from the blobs table and enrich the result with the content 45 | /// of the inline data and inline outboard tables if necessary. 46 | #[derive(Debug)] 47 | pub struct Dump { 48 | pub tx: oneshot::Sender>, 49 | pub span: Span, 50 | } 51 | 52 | #[derive(Debug)] 53 | pub struct Snapshot { 54 | pub(crate) tx: tokio::sync::oneshot::Sender, 55 | pub span: Span, 56 | } 57 | 58 | pub struct Update { 59 | pub hash: Hash, 60 | pub state: EntryState, 61 | /// do I need this? Optional? 62 | pub tx: Option>>, 63 | pub span: Span, 64 | } 65 | 66 | impl fmt::Debug for Update { 67 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 68 | f.debug_struct("Update") 69 | .field("hash", &self.hash) 70 | .field("state", &DD(self.state.fmt_short())) 71 | .field("tx", &self.tx.is_some()) 72 | .finish() 73 | } 74 | } 75 | 76 | pub struct Set { 77 | pub hash: Hash, 78 | pub state: EntryState, 79 | pub tx: oneshot::Sender>, 80 | pub span: Span, 81 | } 82 | 83 | impl fmt::Debug for Set { 84 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 85 | f.debug_struct("Set") 86 | .field("hash", &self.hash) 87 | .field("state", &DD(self.state.fmt_short())) 88 | .finish_non_exhaustive() 89 | } 90 | } 91 | 92 | /// Modification method: create a new unique tag and set it to a value. 93 | pub use crate::api::proto::CreateTagMsg; 94 | /// Modification method: remove a range of tags. 95 | pub use crate::api::proto::DeleteTagsMsg; 96 | /// Read method: list a range of tags. 97 | pub use crate::api::proto::ListTagsMsg; 98 | /// Modification method: rename a tag. 99 | pub use crate::api::proto::RenameTagMsg; 100 | /// Modification method: set a tag to a value, or remove it. 
101 | pub use crate::api::proto::SetTagMsg; 102 | 103 | #[derive(Debug)] 104 | #[enum_conversions(Command)] 105 | pub enum ReadOnlyCommand { 106 | Get(Get), 107 | Dump(Dump), 108 | ListTags(ListTagsMsg), 109 | ClearProtected(ClearProtectedMsg), 110 | GetBlobStatus(BlobStatusMsg), 111 | } 112 | 113 | impl ReadOnlyCommand { 114 | pub fn parent_span(&self) -> tracing::Span { 115 | self.parent_span_opt() 116 | .cloned() 117 | .unwrap_or_else(tracing::Span::current) 118 | } 119 | 120 | pub fn parent_span_opt(&self) -> Option<&tracing::Span> { 121 | match self { 122 | Self::Get(x) => Some(&x.span), 123 | Self::Dump(x) => Some(&x.span), 124 | Self::ListTags(x) => x.parent_span_opt(), 125 | Self::ClearProtected(x) => x.parent_span_opt(), 126 | Self::GetBlobStatus(x) => x.parent_span_opt(), 127 | } 128 | } 129 | } 130 | 131 | #[derive(Debug)] 132 | #[enum_conversions(Command)] 133 | pub enum ReadWriteCommand { 134 | Update(Update), 135 | Set(Set), 136 | DeleteBlobw(DeleteBlobsMsg), 137 | SetTag(SetTagMsg), 138 | DeleteTags(DeleteTagsMsg), 139 | RenameTag(RenameTagMsg), 140 | CreateTag(CreateTagMsg), 141 | ProcessExit(ProcessExitRequest), 142 | } 143 | 144 | impl ReadWriteCommand { 145 | pub fn parent_span(&self) -> tracing::Span { 146 | self.parent_span_opt() 147 | .cloned() 148 | .unwrap_or_else(tracing::Span::current) 149 | } 150 | 151 | pub fn parent_span_opt(&self) -> Option<&tracing::Span> { 152 | match self { 153 | Self::Update(x) => Some(&x.span), 154 | Self::Set(x) => Some(&x.span), 155 | Self::DeleteBlobw(x) => Some(&x.span), 156 | Self::SetTag(x) => x.parent_span_opt(), 157 | Self::DeleteTags(x) => x.parent_span_opt(), 158 | Self::RenameTag(x) => x.parent_span_opt(), 159 | Self::CreateTag(x) => x.parent_span_opt(), 160 | Self::ProcessExit(_) => None, 161 | } 162 | } 163 | } 164 | 165 | #[derive(Debug)] 166 | #[enum_conversions(Command)] 167 | pub enum TopLevelCommand { 168 | SyncDb(SyncDbMsg), 169 | Shutdown(ShutdownMsg), 170 | Snapshot(Snapshot), 171 | } 172 | 173 | impl TopLevelCommand { 174 | pub fn parent_span(&self) -> tracing::Span { 175 | self.parent_span_opt() 176 | .cloned() 177 | .unwrap_or_else(tracing::Span::current) 178 | } 179 | 180 | pub fn parent_span_opt(&self) -> Option<&tracing::Span> { 181 | match self { 182 | Self::SyncDb(x) => x.parent_span_opt(), 183 | Self::Shutdown(x) => x.parent_span_opt(), 184 | Self::Snapshot(x) => Some(&x.span), 185 | } 186 | } 187 | } 188 | 189 | #[enum_conversions()] 190 | pub enum Command { 191 | ReadOnly(ReadOnlyCommand), 192 | ReadWrite(ReadWriteCommand), 193 | TopLevel(TopLevelCommand), 194 | } 195 | 196 | impl Command { 197 | pub fn non_top_level(self) -> std::result::Result { 198 | match self { 199 | Self::ReadOnly(cmd) => Ok(NonTopLevelCommand::ReadOnly(cmd)), 200 | Self::ReadWrite(cmd) => Ok(NonTopLevelCommand::ReadWrite(cmd)), 201 | _ => Err(self), 202 | } 203 | } 204 | 205 | pub fn read_only(self) -> std::result::Result { 206 | match self { 207 | Self::ReadOnly(cmd) => Ok(cmd), 208 | _ => Err(self), 209 | } 210 | } 211 | } 212 | 213 | #[derive(Debug)] 214 | #[enum_conversions()] 215 | pub enum NonTopLevelCommand { 216 | ReadOnly(ReadOnlyCommand), 217 | ReadWrite(ReadWriteCommand), 218 | } 219 | 220 | impl fmt::Debug for Command { 221 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 222 | match self { 223 | Self::ReadOnly(cmd) => cmd.fmt(f), 224 | Self::ReadWrite(cmd) => cmd.fmt(f), 225 | Self::TopLevel(cmd) => cmd.fmt(f), 226 | } 227 | } 228 | } 229 | 
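// A sketch of how a caller might use the classification helpers above; the
// handler functions named here are illustrative and not part of this module:
//
// match cmd.read_only() {
//     // can be served from the current read snapshot
//     Ok(read) => handle_read_only(tables, read),
//     // needs a write transaction, or is a top level command like shutdown
//     Err(other) => handle_other(other),
// }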
-------------------------------------------------------------------------------- /examples/expiring-tags.rs: -------------------------------------------------------------------------------- 1 | //! This example shows how to create tags that expire after a certain time. 2 | //! 3 | //! We use a prefix so we can distinguish between expiring and normal tags, and 4 | //! then encode the expiry date in the tag name after the prefix, in a format 5 | //! that sorts in the same order as the expiry date. 6 | //! 7 | //! The example creates a number of blobs and protects them directly or indirectly 8 | //! with expiring tags. Watch as the expired tags are deleted and the blobs 9 | //! are removed from the store. 10 | use std::{ 11 | ops::Deref, 12 | time::{Duration, SystemTime}, 13 | }; 14 | 15 | use chrono::Utc; 16 | use futures_lite::StreamExt; 17 | use iroh_blobs::{ 18 | api::{blobs::AddBytesOptions, Store, Tag}, 19 | hashseq::HashSeq, 20 | store::{ 21 | fs::options::{BatchOptions, InlineOptions, Options, PathOptions}, 22 | GcConfig, 23 | }, 24 | BlobFormat, Hash, 25 | }; 26 | use tokio::signal::ctrl_c; 27 | 28 | /// Using an iroh rpc client, create a tag that is marked to expire at `expiry` for all the given hashes. 29 | /// 30 | /// The tag name will be `prefix`- followed by the expiry date in iso8601 format (e.g. `expiry-2025-01-01T12:00:00Z`). 31 | async fn create_expiring_tag( 32 | store: &Store, 33 | hashes: &[Hash], 34 | prefix: &str, 35 | expiry: SystemTime, 36 | ) -> anyhow::Result<()> { 37 | let expiry = chrono::DateTime::::from(expiry); 38 | let expiry = expiry.to_rfc3339_opts(chrono::SecondsFormat::Secs, true); 39 | let tagname = format!("{prefix}-{expiry}"); 40 | if hashes.is_empty() { 41 | return Ok(()); 42 | } else if hashes.len() == 1 { 43 | let hash = hashes[0]; 44 | store.tags().set(&tagname, hash).await?; 45 | } else { 46 | let hs = hashes.iter().copied().collect::(); 47 | store 48 | .add_bytes_with_opts(AddBytesOptions { 49 | data: hs.into(), 50 | format: BlobFormat::HashSeq, 51 | }) 52 | .with_named_tag(&tagname) 53 | .await?; 54 | }; 55 | println!("Created tag {tagname}"); 56 | Ok(()) 57 | } 58 | 59 | async fn delete_expired_tags(blobs: &Store, prefix: &str, bulk: bool) -> anyhow::Result<()> { 60 | let prefix = format!("{prefix}-"); 61 | let now = chrono::Utc::now(); 62 | let end = format!( 63 | "{}-{}", 64 | prefix, 65 | now.to_rfc3339_opts(chrono::SecondsFormat::Secs, true) 66 | ); 67 | if bulk { 68 | // delete all tags with the prefix and an expiry date before now 69 | // 70 | // this should be very efficient, since it is just a single database operation 71 | blobs 72 | .tags() 73 | .delete_range(Tag::from(prefix.clone())..Tag::from(end)) 74 | .await?; 75 | } else { 76 | // find tags to delete one by one and then delete them 77 | // 78 | // this allows us to print the tags before deleting them 79 | let mut tags = blobs.tags().list().await?; 80 | let mut to_delete = Vec::new(); 81 | while let Some(tag) = tags.next().await { 82 | let tag = tag?.name; 83 | if let Some(rest) = tag.0.strip_prefix(prefix.as_bytes()) { 84 | let Ok(expiry) = std::str::from_utf8(rest) else { 85 | tracing::warn!("Tag {} does have non utf8 expiry", tag); 86 | continue; 87 | }; 88 | let Ok(expiry) = chrono::DateTime::parse_from_rfc3339(expiry) else { 89 | tracing::warn!("Tag {} does have invalid expiry date", tag); 90 | continue; 91 | }; 92 | let expiry = expiry.with_timezone(&Utc); 93 | if expiry < now { 94 | to_delete.push(tag); 95 | } 96 | } 97 | } 98 | for tag in to_delete { 99 | 
println!("Deleting expired tag {tag}\n"); 100 | blobs.tags().delete(tag).await?; 101 | } 102 | } 103 | Ok(()) 104 | } 105 | 106 | async fn print_store_info(store: &Store) -> anyhow::Result<()> { 107 | let now = chrono::Utc::now(); 108 | let mut tags = store.tags().list().await?; 109 | println!( 110 | "Current time: {}", 111 | now.to_rfc3339_opts(chrono::SecondsFormat::Secs, true) 112 | ); 113 | println!("Tags:"); 114 | while let Some(tag) = tags.next().await { 115 | let tag = tag?; 116 | println!(" {tag:?}"); 117 | } 118 | let mut blobs = store.list().stream().await?; 119 | println!("Blobs:"); 120 | while let Some(item) = blobs.next().await { 121 | println!(" {}", item?); 122 | } 123 | println!(); 124 | Ok(()) 125 | } 126 | 127 | async fn info_task(store: Store) -> anyhow::Result<()> { 128 | n0_future::time::sleep(Duration::from_secs(1)).await; 129 | loop { 130 | print_store_info(&store).await?; 131 | n0_future::time::sleep(Duration::from_secs(5)).await; 132 | } 133 | } 134 | 135 | async fn delete_expired_tags_task(store: Store, prefix: &str) -> anyhow::Result<()> { 136 | loop { 137 | delete_expired_tags(&store, prefix, false).await?; 138 | n0_future::time::sleep(Duration::from_secs(5)).await; 139 | } 140 | } 141 | 142 | #[tokio::main] 143 | async fn main() -> anyhow::Result<()> { 144 | tracing_subscriber::fmt::init(); 145 | let path = std::env::current_dir()?.join("blobs"); 146 | let options = Options { 147 | path: PathOptions::new(&path), 148 | gc: Some(GcConfig { 149 | add_protected: None, 150 | interval: Duration::from_secs(10), 151 | }), 152 | inline: InlineOptions::default(), 153 | batch: BatchOptions::default(), 154 | }; 155 | let store = 156 | iroh_blobs::store::fs::FsStore::load_with_opts(path.join("blobs.db"), options).await?; 157 | 158 | // setup: add some data and tag it 159 | { 160 | // add several blobs and tag them with an expiry date 10 seconds in the future 161 | let batch = store.batch().await?; 162 | let a = batch.add_bytes("blob 1".as_bytes()).await?; 163 | let b = batch.add_bytes("blob 2".as_bytes()).await?; 164 | 165 | let expires_at = SystemTime::now() 166 | .checked_add(Duration::from_secs(10)) 167 | .unwrap(); 168 | create_expiring_tag(&store, &[a.hash(), b.hash()], "expiring", expires_at).await?; 169 | 170 | // add a single blob and tag it with an expiry date 60 seconds in the future 171 | let c = batch.add_bytes("blob 3".as_bytes()).await?; 172 | let expires_at = SystemTime::now() 173 | .checked_add(Duration::from_secs(60)) 174 | .unwrap(); 175 | create_expiring_tag(&store, &[c.hash()], "expiring", expires_at).await?; 176 | // batch goes out of scope, so data is only protected by the tags we created 177 | } 178 | 179 | // delete expired tags every 5 seconds 180 | let delete_task = tokio::spawn(delete_expired_tags_task(store.deref().clone(), "expiring")); 181 | // print all tags and blobs every 5 seconds 182 | let info_task = tokio::spawn(info_task(store.deref().clone())); 183 | 184 | ctrl_c().await?; 185 | delete_task.abort(); 186 | info_task.abort(); 187 | store.shutdown().await?; 188 | Ok(()) 189 | } 190 | -------------------------------------------------------------------------------- /src/api/tags.rs: -------------------------------------------------------------------------------- 1 | //! Tags API 2 | //! 3 | //! The main entry point is the [`Tags`] struct. 
4 | use std::ops::RangeBounds; 5 | 6 | use n0_future::{Stream, StreamExt}; 7 | use ref_cast::RefCast; 8 | use tracing::trace; 9 | 10 | pub use super::proto::{ 11 | CreateTagRequest as CreateOptions, DeleteTagsRequest as DeleteOptions, 12 | ListTagsRequest as ListOptions, RenameTagRequest as RenameOptions, SetTagRequest as SetOptions, 13 | TagInfo, 14 | }; 15 | use super::{ 16 | proto::{CreateTempTagRequest, Scope}, 17 | ApiClient, Tag, TempTag, 18 | }; 19 | use crate::{api::proto::ListTempTagsRequest, HashAndFormat}; 20 | 21 | /// The API for interacting with tags and temp tags. 22 | #[derive(Debug, Clone, ref_cast::RefCast)] 23 | #[repr(transparent)] 24 | pub struct Tags { 25 | client: ApiClient, 26 | } 27 | 28 | impl Tags { 29 | pub(crate) fn ref_from_sender(sender: &ApiClient) -> &Self { 30 | Self::ref_cast(sender) 31 | } 32 | 33 | pub async fn list_temp_tags(&self) -> irpc::Result> { 34 | let options = ListTempTagsRequest; 35 | trace!("{:?}", options); 36 | let res = self.client.rpc(options).await?; 37 | Ok(n0_future::stream::iter(res)) 38 | } 39 | 40 | /// List all tags with options. 41 | /// 42 | /// This is the most flexible way to list tags. All the other list methods are just convenience 43 | /// methods that call this one with the appropriate options. 44 | pub async fn list_with_opts( 45 | &self, 46 | options: ListOptions, 47 | ) -> irpc::Result>> { 48 | trace!("{:?}", options); 49 | let res = self.client.rpc(options).await?; 50 | Ok(n0_future::stream::iter(res)) 51 | } 52 | 53 | /// Get the value of a single tag 54 | pub async fn get(&self, name: impl AsRef<[u8]>) -> super::RequestResult> { 55 | let mut stream = self 56 | .list_with_opts(ListOptions::single(name.as_ref())) 57 | .await?; 58 | Ok(stream.next().await.transpose()?) 59 | } 60 | 61 | pub async fn set_with_opts(&self, options: SetOptions) -> super::RequestResult<()> { 62 | trace!("{:?}", options); 63 | self.client.rpc(options).await??; 64 | Ok(()) 65 | } 66 | 67 | pub async fn set( 68 | &self, 69 | name: impl AsRef<[u8]>, 70 | value: impl Into, 71 | ) -> super::RequestResult<()> { 72 | self.set_with_opts(SetOptions { 73 | name: Tag::from(name.as_ref()), 74 | value: value.into(), 75 | }) 76 | .await 77 | } 78 | 79 | /// List a range of tags 80 | pub async fn list_range( 81 | &self, 82 | range: R, 83 | ) -> irpc::Result>> 84 | where 85 | R: RangeBounds, 86 | E: AsRef<[u8]>, 87 | { 88 | self.list_with_opts(ListOptions::range(range)).await 89 | } 90 | 91 | /// Lists all tags with the given prefix. 92 | pub async fn list_prefix( 93 | &self, 94 | prefix: impl AsRef<[u8]>, 95 | ) -> irpc::Result>> { 96 | self.list_with_opts(ListOptions::prefix(prefix.as_ref())) 97 | .await 98 | } 99 | 100 | /// Lists all tags. 101 | pub async fn list(&self) -> irpc::Result>> { 102 | self.list_with_opts(ListOptions::all()).await 103 | } 104 | 105 | /// Lists all tags with a hash_seq format. 106 | pub async fn list_hash_seq(&self) -> irpc::Result>> { 107 | self.list_with_opts(ListOptions::hash_seq()).await 108 | } 109 | 110 | /// Deletes a tag, with full control over options. All other delete methods 111 | /// wrap this. 112 | /// 113 | /// Returns the number of tags actually removed. Attempting to delete a non-existent tag will *not* fail. 114 | pub async fn delete_with_opts(&self, options: DeleteOptions) -> super::RequestResult { 115 | trace!("{:?}", options); 116 | let deleted = self.client.rpc(options).await??; 117 | Ok(deleted) 118 | } 119 | 120 | /// Deletes a tag. 121 | /// 122 | /// Returns the number of tags actually removed. 
Attempting to delete a non-existent tag will *not* fail. 123 | pub async fn delete(&self, name: impl AsRef<[u8]>) -> super::RequestResult { 124 | self.delete_with_opts(DeleteOptions::single(name.as_ref())) 125 | .await 126 | } 127 | 128 | /// Deletes a range of tags. 129 | /// 130 | /// Returns the number of tags actually removed. Attempting to delete a non-existent tag will *not* fail. 131 | pub async fn delete_range(&self, range: R) -> super::RequestResult 132 | where 133 | R: RangeBounds, 134 | E: AsRef<[u8]>, 135 | { 136 | self.delete_with_opts(DeleteOptions::range(range)).await 137 | } 138 | 139 | /// Delete all tags with the given prefix. 140 | /// 141 | /// Returns the number of tags actually removed. Attempting to delete a non-existent tag will *not* fail. 142 | pub async fn delete_prefix(&self, prefix: impl AsRef<[u8]>) -> super::RequestResult { 143 | self.delete_with_opts(DeleteOptions::prefix(prefix.as_ref())) 144 | .await 145 | } 146 | 147 | /// Delete all tags. Use with care. After this, all data will be garbage collected. 148 | /// 149 | /// Returns the number of tags actually removed. Attempting to delete a non-existent tag will *not* fail. 150 | pub async fn delete_all(&self) -> super::RequestResult { 151 | self.delete_with_opts(DeleteOptions { 152 | from: None, 153 | to: None, 154 | }) 155 | .await 156 | } 157 | 158 | /// Rename a tag atomically 159 | /// 160 | /// If the tag does not exist, this will return an error. 161 | pub async fn rename_with_opts(&self, options: RenameOptions) -> super::RequestResult<()> { 162 | trace!("{:?}", options); 163 | self.client.rpc(options).await??; 164 | Ok(()) 165 | } 166 | 167 | /// Rename a tag atomically 168 | /// 169 | /// If the tag does not exist, this will return an error. 170 | pub async fn rename( 171 | &self, 172 | from: impl AsRef<[u8]>, 173 | to: impl AsRef<[u8]>, 174 | ) -> super::RequestResult<()> { 175 | self.rename_with_opts(RenameOptions { 176 | from: Tag::from(from.as_ref()), 177 | to: Tag::from(to.as_ref()), 178 | }) 179 | .await 180 | } 181 | 182 | pub async fn create_with_opts(&self, options: CreateOptions) -> super::RequestResult { 183 | trace!("{:?}", options); 184 | let rx = self.client.rpc(options); 185 | Ok(rx.await??) 186 | } 187 | 188 | pub async fn create(&self, value: impl Into) -> super::RequestResult { 189 | self.create_with_opts(CreateOptions { 190 | value: value.into(), 191 | }) 192 | .await 193 | } 194 | 195 | pub async fn temp_tag(&self, value: impl Into) -> irpc::Result { 196 | let value = value.into(); 197 | let msg = CreateTempTagRequest { 198 | scope: Scope::GLOBAL, 199 | value, 200 | }; 201 | self.client.rpc(msg).await 202 | } 203 | } 204 | -------------------------------------------------------------------------------- /examples/compression.rs: -------------------------------------------------------------------------------- 1 | /// Example how to use compression with iroh-blobs 2 | /// 3 | /// We create a derived protocol that compresses both requests and responses using lz4 4 | /// or any other compression algorithm supported by async-compression. 
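///
/// As a rough sketch, another codec from async-compression could be dropped in by
/// mirroring the `lz4` module below; the zstd types shown here are illustrative and
/// assume the corresponding cargo feature of `async-compression` is enabled:
///
/// ```ignore
/// use async_compression::tokio::{bufread::ZstdDecoder, write::ZstdEncoder};
/// use tokio::io::BufReader;
///
/// // same wrapper pattern as the lz4 module, with the codec types exchanged
/// struct SendStream(ZstdEncoder<iroh::endpoint::SendStream>);
/// struct RecvStream(ZstdDecoder<BufReader<iroh::endpoint::RecvStream>>);
/// ```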
5 | mod common; 6 | use std::{fmt::Debug, path::PathBuf}; 7 | 8 | use anyhow::Result; 9 | use clap::Parser; 10 | use common::setup_logging; 11 | use iroh::protocol::ProtocolHandler; 12 | use iroh_blobs::{ 13 | api::Store, 14 | get::StreamPair, 15 | provider::{ 16 | self, 17 | events::{ClientConnected, EventSender, HasErrorCode}, 18 | handle_stream, 19 | }, 20 | store::mem::MemStore, 21 | ticket::BlobTicket, 22 | }; 23 | use tracing::debug; 24 | 25 | use crate::common::get_or_generate_secret_key; 26 | 27 | #[derive(Debug, Parser)] 28 | #[command(version, about)] 29 | pub enum Args { 30 | /// Limit requests by endpoint id 31 | Provide { 32 | /// Path for files to add. 33 | path: PathBuf, 34 | }, 35 | /// Get a blob. Just for completeness sake. 36 | Get { 37 | /// Ticket for the blob to download 38 | ticket: BlobTicket, 39 | /// Path to save the blob to 40 | #[clap(long)] 41 | target: Option, 42 | }, 43 | } 44 | 45 | trait Compression: Clone + Send + Sync + Debug + 'static { 46 | const ALPN: &'static [u8]; 47 | fn recv_stream( 48 | &self, 49 | stream: iroh::endpoint::RecvStream, 50 | ) -> impl iroh_blobs::util::RecvStream + Sync + 'static; 51 | fn send_stream( 52 | &self, 53 | stream: iroh::endpoint::SendStream, 54 | ) -> impl iroh_blobs::util::SendStream + Sync + 'static; 55 | } 56 | 57 | mod lz4 { 58 | use std::io; 59 | 60 | use async_compression::tokio::{bufread::Lz4Decoder, write::Lz4Encoder}; 61 | use iroh::endpoint::VarInt; 62 | use iroh_blobs::util::{ 63 | AsyncReadRecvStream, AsyncReadRecvStreamExtra, AsyncWriteSendStream, 64 | AsyncWriteSendStreamExtra, 65 | }; 66 | use tokio::io::{AsyncRead, AsyncWrite, BufReader}; 67 | 68 | struct SendStream(Lz4Encoder); 69 | 70 | impl SendStream { 71 | pub fn new(inner: iroh::endpoint::SendStream) -> AsyncWriteSendStream { 72 | AsyncWriteSendStream::new(Self(Lz4Encoder::new(inner))) 73 | } 74 | } 75 | 76 | impl AsyncWriteSendStreamExtra for SendStream { 77 | fn inner(&mut self) -> &mut (impl AsyncWrite + Unpin + Send) { 78 | &mut self.0 79 | } 80 | 81 | fn reset(&mut self, code: VarInt) -> io::Result<()> { 82 | Ok(self.0.get_mut().reset(code)?) 83 | } 84 | 85 | async fn stopped(&mut self) -> io::Result> { 86 | Ok(self.0.get_mut().stopped().await?) 87 | } 88 | 89 | fn id(&self) -> u64 { 90 | self.0.get_ref().id().index() 91 | } 92 | } 93 | 94 | struct RecvStream(Lz4Decoder>); 95 | 96 | impl RecvStream { 97 | pub fn new(inner: iroh::endpoint::RecvStream) -> AsyncReadRecvStream { 98 | AsyncReadRecvStream::new(Self(Lz4Decoder::new(BufReader::new(inner)))) 99 | } 100 | } 101 | 102 | impl AsyncReadRecvStreamExtra for RecvStream { 103 | fn inner(&mut self) -> &mut (impl AsyncRead + Unpin + Send) { 104 | &mut self.0 105 | } 106 | 107 | fn stop(&mut self, code: VarInt) -> io::Result<()> { 108 | Ok(self.0.get_mut().get_mut().stop(code)?) 
109 | } 110 | 111 | fn id(&self) -> u64 { 112 | self.0.get_ref().get_ref().id().index() 113 | } 114 | } 115 | 116 | #[derive(Debug, Clone)] 117 | pub struct Compression; 118 | 119 | impl super::Compression for Compression { 120 | const ALPN: &[u8] = concat_const::concat_bytes!(b"lz4/", iroh_blobs::ALPN); 121 | fn recv_stream( 122 | &self, 123 | stream: iroh::endpoint::RecvStream, 124 | ) -> impl iroh_blobs::util::RecvStream + Sync + 'static { 125 | RecvStream::new(stream) 126 | } 127 | fn send_stream( 128 | &self, 129 | stream: iroh::endpoint::SendStream, 130 | ) -> impl iroh_blobs::util::SendStream + Sync + 'static { 131 | SendStream::new(stream) 132 | } 133 | } 134 | } 135 | 136 | #[derive(Debug, Clone)] 137 | struct CompressedBlobsProtocol { 138 | store: Store, 139 | events: EventSender, 140 | compression: C, 141 | } 142 | 143 | impl CompressedBlobsProtocol { 144 | fn new(store: &Store, events: EventSender, compression: C) -> Self { 145 | Self { 146 | store: store.clone(), 147 | events, 148 | compression, 149 | } 150 | } 151 | } 152 | 153 | impl ProtocolHandler for CompressedBlobsProtocol { 154 | async fn accept( 155 | &self, 156 | connection: iroh::endpoint::Connection, 157 | ) -> std::result::Result<(), iroh::protocol::AcceptError> { 158 | let connection_id = connection.stable_id() as u64; 159 | if let Err(cause) = self 160 | .events 161 | .client_connected(|| ClientConnected { 162 | connection_id, 163 | endpoint_id: Some(connection.remote_id()), 164 | }) 165 | .await 166 | { 167 | connection.close(cause.code(), cause.reason()); 168 | debug!("closing connection: {cause}"); 169 | return Ok(()); 170 | } 171 | while let Ok((send, recv)) = connection.accept_bi().await { 172 | let send = self.compression.send_stream(send); 173 | let recv = self.compression.recv_stream(recv); 174 | let store = self.store.clone(); 175 | let pair = provider::StreamPair::new(connection_id, recv, send, self.events.clone()); 176 | tokio::spawn(handle_stream(pair, store)); 177 | } 178 | Ok(()) 179 | } 180 | } 181 | 182 | #[tokio::main] 183 | async fn main() -> Result<()> { 184 | setup_logging(); 185 | let args = Args::parse(); 186 | let secret = get_or_generate_secret_key()?; 187 | let endpoint = iroh::Endpoint::builder().secret_key(secret).bind().await?; 188 | let compression = lz4::Compression; 189 | match args { 190 | Args::Provide { path } => { 191 | let store = MemStore::new(); 192 | let tag = store.add_path(path).await?; 193 | let blobs = CompressedBlobsProtocol::new(&store, EventSender::DEFAULT, compression); 194 | let router = iroh::protocol::Router::builder(endpoint.clone()) 195 | .accept(lz4::Compression::ALPN, blobs) 196 | .spawn(); 197 | let ticket = BlobTicket::new(endpoint.id().into(), tag.hash, tag.format); 198 | println!("Serving blob with hash {}", tag.hash); 199 | println!("Ticket: {ticket}"); 200 | println!("Node is running. 
Press Ctrl-C to exit."); 201 | tokio::signal::ctrl_c().await?; 202 | println!("Shutting down."); 203 | router.shutdown().await?; 204 | } 205 | Args::Get { ticket, target } => { 206 | let store = MemStore::new(); 207 | let conn = endpoint 208 | .connect(ticket.addr().clone(), lz4::Compression::ALPN) 209 | .await?; 210 | let connection_id = conn.stable_id() as u64; 211 | let (send, recv) = conn.open_bi().await?; 212 | let send = compression.send_stream(send); 213 | let recv = compression.recv_stream(recv); 214 | let sp = StreamPair::new(connection_id, recv, send); 215 | let _stats = store.remote().fetch(sp, ticket.hash_and_format()).await?; 216 | if let Some(target) = target { 217 | let size = store.export(ticket.hash(), &target).await?; 218 | println!("Wrote {} bytes to {}", size, target.display()); 219 | } else { 220 | println!("Hash: {}", ticket.hash()); 221 | } 222 | } 223 | } 224 | Ok(()) 225 | } 226 | -------------------------------------------------------------------------------- /src/ticket.rs: -------------------------------------------------------------------------------- 1 | //! Tickets for blobs. 2 | use std::{collections::BTreeSet, net::SocketAddr, str::FromStr}; 3 | 4 | use iroh::{EndpointAddr, EndpointId, RelayUrl}; 5 | use iroh_tickets::{ParseError, Ticket}; 6 | use n0_error::Result; 7 | use serde::{Deserialize, Serialize}; 8 | 9 | use crate::{BlobFormat, Hash, HashAndFormat}; 10 | 11 | /// A token containing everything to get a file from the provider. 12 | /// 13 | /// It is a single item which can be easily serialized and deserialized. 14 | #[derive(Debug, Clone, PartialEq, Eq, derive_more::Display)] 15 | #[display("{}", Ticket::serialize(self))] 16 | pub struct BlobTicket { 17 | /// The provider to get a file from. 18 | addr: EndpointAddr, 19 | /// The format of the blob. 20 | format: BlobFormat, 21 | /// The hash to retrieve. 22 | hash: Hash, 23 | } 24 | 25 | impl From for HashAndFormat { 26 | fn from(val: BlobTicket) -> Self { 27 | HashAndFormat { 28 | hash: val.hash, 29 | format: val.format, 30 | } 31 | } 32 | } 33 | 34 | /// Wire format for [`BlobTicket`]. 35 | /// 36 | /// In the future we might have multiple variants (not versions, since they 37 | /// might be both equally valid), so this is a single variant enum to force 38 | /// postcard to add a discriminator. 
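///
/// Concretely, postcard writes a varint discriminator (`0x00` for `Variant0`) before
/// the variant's fields; the `test_ticket_base32` test at the bottom of this file
/// checks exactly that byte layout.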
39 | #[derive(Serialize, Deserialize)] 40 | enum TicketWireFormat { 41 | Variant0(Variant0BlobTicket), 42 | } 43 | 44 | // Legacy 45 | #[derive(Serialize, Deserialize)] 46 | struct Variant0BlobTicket { 47 | node: Variant0NodeAddr, 48 | format: BlobFormat, 49 | hash: Hash, 50 | } 51 | 52 | #[derive(Serialize, Deserialize)] 53 | struct Variant0NodeAddr { 54 | endpoint_id: EndpointId, 55 | info: Variant0AddrInfo, 56 | } 57 | 58 | #[derive(Serialize, Deserialize)] 59 | struct Variant0AddrInfo { 60 | relay_url: Option, 61 | direct_addresses: BTreeSet, 62 | } 63 | 64 | impl Ticket for BlobTicket { 65 | const KIND: &'static str = "blob"; 66 | 67 | fn to_bytes(&self) -> Vec { 68 | let data = TicketWireFormat::Variant0(Variant0BlobTicket { 69 | node: Variant0NodeAddr { 70 | endpoint_id: self.addr.id, 71 | info: Variant0AddrInfo { 72 | relay_url: self.addr.relay_urls().next().cloned(), 73 | direct_addresses: self.addr.ip_addrs().cloned().collect(), 74 | }, 75 | }, 76 | format: self.format, 77 | hash: self.hash, 78 | }); 79 | postcard::to_stdvec(&data).expect("postcard serialization failed") 80 | } 81 | 82 | fn from_bytes(bytes: &[u8]) -> std::result::Result { 83 | let res: TicketWireFormat = postcard::from_bytes(bytes)?; 84 | let TicketWireFormat::Variant0(Variant0BlobTicket { node, format, hash }) = res; 85 | let mut addr = EndpointAddr::new(node.endpoint_id); 86 | if let Some(relay_url) = node.info.relay_url { 87 | addr = addr.with_relay_url(relay_url); 88 | } 89 | for ip_addr in node.info.direct_addresses { 90 | addr = addr.with_ip_addr(ip_addr); 91 | } 92 | Ok(Self { addr, format, hash }) 93 | } 94 | } 95 | 96 | impl FromStr for BlobTicket { 97 | type Err = ParseError; 98 | 99 | fn from_str(s: &str) -> Result { 100 | Ticket::deserialize(s) 101 | } 102 | } 103 | 104 | impl BlobTicket { 105 | /// Creates a new ticket. 106 | pub fn new(addr: EndpointAddr, hash: Hash, format: BlobFormat) -> Self { 107 | Self { hash, format, addr } 108 | } 109 | 110 | /// The hash of the item this ticket can retrieve. 111 | pub fn hash(&self) -> Hash { 112 | self.hash 113 | } 114 | 115 | /// The [`EndpointAddr`] of the provider for this ticket. 116 | pub fn addr(&self) -> &EndpointAddr { 117 | &self.addr 118 | } 119 | 120 | /// The [`BlobFormat`] for this ticket. 121 | pub fn format(&self) -> BlobFormat { 122 | self.format 123 | } 124 | 125 | pub fn hash_and_format(&self) -> HashAndFormat { 126 | HashAndFormat { 127 | hash: self.hash, 128 | format: self.format, 129 | } 130 | } 131 | 132 | /// True if the ticket is for a collection and should retrieve all blobs in it. 133 | pub fn recursive(&self) -> bool { 134 | self.format.is_hash_seq() 135 | } 136 | 137 | /// Get the contents of the ticket, consuming it. 
138 | pub fn into_parts(self) -> (EndpointAddr, Hash, BlobFormat) { 139 | let BlobTicket { addr, hash, format } = self; 140 | (addr, hash, format) 141 | } 142 | } 143 | 144 | impl Serialize for BlobTicket { 145 | fn serialize(&self, serializer: S) -> Result { 146 | if serializer.is_human_readable() { 147 | serializer.serialize_str(&self.to_string()) 148 | } else { 149 | let BlobTicket { 150 | addr: node, 151 | format, 152 | hash, 153 | } = self; 154 | (node, format, hash).serialize(serializer) 155 | } 156 | } 157 | } 158 | 159 | impl<'de> Deserialize<'de> for BlobTicket { 160 | fn deserialize>(deserializer: D) -> Result { 161 | if deserializer.is_human_readable() { 162 | let s = String::deserialize(deserializer)?; 163 | Self::from_str(&s).map_err(serde::de::Error::custom) 164 | } else { 165 | let (peer, format, hash) = Deserialize::deserialize(deserializer)?; 166 | Ok(Self::new(peer, hash, format)) 167 | } 168 | } 169 | } 170 | 171 | #[cfg(test)] 172 | mod tests { 173 | use std::net::SocketAddr; 174 | 175 | use iroh::{PublicKey, SecretKey, TransportAddr}; 176 | use iroh_test::{assert_eq_hex, hexdump::parse_hexdump}; 177 | 178 | use super::*; 179 | 180 | fn make_ticket() -> BlobTicket { 181 | let hash = Hash::new(b"hi there"); 182 | let peer = SecretKey::generate(&mut rand::rng()).public(); 183 | let addr = SocketAddr::from_str("127.0.0.1:1234").unwrap(); 184 | BlobTicket { 185 | hash, 186 | addr: EndpointAddr::from_parts(peer, [TransportAddr::Ip(addr)]), 187 | format: BlobFormat::HashSeq, 188 | } 189 | } 190 | 191 | #[test] 192 | fn test_ticket_postcard() { 193 | let ticket = make_ticket(); 194 | let bytes = postcard::to_stdvec(&ticket).unwrap(); 195 | let ticket2: BlobTicket = postcard::from_bytes(&bytes).unwrap(); 196 | assert_eq!(ticket2, ticket); 197 | } 198 | 199 | #[test] 200 | fn test_ticket_json() { 201 | let ticket = make_ticket(); 202 | let json = serde_json::to_string(&ticket).unwrap(); 203 | let ticket2: BlobTicket = serde_json::from_str(&json).unwrap(); 204 | assert_eq!(ticket2, ticket); 205 | } 206 | 207 | #[test] 208 | fn test_ticket_base32() { 209 | let hash = 210 | Hash::from_str("0b84d358e4c8be6c38626b2182ff575818ba6bd3f4b90464994be14cb354a072") 211 | .unwrap(); 212 | let endpoint_id = 213 | PublicKey::from_str("ae58ff8833241ac82d6ff7611046ed67b5072d142c588d0063e942d9a75502b6") 214 | .unwrap(); 215 | 216 | let ticket = BlobTicket { 217 | addr: EndpointAddr::new(endpoint_id), 218 | format: BlobFormat::Raw, 219 | hash, 220 | }; 221 | let encoded = ticket.to_string(); 222 | let stripped = encoded.strip_prefix("blob").unwrap(); 223 | let base32 = data_encoding::BASE32_NOPAD 224 | .decode(stripped.to_ascii_uppercase().as_bytes()) 225 | .unwrap(); 226 | let expected = parse_hexdump(" 227 | 00 # discriminator for variant 0 228 | ae58ff8833241ac82d6ff7611046ed67b5072d142c588d0063e942d9a75502b6 # endpoint id, 32 bytes, see above 229 | 00 # relay url 230 | 00 # number of addresses (0) 231 | 00 # format (raw) 232 | 0b84d358e4c8be6c38626b2182ff575818ba6bd3f4b90464994be14cb354a072 # hash, 32 bytes, see above 233 | ").unwrap(); 234 | assert_eq_hex!(base32, expected); 235 | } 236 | } 237 | -------------------------------------------------------------------------------- /.github/workflows/tests.yaml: -------------------------------------------------------------------------------- 1 | # Run all tests, with or without flaky tests. 
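# This workflow is not triggered on its own: ci.yaml calls it via `workflow_call`
# (see the `tests` job in .github/workflows/ci.yaml), and the `flaky` input below
# controls whether ignored/flaky tests are included in the run.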
2 | 3 | name: Tests 4 | 5 | on: 6 | workflow_call: 7 | inputs: 8 | rust-version: 9 | description: 'The version of the rust compiler to run' 10 | type: string 11 | default: 'stable' 12 | flaky: 13 | description: 'Whether to also run flaky tests' 14 | type: boolean 15 | default: false 16 | git-ref: 17 | description: 'Which git ref to checkout' 18 | type: string 19 | default: ${{ github.ref }} 20 | 21 | env: 22 | RUST_BACKTRACE: 1 23 | RUSTFLAGS: -Dwarnings 24 | RUSTDOCFLAGS: -Dwarnings 25 | SCCACHE_CACHE_SIZE: "50G" 26 | CRATES_LIST: "iroh-blobs" 27 | IROH_FORCE_STAGING_RELAYS: "1" 28 | 29 | jobs: 30 | build_and_test_nix: 31 | timeout-minutes: 30 32 | name: "Tests" 33 | runs-on: ${{ matrix.runner }} 34 | strategy: 35 | fail-fast: false 36 | matrix: 37 | name: [ubuntu-latest, macOS-arm-latest] 38 | rust: [ '${{ inputs.rust-version }}' ] 39 | features: [all, none, default] 40 | include: 41 | - name: ubuntu-latest 42 | os: ubuntu-latest 43 | release-os: linux 44 | release-arch: amd64 45 | runner: [self-hosted, linux, X64] 46 | - name: macOS-arm-latest 47 | os: macOS-latest 48 | release-os: darwin 49 | release-arch: aarch64 50 | runner: [self-hosted, macOS, ARM64] 51 | env: 52 | # Using self-hosted runners so use local cache for sccache and 53 | # not SCCACHE_GHA_ENABLED. 54 | RUSTC_WRAPPER: "sccache" 55 | steps: 56 | - name: Checkout 57 | uses: actions/checkout@v6 58 | with: 59 | ref: ${{ inputs.git-ref }} 60 | 61 | - name: Install ${{ matrix.rust }} rust 62 | uses: dtolnay/rust-toolchain@master 63 | with: 64 | toolchain: ${{ matrix.rust }} 65 | 66 | - name: Install cargo-nextest 67 | uses: taiki-e/install-action@v2 68 | with: 69 | tool: nextest@0.9.80 70 | 71 | - name: Install sccache 72 | uses: mozilla-actions/sccache-action@v0.0.9 73 | 74 | - name: Select features 75 | run: | 76 | case "${{ matrix.features }}" in 77 | all) 78 | echo "FEATURES=--all-features" >> "$GITHUB_ENV" 79 | ;; 80 | none) 81 | echo "FEATURES=--no-default-features" >> "$GITHUB_ENV" 82 | ;; 83 | default) 84 | echo "FEATURES=" >> "$GITHUB_ENV" 85 | ;; 86 | *) 87 | exit 1 88 | esac 89 | 90 | - name: check features 91 | if: ${{ ! 
inputs.flaky }} 92 | run: | 93 | for i in ${CRATES_LIST//,/ } 94 | do 95 | echo "Checking $i $FEATURES" 96 | if [ $i = "iroh-cli" ]; then 97 | targets="--bins" 98 | else 99 | targets="--lib --bins" 100 | fi 101 | echo cargo check -p $i $FEATURES $targets 102 | cargo check -p $i $FEATURES $targets 103 | done 104 | env: 105 | RUST_LOG: ${{ runner.debug && 'TRACE' || 'DEBUG'}} 106 | 107 | - name: build tests 108 | run: | 109 | cargo nextest run --workspace ${{ env.FEATURES }} --lib --bins --tests --no-run 110 | 111 | - name: list ignored tests 112 | run: | 113 | cargo nextest list --workspace ${{ env.FEATURES }} --lib --bins --tests --run-ignored ignored-only 114 | 115 | - name: run tests 116 | run: | 117 | mkdir -p output 118 | cargo nextest run --workspace ${{ env.FEATURES }} --lib --bins --tests --profile ci --run-ignored ${{ inputs.flaky && 'all' || 'default' }} --no-fail-fast --message-format ${{ inputs.flaky && 'libtest-json' || 'human' }} > output/${{ matrix.name }}_${{ matrix.features }}_${{ matrix.rust }}.json 119 | env: 120 | RUST_LOG: ${{ runner.debug && 'TRACE' || 'DEBUG'}} 121 | NEXTEST_EXPERIMENTAL_LIBTEST_JSON: 1 122 | 123 | - name: upload results 124 | if: ${{ failure() && inputs.flaky }} 125 | uses: actions/upload-artifact@v5 126 | with: 127 | name: libtest_run_${{ github.run_number }}-${{ github.run_attempt }}-${{ matrix.name }}_${{ matrix.features }}_${{ matrix.rust }}.json 128 | path: output 129 | retention-days: 45 130 | compression-level: 0 131 | 132 | - name: doctests 133 | if: ${{ (! inputs.flaky) && matrix.features == 'all' }} 134 | run: | 135 | if [ -n "${{ runner.debug }}" ]; then 136 | export RUST_LOG=TRACE 137 | else 138 | export RUST_LOG=DEBUG 139 | fi 140 | cargo test --workspace --all-features --doc 141 | 142 | build_and_test_windows: 143 | timeout-minutes: 30 144 | name: "Tests" 145 | runs-on: ${{ matrix.runner }} 146 | strategy: 147 | fail-fast: false 148 | matrix: 149 | name: [windows-latest] 150 | rust: [ '${{ inputs.rust-version}}' ] 151 | features: [all, none, default] 152 | target: 153 | - x86_64-pc-windows-msvc 154 | include: 155 | - name: windows-latest 156 | os: windows 157 | runner: [self-hosted, windows, x64] 158 | env: 159 | # Using self-hosted runners so use local cache for sccache and 160 | # not SCCACHE_GHA_ENABLED. 
161 | RUSTC_WRAPPER: "sccache" 162 | steps: 163 | - name: Checkout 164 | uses: actions/checkout@v6 165 | with: 166 | ref: ${{ inputs.git-ref }} 167 | 168 | - name: Install ${{ matrix.rust }} 169 | run: | 170 | rustup toolchain install ${{ matrix.rust }} 171 | rustup toolchain default ${{ matrix.rust }} 172 | rustup target add ${{ matrix.target }} 173 | rustup set default-host ${{ matrix.target }} 174 | 175 | - name: Install cargo-nextest 176 | shell: powershell 177 | run: | 178 | $tmp = New-TemporaryFile | Rename-Item -NewName { $_ -replace 'tmp$', 'zip' } -PassThru 179 | Invoke-WebRequest -OutFile $tmp https://get.nexte.st/latest/windows 180 | $outputDir = if ($Env:CARGO_HOME) { Join-Path $Env:CARGO_HOME "bin" } else { "~/.cargo/bin" } 181 | $tmp | Expand-Archive -DestinationPath $outputDir -Force 182 | $tmp | Remove-Item 183 | 184 | - name: Select features 185 | run: | 186 | switch ("${{ matrix.features }}") { 187 | "all" { 188 | echo "FEATURES=--all-features" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf8 -Append 189 | } 190 | "none" { 191 | echo "FEATURES=--no-default-features" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf8 -Append 192 | } 193 | "default" { 194 | echo "FEATURES=" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf8 -Append 195 | } 196 | default { 197 | Exit 1 198 | } 199 | } 200 | 201 | - name: Install sccache 202 | uses: mozilla-actions/sccache-action@v0.0.9 203 | 204 | - uses: msys2/setup-msys2@v2 205 | 206 | - name: build tests 207 | run: | 208 | cargo nextest run --workspace ${{ env.FEATURES }} --lib --bins --tests --target ${{ matrix.target }} --no-run 209 | 210 | - name: list ignored tests 211 | run: | 212 | cargo nextest list --workspace ${{ env.FEATURES }} --lib --bins --tests --target ${{ matrix.target }} --run-ignored ignored-only 213 | 214 | - name: tests 215 | run: | 216 | mkdir -p output 217 | cargo nextest run --workspace ${{ env.FEATURES }} --lib --bins --tests --profile ci --target ${{ matrix.target }} --run-ignored ${{ inputs.flaky && 'all' || 'default' }} --no-fail-fast --message-format ${{ inputs.flaky && 'libtest-json' || 'human' }} > output/${{ matrix.name }}_${{ matrix.features }}_${{ matrix.rust }}.json 218 | env: 219 | RUST_LOG: ${{ runner.debug && 'TRACE' || 'DEBUG'}} 220 | NEXTEST_EXPERIMENTAL_LIBTEST_JSON: 1 221 | 222 | - name: upload results 223 | if: ${{ failure() && inputs.flaky }} 224 | uses: actions/upload-artifact@v5 225 | with: 226 | name: libtest_run_${{ github.run_number }}-${{ github.run_attempt }}-${{ matrix.name }}_${{ matrix.features }}_${{ matrix.rust }}.json 227 | path: output 228 | retention-days: 1 229 | compression-level: 0 230 | -------------------------------------------------------------------------------- /src/util/temp_tag.rs: -------------------------------------------------------------------------------- 1 | #![allow(dead_code)] 2 | use std::{ 3 | collections::HashMap, 4 | sync::{Arc, Mutex, Weak}, 5 | }; 6 | 7 | use serde::{Deserialize, Serialize}; 8 | use tracing::{trace, warn}; 9 | 10 | use crate::{api::proto::Scope, BlobFormat, Hash, HashAndFormat}; 11 | 12 | /// An ephemeral, in-memory tag that protects content while the process is running. 13 | /// 14 | /// If format is raw, this will protect just the blob 15 | /// If format is collection, this will protect the collection and all blobs in it 16 | #[derive(Debug, Serialize, Deserialize)] 17 | #[must_use = "TempTag is a temporary tag that should be used to protect content while the process is running. 
\ 18 | If you want to keep the content alive, use TempTag::leak()"] 19 | pub struct TempTag { 20 | /// The hash and format we are pinning 21 | inner: HashAndFormat, 22 | /// optional callback to call on drop 23 | #[serde(skip)] 24 | on_drop: Option>, 25 | } 26 | 27 | impl AsRef for TempTag { 28 | fn as_ref(&self) -> &Hash { 29 | &self.inner.hash 30 | } 31 | } 32 | 33 | /// A trait for things that can track liveness of blobs and collections. 34 | /// 35 | /// This trait works together with [TempTag] to keep track of the liveness of a 36 | /// blob or collection. 37 | /// 38 | /// It is important to include the format in the liveness tracking, since 39 | /// protecting a collection means protecting the blob and all its children, 40 | /// whereas protecting a raw blob only protects the blob itself. 41 | pub trait TagCounter: TagDrop + Sized { 42 | /// Called on creation of a temp tag 43 | fn on_create(&self, inner: &HashAndFormat); 44 | 45 | /// Get this as a weak reference for use in temp tags 46 | fn as_weak(self: &Arc) -> Weak { 47 | let on_drop: Arc = self.clone(); 48 | Arc::downgrade(&on_drop) 49 | } 50 | 51 | /// Create a new temp tag for the given hash and format 52 | fn temp_tag(self: &Arc, inner: HashAndFormat) -> TempTag { 53 | self.on_create(&inner); 54 | TempTag::new(inner, Some(self.as_weak())) 55 | } 56 | } 57 | 58 | /// Trait used from temp tags to notify an abstract store that a temp tag is 59 | /// being dropped. 60 | pub trait TagDrop: std::fmt::Debug + Send + Sync + 'static { 61 | /// Called on drop 62 | fn on_drop(&self, inner: &HashAndFormat); 63 | } 64 | 65 | impl From<&TempTag> for HashAndFormat { 66 | fn from(val: &TempTag) -> Self { 67 | val.inner 68 | } 69 | } 70 | 71 | impl From for HashAndFormat { 72 | fn from(val: TempTag) -> Self { 73 | val.inner 74 | } 75 | } 76 | 77 | impl TempTag { 78 | /// Create a new temp tag for the given hash and format 79 | /// 80 | /// This should only be used by store implementations. 81 | /// 82 | /// The caller is responsible for increasing the refcount on creation and to 83 | /// make sure that temp tags that are created between a mark phase and a sweep 84 | /// phase are protected. 85 | pub fn new(inner: HashAndFormat, on_drop: Option>) -> Self { 86 | Self { inner, on_drop } 87 | } 88 | 89 | /// The empty temp tag. We don't track the empty blob since we always have it. 90 | pub fn leaking_empty(format: BlobFormat) -> Self { 91 | Self { 92 | inner: HashAndFormat { 93 | hash: Hash::EMPTY, 94 | format, 95 | }, 96 | on_drop: None, 97 | } 98 | } 99 | 100 | /// The hash of the pinned item 101 | pub fn hash(&self) -> Hash { 102 | self.inner.hash 103 | } 104 | 105 | /// The format of the pinned item 106 | pub fn format(&self) -> BlobFormat { 107 | self.inner.format 108 | } 109 | 110 | /// The hash and format of the pinned item 111 | pub fn hash_and_format(&self) -> HashAndFormat { 112 | self.inner 113 | } 114 | 115 | /// Keep the item alive until the end of the process 116 | pub fn leak(&mut self) { 117 | // set the liveness tracker to None, so that the refcount is not decreased 118 | // during drop. This means that the refcount will never reach 0 and the 119 | // item will not be gced until the end of the process. 
120 | self.on_drop = None; 121 | } 122 | } 123 | 124 | impl Drop for TempTag { 125 | fn drop(&mut self) { 126 | if let Some(on_drop) = self.on_drop.take() { 127 | if let Some(on_drop) = on_drop.upgrade() { 128 | on_drop.on_drop(&self.inner); 129 | } 130 | } 131 | } 132 | } 133 | 134 | #[derive(Debug, Default, Clone)] 135 | struct TempCounters { 136 | /// number of raw temp tags for a hash 137 | raw: u64, 138 | /// number of hash seq temp tags for a hash 139 | hash_seq: u64, 140 | } 141 | 142 | impl TempCounters { 143 | fn counter(&self, format: BlobFormat) -> u64 { 144 | match format { 145 | BlobFormat::Raw => self.raw, 146 | BlobFormat::HashSeq => self.hash_seq, 147 | } 148 | } 149 | 150 | fn counter_mut(&mut self, format: BlobFormat) -> &mut u64 { 151 | match format { 152 | BlobFormat::Raw => &mut self.raw, 153 | BlobFormat::HashSeq => &mut self.hash_seq, 154 | } 155 | } 156 | 157 | fn inc(&mut self, format: BlobFormat) { 158 | let counter = self.counter_mut(format); 159 | *counter = counter.checked_add(1).unwrap(); 160 | } 161 | 162 | fn dec(&mut self, format: BlobFormat) { 163 | let counter = self.counter_mut(format); 164 | *counter = counter.saturating_sub(1); 165 | } 166 | 167 | fn is_empty(&self) -> bool { 168 | self.raw == 0 && self.hash_seq == 0 169 | } 170 | } 171 | 172 | #[derive(Debug, Default)] 173 | pub(crate) struct TempTags { 174 | scopes: HashMap>, 175 | next_scope: u64, 176 | } 177 | 178 | impl TempTags { 179 | pub fn create_scope(&mut self) -> (Scope, Arc) { 180 | self.next_scope += 1; 181 | let id = Scope(self.next_scope); 182 | let scope = self.scopes.entry(id).or_default(); 183 | (id, scope.clone()) 184 | } 185 | 186 | pub fn end_scope(&mut self, scope: Scope) { 187 | self.scopes.remove(&scope); 188 | } 189 | 190 | pub fn list(&self) -> Vec { 191 | self.scopes 192 | .values() 193 | .flat_map(|scope| scope.list()) 194 | .collect() 195 | } 196 | 197 | pub fn create(&mut self, scope: Scope, content: HashAndFormat) -> TempTag { 198 | let scope = self.scopes.entry(scope).or_default(); 199 | 200 | scope.temp_tag(content) 201 | } 202 | 203 | pub fn contains(&self, hash: Hash) -> bool { 204 | self.scopes 205 | .values() 206 | .any(|scope| scope.0.lock().unwrap().contains(&HashAndFormat::raw(hash))) 207 | } 208 | } 209 | 210 | #[derive(Debug, Default)] 211 | pub(crate) struct TempTagScope(Mutex); 212 | 213 | impl TempTagScope { 214 | pub fn list(&self) -> impl Iterator + 'static { 215 | let guard = self.0.lock().unwrap(); 216 | let res = guard.keys(); 217 | drop(guard); 218 | res.into_iter() 219 | } 220 | } 221 | 222 | impl TagDrop for TempTagScope { 223 | fn on_drop(&self, inner: &HashAndFormat) { 224 | trace!("Dropping temp tag {:?}", inner); 225 | self.0.lock().unwrap().dec(inner); 226 | } 227 | } 228 | 229 | impl TagCounter for TempTagScope { 230 | fn on_create(&self, inner: &HashAndFormat) { 231 | trace!("Creating temp tag {:?}", inner); 232 | self.0.lock().unwrap().inc(*inner); 233 | } 234 | } 235 | 236 | #[derive(Debug, Clone, Default)] 237 | pub(crate) struct TempCounterMap(HashMap); 238 | 239 | impl TempCounterMap { 240 | pub fn inc(&mut self, value: HashAndFormat) { 241 | self.0.entry(value.hash).or_default().inc(value.format) 242 | } 243 | 244 | pub fn dec(&mut self, value: &HashAndFormat) { 245 | let HashAndFormat { hash, format } = value; 246 | let Some(counters) = self.0.get_mut(hash) else { 247 | warn!("Decrementing non-existent temp tag"); 248 | return; 249 | }; 250 | counters.dec(*format); 251 | if counters.is_empty() { 252 | self.0.remove(hash); 253 | } 254 | 
} 255 | 256 | pub fn contains(&self, haf: &HashAndFormat) -> bool { 257 | let Some(entry) = self.0.get(&haf.hash) else { 258 | return false; 259 | }; 260 | entry.counter(haf.format) > 0 261 | } 262 | 263 | pub fn keys(&self) -> Vec { 264 | let mut res = Vec::new(); 265 | for (k, v) in self.0.iter() { 266 | if v.raw > 0 { 267 | res.push(HashAndFormat::raw(*k)); 268 | } 269 | if v.hash_seq > 0 { 270 | res.push(HashAndFormat::hash_seq(*k)); 271 | } 272 | } 273 | res 274 | } 275 | } 276 | -------------------------------------------------------------------------------- /src/api.rs: -------------------------------------------------------------------------------- 1 | //! The user facing API of the store. 2 | //! 3 | //! This API is both for interacting with an in-process store and for interacting 4 | //! with a remote store via rpc calls. 5 | //! 6 | //! The entry point for the api is the [`Store`] struct. There are several ways 7 | //! to obtain a `Store` instance: it is available via [`Deref`] 8 | //! from the different store implementations 9 | //! (e.g. [`MemStore`](crate::store::mem::MemStore) 10 | //! and [`FsStore`](crate::store::fs::FsStore)) as well as on the 11 | //! [`BlobsProtocol`](crate::BlobsProtocol) iroh protocol handler. 12 | //! 13 | //! You can also [`connect`](Store::connect) to a remote store that is listening 14 | //! to rpc requests. 15 | use std::{io, ops::Deref}; 16 | 17 | use bao_tree::io::EncodeError; 18 | use iroh::Endpoint; 19 | use n0_error::{e, stack_error}; 20 | use proto::{ShutdownRequest, SyncDbRequest}; 21 | use ref_cast::RefCast; 22 | use serde::{Deserialize, Serialize}; 23 | use tags::Tags; 24 | 25 | pub mod blobs; 26 | pub mod downloader; 27 | pub mod proto; 28 | pub mod remote; 29 | pub mod tags; 30 | use crate::{api::proto::WaitIdleRequest, provider::events::ProgressError}; 31 | pub use crate::{store::util::Tag, util::temp_tag::TempTag}; 32 | 33 | pub(crate) type ApiClient = irpc::Client; 34 | 35 | #[allow(missing_docs)] 36 | #[non_exhaustive] 37 | #[stack_error(derive, add_meta)] 38 | pub enum RequestError { 39 | /// Request failed due to rpc error. 40 | #[error("rpc error: {source}")] 41 | Rpc { source: irpc::Error }, 42 | /// Request failed due an actual error. 
43 | #[error("inner error: {source}")] 44 | Inner { 45 | #[error(std_err)] 46 | source: Error, 47 | }, 48 | } 49 | 50 | impl From for RequestError { 51 | fn from(value: irpc::Error) -> Self { 52 | e!(RequestError::Rpc, value) 53 | } 54 | } 55 | 56 | impl From for RequestError { 57 | fn from(value: Error) -> Self { 58 | e!(RequestError::Inner, value) 59 | } 60 | } 61 | 62 | impl From for RequestError { 63 | fn from(value: io::Error) -> Self { 64 | e!(RequestError::Inner, value.into()) 65 | } 66 | } 67 | 68 | impl From for RequestError { 69 | fn from(value: irpc::channel::mpsc::RecvError) -> Self { 70 | e!(RequestError::Rpc, value.into()) 71 | } 72 | } 73 | 74 | pub type RequestResult = std::result::Result; 75 | 76 | #[allow(missing_docs)] 77 | #[non_exhaustive] 78 | #[stack_error(derive, add_meta, from_sources)] 79 | pub enum ExportBaoError { 80 | #[error("send error")] 81 | Send { source: irpc::channel::SendError }, 82 | #[error("mpsc recv e api.acp.pro-channelsrror")] 83 | MpscRecv { 84 | source: irpc::channel::mpsc::RecvError, 85 | }, 86 | #[error("oneshot recv error")] 87 | OneshotRecv { 88 | source: irpc::channel::oneshot::RecvError, 89 | }, 90 | #[error("request error")] 91 | Request { source: irpc::RequestError }, 92 | #[error("io error")] 93 | ExportBaoIo { 94 | #[error(std_err)] 95 | source: io::Error, 96 | }, 97 | #[error("encode error")] 98 | ExportBaoInner { 99 | #[error(std_err)] 100 | source: bao_tree::io::EncodeError, 101 | }, 102 | #[error("client error")] 103 | ClientError { source: ProgressError }, 104 | } 105 | 106 | impl From for Error { 107 | fn from(e: ExportBaoError) -> Self { 108 | match e { 109 | ExportBaoError::Send { source, .. } => Self::Io(source.into()), 110 | ExportBaoError::MpscRecv { source, .. } => Self::Io(source.into()), 111 | ExportBaoError::OneshotRecv { source, .. } => Self::Io(source.into()), 112 | ExportBaoError::Request { source, .. } => Self::Io(source.into()), 113 | ExportBaoError::ExportBaoIo { source, .. } => Self::Io(source), 114 | ExportBaoError::ExportBaoInner { source, .. } => Self::Io(source.into()), 115 | ExportBaoError::ClientError { source, .. } => Self::Io(source.into()), 116 | } 117 | } 118 | } 119 | 120 | impl From for ExportBaoError { 121 | fn from(e: irpc::Error) -> Self { 122 | match e { 123 | irpc::Error::MpscRecv { source: e, .. } => e!(ExportBaoError::MpscRecv, e), 124 | irpc::Error::OneshotRecv { source: e, .. } => e!(ExportBaoError::OneshotRecv, e), 125 | irpc::Error::Send { source: e, .. } => e!(ExportBaoError::Send, e), 126 | irpc::Error::Request { source: e, .. } => e!(ExportBaoError::Request, e), 127 | #[cfg(feature = "rpc")] 128 | irpc::Error::Write { source: e, .. 
} => e!(ExportBaoError::ExportBaoIo, e.into()), 129 | } 130 | } 131 | } 132 | 133 | pub type ExportBaoResult = std::result::Result; 134 | 135 | #[derive(Serialize, Deserialize)] 136 | #[stack_error(derive, std_sources, from_sources)] 137 | pub enum Error { 138 | #[serde(with = "crate::util::serde::io_error_serde")] 139 | Io(#[error(source)] io::Error), 140 | } 141 | 142 | impl Error { 143 | pub fn io( 144 | kind: io::ErrorKind, 145 | msg: impl Into>, 146 | ) -> Self { 147 | Self::Io(io::Error::new(kind, msg.into())) 148 | } 149 | 150 | pub fn other(msg: E) -> Self 151 | where 152 | E: Into>, 153 | { 154 | Self::Io(io::Error::other(msg.into())) 155 | } 156 | } 157 | 158 | impl From for Error { 159 | fn from(e: irpc::Error) -> Self { 160 | Self::Io(e.into()) 161 | } 162 | } 163 | 164 | impl From for Error { 165 | fn from(e: RequestError) -> Self { 166 | match e { 167 | RequestError::Rpc { source, .. } => Self::Io(source.into()), 168 | RequestError::Inner { source, .. } => source, 169 | } 170 | } 171 | } 172 | 173 | impl From for Error { 174 | fn from(e: irpc::channel::mpsc::RecvError) -> Self { 175 | Self::Io(e.into()) 176 | } 177 | } 178 | 179 | #[cfg(feature = "rpc")] 180 | impl From for Error { 181 | fn from(e: irpc::rpc::WriteError) -> Self { 182 | Self::Io(e.into()) 183 | } 184 | } 185 | 186 | impl From for Error { 187 | fn from(e: irpc::RequestError) -> Self { 188 | Self::Io(e.into()) 189 | } 190 | } 191 | 192 | impl From for Error { 193 | fn from(e: irpc::channel::SendError) -> Self { 194 | Self::Io(e.into()) 195 | } 196 | } 197 | 198 | impl From for Error { 199 | fn from(value: EncodeError) -> Self { 200 | match value { 201 | EncodeError::Io(cause) => Self::Io(cause), 202 | _ => Self::Io(io::Error::other(value)), 203 | } 204 | } 205 | } 206 | 207 | pub type Result = std::result::Result; 208 | 209 | /// The main entry point for the store API. 210 | #[derive(Debug, Clone, ref_cast::RefCast)] 211 | #[repr(transparent)] 212 | pub struct Store { 213 | client: ApiClient, 214 | } 215 | 216 | impl Deref for Store { 217 | type Target = blobs::Blobs; 218 | 219 | fn deref(&self) -> &Self::Target { 220 | blobs::Blobs::ref_from_sender(&self.client) 221 | } 222 | } 223 | 224 | impl Store { 225 | /// The tags API. 226 | pub fn tags(&self) -> &Tags { 227 | Tags::ref_from_sender(&self.client) 228 | } 229 | 230 | /// The blobs API. 231 | pub fn blobs(&self) -> &blobs::Blobs { 232 | blobs::Blobs::ref_from_sender(&self.client) 233 | } 234 | 235 | /// API for getting blobs from a *single* remote node. 236 | pub fn remote(&self) -> &remote::Remote { 237 | remote::Remote::ref_from_sender(&self.client) 238 | } 239 | 240 | /// Create a downloader for more complex downloads. 241 | /// 242 | /// Unlike the other APIs, this creates an object that has internal state, 243 | /// so don't create it ad hoc but store it somewhere if you need it multiple 244 | /// times. 245 | pub fn downloader(&self, endpoint: &Endpoint) -> downloader::Downloader { 246 | downloader::Downloader::new(self, endpoint) 247 | } 248 | 249 | /// Connect to a remote store as a rpc client. 250 | #[cfg(feature = "rpc")] 251 | pub fn connect(endpoint: quinn::Endpoint, addr: std::net::SocketAddr) -> Self { 252 | let sender = irpc::Client::quinn(endpoint, addr); 253 | Store::from_sender(sender) 254 | } 255 | 256 | /// Listen on a quinn endpoint for incoming rpc connections. 
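///
/// A rough sketch of wiring a provider and a client together over rpc (the quinn
/// endpoint construction is elided and the socket address is made up for
/// illustration; both sides require the `rpc` feature):
///
/// ```ignore
/// // provider side: expose an existing store over the given quinn endpoint
/// tokio::spawn(store.clone().listen(server_endpoint));
///
/// // client side: talk to it from another process
/// let remote = Store::connect(client_endpoint, "127.0.0.1:4433".parse()?);
/// remote.sync_db().await?;
/// ```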
257 | #[cfg(feature = "rpc")] 258 | pub async fn listen(self, endpoint: quinn::Endpoint) { 259 | use irpc::rpc::RemoteService; 260 | 261 | use self::proto::Request; 262 | let local = self.client.as_local().unwrap().clone(); 263 | let handler = Request::remote_handler(local); 264 | irpc::rpc::listen::(endpoint, handler).await 265 | } 266 | 267 | pub async fn sync_db(&self) -> RequestResult<()> { 268 | let msg = SyncDbRequest; 269 | self.client.rpc(msg).await??; 270 | Ok(()) 271 | } 272 | 273 | pub async fn shutdown(&self) -> irpc::Result<()> { 274 | let msg = ShutdownRequest; 275 | self.client.rpc(msg).await?; 276 | Ok(()) 277 | } 278 | 279 | /// Waits for the store to become completely idle. 280 | /// 281 | /// This is mostly useful for tests, where you want to check that e.g. the 282 | /// store has written all data to disk. 283 | /// 284 | /// Note that a store is not guaranteed to become idle, if it is being 285 | /// interacted with concurrently. So this might wait forever. 286 | /// 287 | /// Also note that once you get the callback, the store is not guaranteed to 288 | /// still be idle. All this tells you that there was a point in time where 289 | /// the store was idle between the call and the response. 290 | pub async fn wait_idle(&self) -> irpc::Result<()> { 291 | let msg = WaitIdleRequest; 292 | self.client.rpc(msg).await?; 293 | Ok(()) 294 | } 295 | 296 | pub(crate) fn from_sender(client: ApiClient) -> Self { 297 | Self { client } 298 | } 299 | 300 | pub(crate) fn ref_from_sender(client: &ApiClient) -> &Self { 301 | Self::ref_cast(client) 302 | } 303 | } 304 | -------------------------------------------------------------------------------- /.github/workflows/ci.yaml: -------------------------------------------------------------------------------- 1 | name: CI 2 | 3 | on: 4 | pull_request: 5 | types: [ 'labeled', 'unlabeled', 'opened', 'synchronize', 'reopened' ] 6 | merge_group: 7 | push: 8 | branches: 9 | - main 10 | 11 | concurrency: 12 | group: ci-${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} 13 | cancel-in-progress: true 14 | 15 | env: 16 | RUST_BACKTRACE: 1 17 | RUSTFLAGS: -Dwarnings 18 | RUSTDOCFLAGS: -Dwarnings 19 | MSRV: "1.85" 20 | SCCACHE_CACHE_SIZE: "50G" 21 | IROH_FORCE_STAGING_RELAYS: "1" 22 | 23 | jobs: 24 | tests: 25 | name: CI Test Suite 26 | if: "github.event_name != 'pull_request' || ! contains(github.event.pull_request.labels.*.name, 'flaky-test')" 27 | uses: './.github/workflows/tests.yaml' 28 | 29 | cross_build: 30 | name: Cross Build Only 31 | if: "github.event_name != 'pull_request' || ! 
contains(github.event.pull_request.labels.*.name, 'flaky-test')" 32 | timeout-minutes: 30 33 | runs-on: [self-hosted, linux, X64] 34 | strategy: 35 | fail-fast: false 36 | matrix: 37 | target: 38 | # cross tests are currently broken vor armv7 and aarch64 39 | # see https://github.com/cross-rs/cross/issues/1311 40 | # - armv7-linux-androideabi 41 | # - aarch64-linux-android 42 | # Freebsd execution fails in cross 43 | # - i686-unknown-freebsd # Linking fails :/ 44 | - x86_64-unknown-freebsd 45 | # Netbsd execution fails to link in cross 46 | # - x86_64-unknown-netbsd 47 | steps: 48 | - name: Checkout 49 | uses: actions/checkout@v6 50 | with: 51 | submodules: recursive 52 | 53 | - name: Install rust stable 54 | uses: dtolnay/rust-toolchain@stable 55 | 56 | - name: Cleanup Docker 57 | continue-on-error: true 58 | run: | 59 | docker kill $(docker ps -q) 60 | 61 | # See https://github.com/cross-rs/cross/issues/1222 62 | - uses: taiki-e/install-action@cross 63 | 64 | - name: build 65 | # cross tests are currently broken vor armv7 and aarch64 66 | # see https://github.com/cross-rs/cross/issues/1311. So on 67 | # those platforms we only build but do not run tests. 68 | run: cross build --all --target ${{ matrix.target }} 69 | env: 70 | RUST_LOG: ${{ runner.debug && 'TRACE' || 'DEBUG'}} 71 | 72 | android_build: 73 | name: Android Build Only 74 | if: "github.event_name != 'pull_request' || ! contains(github.event.pull_request.labels.*.name, 'flaky-test')" 75 | timeout-minutes: 30 76 | # runs-on: ubuntu-latest 77 | runs-on: [self-hosted, linux, X64] 78 | strategy: 79 | fail-fast: false 80 | matrix: 81 | target: 82 | - aarch64-linux-android 83 | - armv7-linux-androideabi 84 | steps: 85 | - name: Checkout 86 | uses: actions/checkout@v6 87 | 88 | - name: Set up Rust 89 | uses: dtolnay/rust-toolchain@stable 90 | with: 91 | target: ${{ matrix.target }} 92 | - name: Install rustup target 93 | run: rustup target add ${{ matrix.target }} 94 | 95 | - name: Setup Java 96 | uses: actions/setup-java@v5 97 | with: 98 | distribution: 'temurin' 99 | java-version: '17' 100 | 101 | - name: Setup Android SDK 102 | uses: android-actions/setup-android@v3 103 | 104 | - name: Setup Android NDK 105 | uses: arqu/setup-ndk@main 106 | id: setup-ndk 107 | with: 108 | ndk-version: r23 109 | add-to-path: true 110 | 111 | - name: Build 112 | env: 113 | ANDROID_NDK_HOME: ${{ steps.setup-ndk.outputs.ndk-path }} 114 | run: | 115 | cargo install --version 3.5.4 cargo-ndk 116 | cargo ndk --target ${{ matrix.target }} build 117 | 118 | cross_test: 119 | name: Cross Test 120 | if: "github.event_name != 'pull_request' || ! 
contains(github.event.pull_request.labels.*.name, 'flaky-test')" 121 | timeout-minutes: 30 122 | runs-on: [self-hosted, linux, X64] 123 | strategy: 124 | fail-fast: false 125 | matrix: 126 | target: 127 | - i686-unknown-linux-gnu 128 | steps: 129 | - name: Checkout 130 | uses: actions/checkout@v6 131 | with: 132 | submodules: recursive 133 | 134 | - name: Install rust stable 135 | uses: dtolnay/rust-toolchain@stable 136 | 137 | - name: Cleanup Docker 138 | continue-on-error: true 139 | run: | 140 | docker kill $(docker ps -q) 141 | 142 | # See https://github.com/cross-rs/cross/issues/1222 143 | - uses: taiki-e/install-action@cross 144 | 145 | - name: test 146 | run: cross test --all --target ${{ matrix.target }} -- --test-threads=1 147 | env: 148 | RUST_LOG: ${{ runner.debug && 'TRACE' || 'DEBUG' }} 149 | 150 | check_semver: 151 | runs-on: ubuntu-latest 152 | env: 153 | RUSTC_WRAPPER: "sccache" 154 | SCCACHE_GHA_ENABLED: "on" 155 | steps: 156 | - uses: actions/checkout@v6 157 | with: 158 | fetch-depth: 0 159 | - name: Install sccache 160 | uses: mozilla-actions/sccache-action@v0.0.9 161 | 162 | - name: Setup Environment (PR) 163 | if: ${{ github.event_name == 'pull_request' }} 164 | shell: bash 165 | run: | 166 | echo "HEAD_COMMIT_SHA=$(git rev-parse origin/${{ github.base_ref }})" >> ${GITHUB_ENV} 167 | - name: Setup Environment (Push) 168 | if: ${{ github.event_name == 'push' || github.event_name == 'merge_group' }} 169 | shell: bash 170 | run: | 171 | echo "HEAD_COMMIT_SHA=$(git rev-parse origin/main)" >> ${GITHUB_ENV} 172 | - name: Check semver 173 | # uses: obi1kenobi/cargo-semver-checks-action@v2 174 | uses: n0-computer/cargo-semver-checks-action@feat-baseline 175 | with: 176 | package: iroh-blobs 177 | baseline-rev: ${{ env.HEAD_COMMIT_SHA }} 178 | use-cache: false 179 | 180 | check_fmt: 181 | timeout-minutes: 30 182 | name: Checking fmt 183 | runs-on: ubuntu-latest 184 | env: 185 | RUSTC_WRAPPER: "sccache" 186 | SCCACHE_GHA_ENABLED: "on" 187 | steps: 188 | - uses: actions/checkout@v6 189 | - uses: dtolnay/rust-toolchain@stable 190 | with: 191 | components: rustfmt 192 | - uses: mozilla-actions/sccache-action@v0.0.9 193 | - uses: taiki-e/install-action@cargo-make 194 | - run: cargo make format-check 195 | 196 | check_docs: 197 | timeout-minutes: 30 198 | name: Checking docs 199 | runs-on: ubuntu-latest 200 | env: 201 | RUSTC_WRAPPER: "sccache" 202 | SCCACHE_GHA_ENABLED: "on" 203 | steps: 204 | - uses: actions/checkout@v6 205 | - uses: dtolnay/rust-toolchain@master 206 | with: 207 | toolchain: nightly-2025-09-28 208 | - name: Install sccache 209 | uses: mozilla-actions/sccache-action@v0.0.9 210 | 211 | - name: Docs 212 | run: cargo doc --workspace --all-features --no-deps --document-private-items 213 | env: 214 | RUSTDOCFLAGS: --cfg docsrs 215 | 216 | clippy_check: 217 | timeout-minutes: 30 218 | runs-on: ubuntu-latest 219 | env: 220 | RUSTC_WRAPPER: "sccache" 221 | SCCACHE_GHA_ENABLED: "on" 222 | steps: 223 | - uses: actions/checkout@v6 224 | - uses: dtolnay/rust-toolchain@stable 225 | with: 226 | components: clippy 227 | - name: Install sccache 228 | uses: mozilla-actions/sccache-action@v0.0.9 229 | 230 | # TODO: We have a bunch of platform-dependent code so should 231 | # probably run this job on the full platform matrix 232 | - name: clippy check (all features) 233 | run: cargo clippy --workspace --all-features --all-targets --bins --tests --benches 234 | 235 | - name: clippy check (no features) 236 | run: cargo clippy --workspace --no-default-features --lib --bins --tests 237 | 
238 | - name: clippy check (default features) 239 | run: cargo clippy --workspace --all-targets 240 | 241 | msrv: 242 | if: "github.event_name != 'pull_request' || ! contains(github.event.pull_request.labels.*.name, 'flaky-test')" 243 | timeout-minutes: 30 244 | name: Minimal Supported Rust Version 245 | runs-on: ubuntu-latest 246 | env: 247 | RUSTC_WRAPPER: "sccache" 248 | SCCACHE_GHA_ENABLED: "on" 249 | steps: 250 | - uses: actions/checkout@v6 251 | - uses: dtolnay/rust-toolchain@master 252 | with: 253 | toolchain: ${{ env.MSRV }} 254 | - name: Install sccache 255 | uses: mozilla-actions/sccache-action@v0.0.9 256 | 257 | - name: Check MSRV all features 258 | run: | 259 | cargo +$MSRV check --workspace --all-targets 260 | 261 | cargo_deny: 262 | timeout-minutes: 30 263 | name: cargo deny 264 | runs-on: ubuntu-latest 265 | steps: 266 | - uses: actions/checkout@v6 267 | - uses: EmbarkStudios/cargo-deny-action@v2 268 | with: 269 | arguments: --workspace --all-features 270 | command: check 271 | command-arguments: "-Dwarnings" 272 | 273 | codespell: 274 | timeout-minutes: 30 275 | runs-on: ubuntu-latest 276 | steps: 277 | - uses: actions/checkout@v6 278 | - run: pip install --user codespell[toml] 279 | - run: codespell --ignore-words-list=ans,atmost,crate,inout,ratatui,ser,stayin,swarmin,worl --skip=CHANGELOG.md 280 | 281 | wasm_build: 282 | name: Build & test wasm32 283 | runs-on: ubuntu-latest 284 | env: 285 | RUSTFLAGS: '--cfg getrandom_backend="wasm_js"' 286 | steps: 287 | - name: Checkout sources 288 | uses: actions/checkout@v6 289 | 290 | - name: Install Node.js 291 | uses: actions/setup-node@v6 292 | with: 293 | node-version: 20 294 | 295 | - name: Install stable toolchain 296 | uses: dtolnay/rust-toolchain@stable 297 | 298 | - name: Add wasm target 299 | run: rustup target add wasm32-unknown-unknown 300 | 301 | - name: Install wasm-tools 302 | uses: bytecodealliance/actions/wasm-tools/setup@v1 303 | 304 | - name: Install wasm-pack 305 | uses: taiki-e/install-action@v2 306 | with: 307 | tool: wasm-bindgen,wasm-pack 308 | 309 | - name: wasm32 build 310 | run: cargo build --target wasm32-unknown-unknown --no-default-features 311 | 312 | # If the Wasm file contains any 'import "env"' declarations, then 313 | # some non-Wasm-compatible code made it into the final code. 314 | - name: Ensure no 'import "env"' in wasm 315 | run: | 316 | ! 
wasm-tools print --skeleton target/wasm32-unknown-unknown/debug/iroh_blobs.wasm | grep 'import "env"' -------------------------------------------------------------------------------- /examples/random_store.rs: -------------------------------------------------------------------------------- 1 | use std::{env, path::PathBuf, str::FromStr}; 2 | 3 | use anyhow::{Context, Result}; 4 | use clap::{Parser, Subcommand}; 5 | use iroh::{discovery::static_provider::StaticProvider, SecretKey}; 6 | use iroh_blobs::{ 7 | api::downloader::Shuffled, 8 | provider::events::{AbortReason, EventMask, EventSender, ProviderMessage}, 9 | store::fs::FsStore, 10 | test::{add_hash_sequences, create_random_blobs}, 11 | HashAndFormat, 12 | }; 13 | use iroh_tickets::endpoint::EndpointTicket; 14 | use irpc::RpcMessage; 15 | use n0_future::StreamExt; 16 | use rand::{rngs::StdRng, Rng, SeedableRng}; 17 | use tokio::signal::ctrl_c; 18 | use tracing::info; 19 | 20 | #[derive(Parser, Debug)] 21 | #[command(author, version, about, long_about = None)] 22 | pub struct Args { 23 | /// Commands to run 24 | #[command(subcommand)] 25 | pub command: Commands, 26 | } 27 | 28 | #[derive(Parser, Debug)] 29 | pub struct CommonArgs { 30 | /// Random seed for reproducible results 31 | #[arg(long)] 32 | pub seed: Option, 33 | 34 | /// Path for store, none for in-memory store 35 | #[arg(long)] 36 | pub path: Option, 37 | } 38 | 39 | #[derive(Subcommand, Debug)] 40 | pub enum Commands { 41 | /// Provide content to the network 42 | Provide(ProvideArgs), 43 | /// Request content from the network 44 | Request(RequestArgs), 45 | } 46 | 47 | #[derive(Parser, Debug)] 48 | pub struct ProvideArgs { 49 | #[command(flatten)] 50 | pub common: CommonArgs, 51 | 52 | /// Number of blobs to generate 53 | #[arg(long, default_value_t = 100)] 54 | pub num_blobs: usize, 55 | 56 | /// Size of each blob in bytes 57 | #[arg(long, default_value_t = 100000)] 58 | pub blob_size: usize, 59 | 60 | /// Number of hash sequences 61 | #[arg(long, default_value_t = 1)] 62 | pub hash_seqs: usize, 63 | 64 | /// Size of each hash sequence 65 | #[arg(long, default_value_t = 100)] 66 | pub hash_seq_size: usize, 67 | 68 | /// Size of each hash sequence 69 | #[arg(long, default_value_t = false)] 70 | pub allow_push: bool, 71 | } 72 | 73 | #[derive(Parser, Debug)] 74 | pub struct RequestArgs { 75 | #[command(flatten)] 76 | pub common: CommonArgs, 77 | 78 | /// Hash of the blob to request 79 | #[arg(long)] 80 | pub content: Vec, 81 | 82 | /// Nodes to request from 83 | pub nodes: Vec, 84 | 85 | /// Split large requests 86 | #[arg(long, default_value_t = false)] 87 | pub split: bool, 88 | } 89 | 90 | pub fn get_or_generate_secret_key() -> Result { 91 | if let Ok(secret) = env::var("IROH_SECRET") { 92 | // Parse the secret key from string 93 | SecretKey::from_str(&secret).context("Invalid secret key format") 94 | } else { 95 | // Generate a new random key 96 | let secret_key = SecretKey::generate(&mut rand::rng()); 97 | let secret_key_str = hex::encode(secret_key.to_bytes()); 98 | println!("Generated new random secret key"); 99 | println!("To reuse this key, set the IROH_SECRET={secret_key_str}"); 100 | Ok(secret_key) 101 | } 102 | } 103 | 104 | pub fn dump_provider_events(allow_push: bool) -> (tokio::task::JoinHandle<()>, EventSender) { 105 | let (tx, mut rx) = EventSender::channel(100, EventMask::ALL_READONLY); 106 | fn dump_updates(mut rx: irpc::channel::mpsc::Receiver) { 107 | tokio::spawn(async move { 108 | while let Ok(Some(update)) = rx.recv().await { 109 | 
println!("{update:?}"); 110 | } 111 | }); 112 | } 113 | let dump_task = tokio::spawn(async move { 114 | while let Some(event) = rx.recv().await { 115 | match event { 116 | ProviderMessage::ClientConnected(msg) => { 117 | println!("{:?}", msg.inner); 118 | msg.tx.send(Ok(())).await.ok(); 119 | } 120 | ProviderMessage::ClientConnectedNotify(msg) => { 121 | println!("{:?}", msg.inner); 122 | } 123 | ProviderMessage::ConnectionClosed(msg) => { 124 | println!("{:?}", msg.inner); 125 | } 126 | ProviderMessage::GetRequestReceived(msg) => { 127 | println!("{:?}", msg.inner); 128 | msg.tx.send(Ok(())).await.ok(); 129 | dump_updates(msg.rx); 130 | } 131 | ProviderMessage::GetRequestReceivedNotify(msg) => { 132 | println!("{:?}", msg.inner); 133 | dump_updates(msg.rx); 134 | } 135 | ProviderMessage::GetManyRequestReceived(msg) => { 136 | println!("{:?}", msg.inner); 137 | msg.tx.send(Ok(())).await.ok(); 138 | dump_updates(msg.rx); 139 | } 140 | ProviderMessage::GetManyRequestReceivedNotify(msg) => { 141 | println!("{:?}", msg.inner); 142 | dump_updates(msg.rx); 143 | } 144 | ProviderMessage::PushRequestReceived(msg) => { 145 | println!("{:?}", msg.inner); 146 | let res = if allow_push { 147 | Ok(()) 148 | } else { 149 | Err(AbortReason::Permission) 150 | }; 151 | msg.tx.send(res).await.ok(); 152 | dump_updates(msg.rx); 153 | } 154 | ProviderMessage::PushRequestReceivedNotify(msg) => { 155 | println!("{:?}", msg.inner); 156 | dump_updates(msg.rx); 157 | } 158 | ProviderMessage::ObserveRequestReceived(msg) => { 159 | println!("{:?}", msg.inner); 160 | let res = if allow_push { 161 | Ok(()) 162 | } else { 163 | Err(AbortReason::Permission) 164 | }; 165 | msg.tx.send(res).await.ok(); 166 | dump_updates(msg.rx); 167 | } 168 | ProviderMessage::ObserveRequestReceivedNotify(msg) => { 169 | println!("{:?}", msg.inner); 170 | dump_updates(msg.rx); 171 | } 172 | ProviderMessage::Throttle(msg) => { 173 | println!("{:?}", msg.inner); 174 | msg.tx.send(Ok(())).await.ok(); 175 | } 176 | } 177 | } 178 | }); 179 | (dump_task, tx) 180 | } 181 | 182 | #[tokio::main] 183 | async fn main() -> Result<()> { 184 | tracing_subscriber::fmt::init(); 185 | let args = Args::parse(); 186 | match args.command { 187 | Commands::Provide(args) => provide(args).await, 188 | Commands::Request(args) => request(args).await, 189 | } 190 | } 191 | 192 | async fn provide(args: ProvideArgs) -> anyhow::Result<()> { 193 | println!("{args:?}"); 194 | let tempdir = if args.common.path.is_none() { 195 | Some(tempfile::tempdir_in(".").context("Failed to create temporary directory")?) 
196 |     } else {
197 |         None
198 |     };
199 |     let path = args
200 |         .common
201 |         .path
202 |         .unwrap_or_else(|| tempdir.as_ref().unwrap().path().to_path_buf());
203 |     let store = FsStore::load(&path).await?;
204 |     println!("Using store at: {}", path.display());
205 |     let mut rng = match args.common.seed {
206 |         Some(seed) => StdRng::seed_from_u64(seed),
207 |         None => StdRng::from_rng(&mut rand::rng()),
208 |     };
209 |     let blobs = create_random_blobs(
210 |         &store,
211 |         args.num_blobs,
212 |         |_, rand| rand.random_range(1..=args.blob_size),
213 |         &mut rng,
214 |     )
215 |     .await?;
216 |     let hs = add_hash_sequences(
217 |         &store,
218 |         &blobs,
219 |         args.hash_seqs,
220 |         |_, rand| rand.random_range(1..=args.hash_seq_size),
221 |         &mut rng,
222 |     )
223 |     .await?;
224 |     println!(
225 |         "Created {} blobs and {} hash sequences",
226 |         blobs.len(),
227 |         hs.len()
228 |     );
229 |     for (i, info) in blobs.iter().enumerate() {
230 |         println!("blob {i} {}", info.hash_and_format());
231 |     }
232 |     for (i, info) in hs.iter().enumerate() {
233 |         println!("hash_seq {i} {}", info.hash_and_format());
234 |     }
235 |     let secret_key = get_or_generate_secret_key()?;
236 |     let endpoint = iroh::Endpoint::builder()
237 |         .secret_key(secret_key)
238 |         .bind()
239 |         .await?;
240 |     let (dump_task, events_tx) = dump_provider_events(args.allow_push);
241 |     let blobs = iroh_blobs::BlobsProtocol::new(&store, Some(events_tx));
242 |     let router = iroh::protocol::Router::builder(endpoint.clone())
243 |         .accept(iroh_blobs::ALPN, blobs)
244 |         .spawn();
245 |     let addr = router.endpoint().addr();
246 |     let ticket = EndpointTicket::from(addr.clone());
247 |     println!("Node address: {addr:?}");
248 |     println!("ticket:\n{ticket}");
249 |     ctrl_c().await?;
250 |     router.shutdown().await?;
251 |     dump_task.abort();
252 |     Ok(())
253 | }
254 | 
255 | async fn request(args: RequestArgs) -> anyhow::Result<()> {
256 |     println!("{args:?}");
257 |     let tempdir = if args.common.path.is_none() {
258 |         Some(tempfile::tempdir_in(".").context("Failed to create temporary directory")?)
259 |     } else {
260 |         None
261 |     };
262 |     let path = args
263 |         .common
264 |         .path
265 |         .unwrap_or_else(|| tempdir.as_ref().unwrap().path().to_path_buf());
266 |     let store = FsStore::load(&path).await?;
267 |     println!("Using store at: {}", path.display());
268 |     let sp = StaticProvider::new();
269 |     let endpoint = iroh::Endpoint::builder()
270 |         .discovery(sp.clone())
271 |         .bind()
272 |         .await?;
273 |     let downloader = store.downloader(&endpoint);
274 |     for ticket in &args.nodes {
275 |         sp.add_endpoint_info(ticket.endpoint_addr().clone());
276 |     }
277 |     let nodes = args
278 |         .nodes
279 |         .iter()
280 |         .map(|ticket| ticket.endpoint_addr().id)
281 |         .collect::<Vec<_>>();
282 |     for content in args.content {
283 |         let mut progress = downloader
284 |             .download(content, Shuffled::new(nodes.clone()))
285 |             .stream()
286 |             .await?;
287 |         while let Some(event) = progress.next().await {
288 |             info!("Progress: {:?}", event);
289 |         }
290 |     }
291 |     let hashes = store.list().hashes().await?;
292 |     for hash in hashes {
293 |         println!("Got {hash}");
294 |     }
295 |     store.dump().await?;
296 |     Ok(())
297 | }
298 |
--------------------------------------------------------------------------------
/src/store/fs/entry_state.rs:
--------------------------------------------------------------------------------
1 | use std::{fmt::Debug, path::PathBuf};
2 | 
3 | use serde::{Deserialize, Serialize};
4 | use smallvec::SmallVec;
5 | 
6 | use super::meta::{ActorError, ActorResult};
7 | use crate::store::util::SliceInfoExt;
8 | 
9 | /// Location of the data.
10 | ///
11 | /// Data can be inlined in the database, a file conceptually owned by the store,
12 | /// or a number of external files conceptually owned by the user.
13 | ///
14 | /// Only complete data can be inlined.
15 | #[derive(Debug, Serialize, Deserialize, PartialEq, Eq)]
16 | pub enum DataLocation<I, E> {
17 |     /// Data is in the inline_data table.
18 |     Inline(I),
19 |     /// Data is in the canonical location in the data directory.
20 |     Owned(E),
21 |     /// Data is in several external locations. This should be a non-empty list.
22 |     External(Vec<PathBuf>, E),
23 | }
24 | 
25 | impl<I: AsRef<[u8]>, E: Debug> DataLocation<I, E> {
26 |     fn fmt_short(&self) -> String {
27 |         match self {
28 |             DataLocation::Inline(d) => {
29 |                 format!("Inline({}, addr={})", d.as_ref().len(), d.addr_short())
30 |             }
31 |             DataLocation::Owned(e) => format!("Owned({e:?})"),
32 |             DataLocation::External(paths, e) => {
33 |                 let paths = paths.iter().map(|p| p.display()).collect::<Vec<_>>();
34 |                 format!("External({paths:?}, {e:?})")
35 |             }
36 |         }
37 |     }
38 | }
39 | 
40 | impl<I> DataLocation<I, u64> {
41 |     #[allow(clippy::result_large_err)]
42 |     fn union(self, that: DataLocation<I, u64>) -> ActorResult<Self> {
43 |         Ok(match (self, that) {
44 |             (
45 |                 DataLocation::External(mut paths, a_size),
46 |                 DataLocation::External(b_paths, b_size),
47 |             ) => {
48 |                 if a_size != b_size {
49 |                     return Err(ActorError::inconsistent(format!(
50 |                         "complete size mismatch {a_size} {b_size}"
51 |                     )));
52 |                 }
53 |                 paths.extend(b_paths);
54 |                 paths.sort();
55 |                 paths.dedup();
56 |                 DataLocation::External(paths, a_size)
57 |             }
58 |             (_, b @ DataLocation::Owned(_)) => {
59 |                 // owned needs to win, since it has an associated file. Choosing
60 |                 // external would orphan the file.
61 |                 b
62 |             }
63 |             (a @ DataLocation::Owned(_), _) => {
64 |                 // owned needs to win, since it has an associated file. Choosing
65 |                 // external would orphan the file.
66 |                 a
67 |             }
68 |             (_, b @ DataLocation::Inline(_)) => {
69 |                 // inline needs to win, since it has the associated data. Choosing
70 |                 // external would lose the inline data.
71 |                 b
72 |             }
73 |             (a @ DataLocation::Inline(_), _) => {
74 |                 // inline needs to win, since it has the associated data. Choosing
75 |                 // external would lose the inline data.
76 |                 a
77 |             }
78 |         })
79 |     }
80 | }
81 | 
82 | impl<I, E> DataLocation<I, E> {
83 |     #[allow(dead_code)]
84 |     pub fn discard_inline_data(self) -> DataLocation<(), E> {
85 |         match self {
86 |             DataLocation::Inline(_) => DataLocation::Inline(()),
87 |             DataLocation::Owned(x) => DataLocation::Owned(x),
88 |             DataLocation::External(paths, x) => DataLocation::External(paths, x),
89 |         }
90 |     }
91 | 
92 |     pub fn split_inline_data(self) -> (DataLocation<(), E>, Option<I>) {
93 |         match self {
94 |             DataLocation::Inline(x) => (DataLocation::Inline(()), Some(x)),
95 |             DataLocation::Owned(x) => (DataLocation::Owned(x), None),
96 |             DataLocation::External(paths, x) => (DataLocation::External(paths, x), None),
97 |         }
98 |     }
99 | }
100 | 
101 | /// Location of the outboard.
102 | ///
103 | /// Outboard can be inlined in the database or a file conceptually owned by the store.
104 | /// Outboards are implementation specific to the store and as such are always owned.
105 | ///
106 | /// Only complete outboards can be inlined.
107 | #[derive(Debug, Serialize, Deserialize, PartialEq, Eq)]
108 | pub enum OutboardLocation<I> {
109 |     /// Outboard is in the inline_outboard table.
110 |     Inline(I),
111 |     /// Outboard is in the canonical location in the data directory.
112 |     Owned,
113 |     /// Outboard is not needed
114 |     NotNeeded,
115 | }
116 | 
117 | impl<I: AsRef<[u8]>> OutboardLocation<I> {
118 |     fn fmt_short(&self) -> String {
119 |         match self {
120 |             OutboardLocation::Inline(d) => format!("Inline({})", d.as_ref().len()),
121 |             OutboardLocation::Owned => "Owned".to_string(),
122 |             OutboardLocation::NotNeeded => "NotNeeded".to_string(),
123 |         }
124 |     }
125 | }
126 | 
127 | impl<I> OutboardLocation<I> {
128 |     pub fn inline(data: I) -> Self
129 |     where
130 |         I: AsRef<[u8]>,
131 |     {
132 |         if data.as_ref().is_empty() {
133 |             OutboardLocation::NotNeeded
134 |         } else {
135 |             OutboardLocation::Inline(data)
136 |         }
137 |     }
138 | 
139 |     #[allow(dead_code)]
140 |     pub fn discard_extra_data(self) -> OutboardLocation<()> {
141 |         match self {
142 |             Self::Inline(_) => OutboardLocation::Inline(()),
143 |             Self::Owned => OutboardLocation::Owned,
144 |             Self::NotNeeded => OutboardLocation::NotNeeded,
145 |         }
146 |     }
147 | 
148 |     pub fn split_inline_data(self) -> (OutboardLocation<()>, Option<I>) {
149 |         match self {
150 |             Self::Inline(x) => (OutboardLocation::Inline(()), Some(x)),
151 |             Self::Owned => (OutboardLocation::Owned, None),
152 |             Self::NotNeeded => (OutboardLocation::NotNeeded, None),
153 |         }
154 |     }
155 | }
156 | 
157 | /// The information about an entry that we keep in the entry table for quick access.
158 | ///
159 | /// The exact info to store here is TBD, so usually you should use the accessor methods.
160 | #[derive(Debug, Serialize, Deserialize, PartialEq, Eq)]
161 | pub enum EntryState<I> {
162 |     /// For a complete entry we always know the size. It does not make much sense
163 |     /// to write to a complete entry, so they are much easier to share.
164 |     Complete {
165 |         /// Location of the data.
166 |         data_location: DataLocation<I, u64>,
167 |         /// Location of the outboard.
168 |         outboard_location: OutboardLocation<I>,
169 |     },
170 |     /// Partial entries are entries for which we know the hash, but don't have
171 |     /// all the data. They are created when syncing from somewhere else by hash.
172 |     ///
173 |     /// As such they are always owned. There is also no inline storage for them.
174 |     /// Non short lived partial entries always live in the file system, and for
175 |     /// short lived ones we never create a database entry in the first place.
176 |     Partial {
177 |         /// Once we get the last chunk of a partial entry, we have validated
178 |         /// the size of the entry despite it still being incomplete.
179 |         ///
180 |         /// E.g. a giant file where we just requested the last chunk.
181 |         size: Option<u64>,
182 |     },
183 | }
184 | 
185 | impl<I> EntryState<I> {
186 |     pub fn is_complete(&self) -> bool {
187 |         matches!(self, Self::Complete { .. })
188 |     }
189 | 
190 |     pub fn is_partial(&self) -> bool {
191 |         matches!(self, Self::Partial { .. })
192 |     }
193 | }
194 | 
195 | impl<I> Default for EntryState<I> {
196 |     fn default() -> Self {
197 |         Self::Partial { size: None }
198 |     }
199 | }
200 | 
201 | impl<I: AsRef<[u8]>> EntryState<I> {
202 |     pub fn fmt_short(&self) -> String {
203 |         match self {
204 |             Self::Complete {
205 |                 data_location,
206 |                 outboard_location,
207 |             } => format!(
208 |                 "Complete {{ data: {}, outboard: {} }}",
209 |                 data_location.fmt_short(),
210 |                 outboard_location.fmt_short()
211 |             ),
212 |             Self::Partial { size } => format!("Partial {{ size: {size:?} }}"),
213 |         }
214 |     }
215 | }
216 | 
217 | impl<I> EntryState<I> {
218 |     #[allow(clippy::result_large_err)]
219 |     pub fn union(old: Self, new: Self) -> ActorResult<Self> {
220 |         match (old, new) {
221 |             (
222 |                 Self::Complete {
223 |                     data_location,
224 |                     outboard_location,
225 |                 },
226 |                 Self::Complete {
227 |                     data_location: b_data_location,
228 |                     ..
229 |                 },
230 |             ) => Ok(Self::Complete {
231 |                 // combine external paths if needed
232 |                 data_location: data_location.union(b_data_location)?,
233 |                 outboard_location,
234 |             }),
235 |             (a @ Self::Complete { .. }, Self::Partial { .. }) =>
236 |             // complete wins over partial
237 |             {
238 |                 Ok(a)
239 |             }
240 |             (Self::Partial { .. }, b @ Self::Complete { .. }) =>
241 |             // complete wins over partial
242 |             {
243 |                 Ok(b)
244 |             }
245 |             (Self::Partial { size: a_size }, Self::Partial { size: b_size }) =>
246 |             // keep known size from either entry
247 |             {
248 |                 let size = match (a_size, b_size) {
249 |                     (Some(a_size), Some(b_size)) => {
250 |                         // validated sizes are different. this means that at
251 |                         // least one validation was wrong, which would be a bug
252 |                         // in bao-tree.
253 |                         if a_size != b_size {
254 |                             return Err(ActorError::inconsistent(format!(
255 |                                 "validated size mismatch {a_size} {b_size}"
256 |                             )));
257 |                         }
258 |                         Some(a_size)
259 |                     }
260 |                     (Some(a_size), None) => Some(a_size),
261 |                     (None, Some(b_size)) => Some(b_size),
262 |                     (None, None) => None,
263 |                 };
264 |                 Ok(Self::Partial { size })
265 |             }
266 |         }
267 |     }
268 | }
269 | 
270 | impl redb::Value for EntryState<bytes::Bytes> {
271 |     type SelfType<'a> = EntryState<bytes::Bytes>;
272 | 
273 |     type AsBytes<'a> = SmallVec<[u8; 128]>;
274 | 
275 |     fn fixed_width() -> Option<usize> {
276 |         None
277 |     }
278 | 
279 |     fn from_bytes<'a>(data: &'a [u8]) -> Self::SelfType<'a>
280 |     where
281 |         Self: 'a,
282 |     {
283 |         postcard::from_bytes(data).unwrap()
284 |     }
285 | 
286 |     fn as_bytes<'a, 'b: 'a>(value: &'a Self::SelfType<'b>) -> Self::AsBytes<'a>
287 |     where
288 |         Self: 'a,
289 |         Self: 'b,
290 |     {
291 |         postcard::to_extend(value, SmallVec::new()).unwrap()
292 |     }
293 | 
294 |     fn type_name() -> redb::TypeName {
295 |         redb::TypeName::new("EntryState")
296 |     }
297 | }
298 |
--------------------------------------------------------------------------------
/LICENSE-APACHE:
--------------------------------------------------------------------------------
1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof.
47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [2023] [N0, INC] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | --------------------------------------------------------------------------------
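A short illustration of the EntryState merge rules in src/store/fs/entry_state.rs above: union is biased toward keeping information, so a complete entry wins over a partial one, and two partial entries keep whichever size has already been validated. This is a minimal sketch, not part of the crate: it assumes the generic parameters reconstructed above (EntryState<I> with u64 sizes) and crate-internal visibility of the entry_state module; Vec<u8> as the inline-data type and the function name union_examples are arbitrary choices for illustration.

    // Hypothetical, crate-internal sketch of EntryState::union precedence.
    use crate::store::fs::entry_state::{DataLocation, EntryState, OutboardLocation};

    fn union_examples() {
        // A partial entry with a validated size ...
        let partial: EntryState<Vec<u8>> = EntryState::Partial { size: Some(1024) };
        // ... and a complete entry whose data is owned by the store.
        let complete = EntryState::Complete {
            data_location: DataLocation::Owned(1024u64),
            outboard_location: OutboardLocation::NotNeeded,
        };
        // Complete wins over partial, regardless of argument order.
        assert!(EntryState::union(partial, complete).unwrap().is_complete());

        // Two partial entries keep whichever size has been validated.
        let merged = EntryState::union(
            EntryState::<Vec<u8>>::Partial { size: None },
            EntryState::Partial { size: Some(2048) },
        )
        .unwrap();
        assert_eq!(merged, EntryState::Partial { size: Some(2048) });
    }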