├── docs └── willow-over-quic.md ├── release.toml ├── .gitignore ├── .gitattributes ├── src ├── rpc.rs ├── util │ ├── time.rs │ ├── codec2.rs │ ├── queue.rs │ ├── codec.rs │ ├── gen_stream.rs │ ├── stream.rs │ └── pipe.rs ├── lib.rs ├── proto.rs ├── util.rs ├── proto │ ├── wgps.rs │ ├── wgps │ │ ├── challenge.rs │ │ ├── handles.rs │ │ ├── fingerprint.rs │ │ └── channels.rs │ ├── pai.rs │ ├── grouping.rs │ └── meadowcap.rs ├── session │ ├── static_tokens.rs │ ├── challenge.rs │ ├── resource.rs │ ├── channels.rs │ ├── capabilities.rs │ ├── payload.rs │ ├── error.rs │ ├── data.rs │ └── aoi_finder.rs ├── store.rs ├── session.rs ├── form.rs ├── store │ ├── persistent │ │ └── tables.rs │ └── willow_store_glue.rs └── engine.rs ├── .github ├── workflows │ ├── commit.yml │ ├── cleanup.yaml │ ├── beta.yaml │ ├── netsim.yml │ ├── docs.yaml │ ├── test_relay_server.yml │ ├── flaky.yaml │ ├── docker.yaml │ ├── tests.yaml │ ├── ci.yml │ └── netsim_runner.yaml ├── dependabot.yml ├── ansible │ └── redeploy-relay.yml └── pull_request_template.md ├── proptest-regressions └── store │ └── glue.txt ├── .typos.toml ├── .config └── nextest.toml ├── Makefile.toml ├── LICENSE-MIT ├── deny.toml ├── .img └── iroh_wordmark.svg ├── code_of_conduct.md ├── cliff.toml ├── README.md ├── Cargo.toml └── CONTRIBUTING.md /docs/willow-over-quic.md: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /release.toml: -------------------------------------------------------------------------------- 1 | tag-prefix = "" 2 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | iroh.config.toml 3 | -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | # Set line endings to LF, even on Windows. Otherwise, execution within CI fails. 2 | # See https://help.github.com/articles/dealing-with-line-endings/ 3 | *.sh text eol=lf -------------------------------------------------------------------------------- /src/rpc.rs: -------------------------------------------------------------------------------- 1 | pub mod client; 2 | pub mod handler; 3 | pub mod proto; 4 | 5 | type RpcClient> = 6 | quic_rpc::RpcClient; 7 | -------------------------------------------------------------------------------- /src/util/time.rs: -------------------------------------------------------------------------------- 1 | use std::time::SystemTime; 2 | 3 | /// Returns the current system time in microseconds since [`SystemTime::UNIX_EPOCH`]. 4 | pub fn system_time_now() -> u64 { 5 | SystemTime::now() 6 | .duration_since(SystemTime::UNIX_EPOCH) 7 | .expect("time drift") 8 | .as_micros() as u64 9 | } 10 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | //! 
Implementation of willow 2 | 3 | #![allow(missing_docs)] 4 | #![deny(unsafe_code)] 5 | 6 | pub mod engine; 7 | pub mod form; 8 | pub mod interest; 9 | pub(crate) mod net; 10 | pub mod proto; 11 | pub mod rpc; 12 | pub mod session; 13 | pub mod store; 14 | pub mod util; 15 | 16 | pub use engine::Engine; 17 | pub use net::ALPN; 18 | -------------------------------------------------------------------------------- /src/proto.rs: -------------------------------------------------------------------------------- 1 | //! Protocol data types used in willow. 2 | //! 3 | //! These are mostly type aliases onto [`willow-rs`] types, with some additional helpers. 4 | //! 5 | //! This module also contains the crypthographic primitives for fingerprints and private area 6 | //! intersection. 7 | 8 | pub mod data_model; 9 | pub mod grouping; 10 | pub mod keys; 11 | pub mod meadowcap; 12 | pub mod pai; 13 | pub mod wgps; 14 | -------------------------------------------------------------------------------- /.github/workflows/commit.yml: -------------------------------------------------------------------------------- 1 | name: Commits 2 | 3 | on: 4 | pull_request: 5 | branches: [main] 6 | types: [opened, edited, synchronize] 7 | 8 | env: 9 | IROH_FORCE_STAGING_RELAYS: "1" 10 | 11 | jobs: 12 | check-for-cc: 13 | runs-on: ubuntu-latest 14 | steps: 15 | - name: check-for-cc 16 | id: check-for-cc 17 | uses: agenthunt/conventional-commit-checker-action@v2.0.1 18 | with: 19 | pr-title-regex: "^(.+)(?:(([^)s]+)))?!?: (.+)" 20 | -------------------------------------------------------------------------------- /proptest-regressions/store/glue.txt: -------------------------------------------------------------------------------- 1 | # Seeds for failure cases proptest has generated in the past. It is 2 | # automatically read and these particular cases re-run before any 3 | # novel cases are generated. 4 | # 5 | # It is recommended to check this file in to source control so that 6 | # everyone who runs the test benefits from these saved cases. 7 | cc fce9116f574b43cdad834dcc3e61661a64a69c6b465cc1e453d7bd9875397b24 # shrinks to input = _PropStoredTimestampOrdMatchesU64OrdArgs { num: 11844874737747751936, other: 1 } 8 | -------------------------------------------------------------------------------- /.typos.toml: -------------------------------------------------------------------------------- 1 | [files] 2 | # We can't change the `CHANGELOG` without changing old commit message 3 | extend-exclude = [ 4 | "CHANGELOG_old.md", 5 | "CHANGELOG.md" 6 | ] 7 | 8 | [default] 9 | extend-ignore-re = [ 10 | # Line ignore with trailing `// spellchecker:disable-line` 11 | "(?Rm)^.*(#|//)\\s*spellchecker:disable-line$", 12 | 13 | # Abbreviations 14 | "UPnP", 15 | 16 | # Library name 17 | "tung", 18 | 19 | # Hex 20 | "ba", 21 | ] 22 | 23 | [default.extend-words] 24 | # Library name 25 | ratatui = "ratatui" 26 | -------------------------------------------------------------------------------- /.config/nextest.toml: -------------------------------------------------------------------------------- 1 | [test-groups] 2 | run-in-isolation = { max-threads = 32 } 3 | # these are tests that must not run with other tests concurrently. All tests in 4 | # this group can take up at most 32 threads among them, but each one requiring 5 | # 16 threads also. The effect should be that tests run isolated. 
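# Note: with the override below, each test in this group declares
# threads-required = 32, which equals the group's max-threads, so in effect
# only one test from the group runs at any given time.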
6 | 7 | [[profile.ci.overrides]] 8 | filter = 'test(::run_in_isolation::)' 9 | test-group = 'run-in-isolation' 10 | threads-required = 32 11 | 12 | [profile.default] 13 | slow-timeout = { period = "20s", terminate-after = 3 } 14 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | # Keep GitHub Actions up to date with GitHub's Dependabot... 2 | # https://docs.github.com/en/code-security/dependabot/working-with-dependabot/keeping-your-actions-up-to-date-with-dependabot 3 | # https://docs.github.com/en/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file#package-ecosystem 4 | version: 2 5 | updates: 6 | - package-ecosystem: github-actions 7 | directory: / 8 | groups: 9 | github-actions: 10 | patterns: 11 | - "*" # Group all Actions updates into a single larger pull request 12 | schedule: 13 | interval: weekly 14 | -------------------------------------------------------------------------------- /.github/ansible/redeploy-relay.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Update iroh-relay node 3 | hosts: derper 4 | become: yes 5 | 6 | tasks: 7 | - name: Fetch iroh-relay binary 8 | get_url: 9 | url: https://vorc.s3.us-east-2.amazonaws.com/iroh-relay-linux-amd64-{{ relay_version }} 10 | mode: '0755' 11 | force: yes 12 | dest: /usr/local/bin/iroh-relay 13 | - name: Allow ports 14 | shell: 15 | cmd: sudo setcap CAP_NET_BIND_SERVICE=+eip /usr/local/bin/iroh-relay 16 | - name: Make sure iroh-relay is started 17 | ansible.builtin.systemd: 18 | state: restarted 19 | enabled: yes 20 | name: iroh-relay 21 | -------------------------------------------------------------------------------- /.github/pull_request_template.md: -------------------------------------------------------------------------------- 1 | ## Description 2 | 3 | 4 | 5 | ## Breaking Changes 6 | 7 | 8 | 9 | ## Notes & open questions 10 | 11 | 12 | 13 | ## Change checklist 14 | 15 | - [ ] Self-review. 16 | - [ ] Documentation updates following the [style guide](https://rust-lang.github.io/rfcs/1574-more-api-documentation-conventions.html#appendix-a-full-conventions-text), if relevant. 17 | - [ ] Tests if relevant. 18 | - [ ] All breaking changes documented. 19 | -------------------------------------------------------------------------------- /src/util.rs: -------------------------------------------------------------------------------- 1 | //! Various utilities and data structures used in this crate. 2 | 3 | pub mod channel; 4 | pub mod codec; 5 | pub mod codec2; 6 | pub mod gen_stream; 7 | pub mod pipe; 8 | pub mod queue; 9 | pub mod stream; 10 | pub mod time; 11 | 12 | /// Increment a fixed-length byte string by one, by incrementing the last byte that is not 255 by one. 13 | /// 14 | /// Returns None if all bytes are 255. 15 | pub fn increment_by_one(value: &[u8; N]) -> Option<[u8; N]> { 16 | let mut out = *value; 17 | for char in out.iter_mut().rev() { 18 | if *char != 255 { 19 | *char += 1; 20 | return Some(out); 21 | } else { 22 | *char = 0; 23 | } 24 | } 25 | None 26 | } 27 | -------------------------------------------------------------------------------- /src/proto/wgps.rs: -------------------------------------------------------------------------------- 1 | //! Types and helpers for the Willow General Purpose Sync protocol. 
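//!
//! The submodules define the access challenge, the logical channels, range
//! fingerprints, resource handles, and the message types exchanged during a
//! sync session.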
2 | 3 | mod challenge; 4 | mod channels; 5 | mod fingerprint; 6 | mod handles; 7 | mod messages; 8 | 9 | pub use challenge::*; 10 | pub use channels::*; 11 | pub use fingerprint::*; 12 | pub use handles::*; 13 | pub use messages::*; 14 | 15 | pub const MAX_PAYLOAD_SIZE_POWER: u8 = 18; 16 | 17 | /// The maximum payload size limits when the other peer may include Payloads directly when transmitting Entries: 18 | /// when an Entry’s payload_length is strictly greater than the maximum payload size, 19 | /// its Payload may only be transmitted when explicitly requested. 20 | /// 21 | /// The value is 256KiB. 22 | pub const MAX_PAYLOAD_SIZE: usize = 2usize.pow(MAX_PAYLOAD_SIZE_POWER as u32); 23 | -------------------------------------------------------------------------------- /Makefile.toml: -------------------------------------------------------------------------------- 1 | # Use cargo-make to run tasks here: https://crates.io/crates/cargo-make 2 | 3 | [tasks.format] 4 | workspace = false 5 | command = "cargo" 6 | args = [ 7 | "fmt", 8 | "--all", 9 | "--", 10 | "--config", 11 | "unstable_features=true", 12 | "--config", 13 | "imports_granularity=Crate,group_imports=StdExternalCrate,reorder_imports=true,format_code_in_doc_comments=true", 14 | ] 15 | 16 | [tasks.format-check] 17 | workspace = false 18 | command = "cargo" 19 | args = [ 20 | "fmt", 21 | "--all", 22 | "--check", 23 | "--", 24 | "--config", 25 | "unstable_features=true", 26 | "--config", 27 | "imports_granularity=Crate,group_imports=StdExternalCrate,reorder_imports=true,format_code_in_doc_comments=true", 28 | ] 29 | -------------------------------------------------------------------------------- /LICENSE-MIT: -------------------------------------------------------------------------------- 1 | Copyright 2023 N0, INC. 2 | 3 | Permission is hereby granted, free of charge, to any 4 | person obtaining a copy of this software and associated 5 | documentation files (the "Software"), to deal in the 6 | Software without restriction, including without 7 | limitation the rights to use, copy, modify, merge, 8 | publish, distribute, sublicense, and/or sell copies of 9 | the Software, and to permit persons to whom the Software 10 | is furnished to do so, subject to the following 11 | conditions: 12 | 13 | The above copyright notice and this permission notice 14 | shall be included in all copies or substantial portions 15 | of the Software. 16 | 17 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF 18 | ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED 19 | TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A 20 | PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT 21 | SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY 22 | CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 23 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR 24 | IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 25 | DEALINGS IN THE SOFTWARE. 
26 | -------------------------------------------------------------------------------- /.github/workflows/cleanup.yaml: -------------------------------------------------------------------------------- 1 | # Run tests using the beta Rust compiler 2 | 3 | name: Cleanup 4 | 5 | on: 6 | schedule: 7 | # 06:50 UTC every Monday 8 | - cron: '50 6 * * 1' 9 | workflow_dispatch: 10 | 11 | concurrency: 12 | group: beta-${{ github.workflow }}-${{ github.ref }} 13 | cancel-in-progress: true 14 | 15 | env: 16 | IROH_FORCE_STAGING_RELAYS: "1" 17 | 18 | jobs: 19 | clean_docs_branch: 20 | permissions: 21 | issues: write 22 | contents: write 23 | runs-on: ubuntu-latest 24 | steps: 25 | - name: Checkout 26 | uses: actions/checkout@v6 27 | with: 28 | ref: generated-docs-preview 29 | - name: Clean docs branch 30 | run: | 31 | cd pr/ 32 | # keep the last 25 prs 33 | dirs=$(ls -1d [0-9]* | sort -n) 34 | total_dirs=$(echo "$dirs" | wc -l) 35 | dirs_to_remove=$(echo "$dirs" | head -n $(($total_dirs - 25))) 36 | if [ -n "$dirs_to_remove" ]; then 37 | echo "$dirs_to_remove" | xargs rm -rf 38 | fi 39 | git add . 40 | git commit -m "Cleanup old docs" 41 | git push 42 | 43 | 44 | 45 | 46 | -------------------------------------------------------------------------------- /src/util/codec2.rs: -------------------------------------------------------------------------------- 1 | use std::convert::Infallible; 2 | 3 | use ufotofu::sync::{consumer::IntoVec, producer::FromSlice}; 4 | use willow_encoding::{ 5 | sync::{Decodable, Encodable, RelativeDecodable, RelativeEncodable}, 6 | DecodeError, 7 | }; 8 | 9 | pub fn from_bytes(data: &[u8]) -> Result> { 10 | let mut producer = FromSlice::new(data); 11 | let decoded = T::decode(&mut producer)?; 12 | Ok(decoded) 13 | } 14 | 15 | pub fn to_vec(item: &T) -> Vec { 16 | let mut consumer = IntoVec::new(); 17 | item.encode(&mut consumer).expect("infallible"); 18 | consumer.into_vec() 19 | } 20 | 21 | pub fn from_bytes_relative, U>( 22 | previous: &U, 23 | data: &[u8], 24 | ) -> Result> { 25 | let mut producer = FromSlice::new(data); 26 | let decoded = T::relative_decode(previous, &mut producer)?; 27 | Ok(decoded) 28 | } 29 | 30 | pub fn to_vec_relative, U>(previous: &U, item: &T) -> Vec { 31 | let mut consumer = IntoVec::new(); 32 | item.relative_encode(previous, &mut consumer) 33 | .expect("infallible"); 34 | consumer.into_vec() 35 | } 36 | -------------------------------------------------------------------------------- /deny.toml: -------------------------------------------------------------------------------- 1 | [bans] 2 | multiple-versions = "allow" 3 | deny = [ 4 | "aws-lc", 5 | "aws-lc-rs", 6 | "aws-lc-sys", 7 | "native-tls", 8 | "openssl", 9 | ] 10 | 11 | [licenses] 12 | allow = [ 13 | "Apache-2.0", 14 | "Apache-2.0 WITH LLVM-exception", 15 | "BSD-2-Clause", 16 | "BSD-3-Clause", 17 | "BSL-1.0", # BOSL license 18 | "ISC", 19 | "MIT", 20 | "Zlib", 21 | "MPL-2.0", # https://fossa.com/blog/open-source-software-licenses-101-mozilla-public-license-2-0/ 22 | "CC-PDDC", # https://spdx.org/licenses/CC-PDDC.html 23 | "Unicode-3.0", 24 | ] 25 | 26 | [[licenses.clarify]] 27 | name = "ring" 28 | expression = "MIT AND ISC AND OpenSSL" 29 | license-files = [ 30 | { path = "LICENSE", hash = 0xbd0eed23 }, 31 | ] 32 | 33 | [advisories] 34 | ignore = [ 35 | "RUSTSEC-2024-0370", # unmaintained, no upgrade available 36 | "RUSTSEC-2024-0384", # unmaintained, no upgrade available 37 | "RUSTSEC-2024-0436", # unmaintained (dtolnay/paste) 38 | ] 39 | 40 | [sources] 41 | allow-git = [ 42 | 
"https://github.com/n0-computer/iroh-blobs.git", 43 | ] 44 | 45 | # TODO(Frando): added for iroh-willow development, maybe remove again before release? 46 | [sources.allow-org] 47 | github = ["n0-computer"] 48 | -------------------------------------------------------------------------------- /.github/workflows/beta.yaml: -------------------------------------------------------------------------------- 1 | # Run tests using the beta Rust compiler 2 | 3 | name: Beta Rust 4 | 5 | on: 6 | schedule: 7 | # 06:50 UTC every Monday 8 | - cron: '50 6 * * 1' 9 | workflow_dispatch: 10 | 11 | concurrency: 12 | group: beta-${{ github.workflow }}-${{ github.ref }} 13 | cancel-in-progress: true 14 | 15 | env: 16 | IROH_FORCE_STAGING_RELAYS: "1" 17 | 18 | jobs: 19 | tests: 20 | uses: './.github/workflows/tests.yaml' 21 | with: 22 | rust-version: beta 23 | notify: 24 | needs: tests 25 | if: ${{ always() }} 26 | runs-on: ubuntu-latest 27 | steps: 28 | - name: Extract test results 29 | run: | 30 | printf '${{ toJSON(needs) }}\n' 31 | result=$(echo '${{ toJSON(needs) }}' | jq -r .tests.result) 32 | echo TESTS_RESULT=$result 33 | echo "TESTS_RESULT=$result" >>"$GITHUB_ENV" 34 | - name: Notify discord on failure 35 | uses: n0-computer/discord-webhook-notify@v1 36 | if: ${{ env.TESTS_RESULT == 'failure' }} 37 | with: 38 | severity: error 39 | details: | 40 | Rustc beta tests failed 41 | See https://github.com/${{ github.repository }}/actions/workflows/beta.yaml 42 | webhookUrl: ${{ secrets.DISCORD_N0_GITHUB_CHANNEL_WEBHOOK_URL }} 43 | 44 | -------------------------------------------------------------------------------- /.img/iroh_wordmark.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /src/proto/wgps/challenge.rs: -------------------------------------------------------------------------------- 1 | use iroh_blobs::Hash; 2 | use rand::Rng; 3 | use rand_core::CryptoRngCore; 4 | use serde::{Deserialize, Serialize}; 5 | 6 | use crate::proto::data_model::DIGEST_LENGTH; 7 | 8 | pub const CHALLENGE_LENGTH: usize = 32; 9 | pub const CHALLENGE_HASH_LENGTH: usize = DIGEST_LENGTH; 10 | 11 | #[derive(derive_more::Debug, Copy, Clone, Eq, PartialEq, Serialize, Deserialize)] 12 | pub struct ChallengeHash( 13 | #[debug("{}..", data_encoding::HEXLOWER.encode(&self.0))] [u8; CHALLENGE_HASH_LENGTH], 14 | ); 15 | 16 | impl ChallengeHash { 17 | pub fn as_bytes(&self) -> &[u8] { 18 | &self.0 19 | } 20 | 21 | pub fn from_bytes(bytes: [u8; CHALLENGE_HASH_LENGTH]) -> Self { 22 | Self(bytes) 23 | } 24 | } 25 | 26 | #[derive(derive_more::Debug, Copy, Clone, Serialize, Deserialize, Eq, PartialEq)] 27 | pub struct AccessChallenge( 28 | #[debug("{}..", data_encoding::HEXLOWER.encode(&self.0))] AccessChallengeBytes, 29 | ); 30 | 31 | pub type AccessChallengeBytes = [u8; CHALLENGE_LENGTH]; 32 | 33 | impl Default for AccessChallenge { 34 | fn default() -> Self { 35 | Self::generate() 36 | } 37 | } 38 | 39 | impl AccessChallenge { 40 | pub fn generate() -> Self { 41 | Self(rand::random()) 42 | } 43 | 44 | pub fn generate_with_rng(rng: &mut impl CryptoRngCore) -> Self { 45 | Self(rng.gen()) 46 | } 47 | 48 | pub fn as_bytes(&self) -> &[u8] { 49 | &self.0 50 | } 51 | 52 | pub fn to_bytes(&self) -> [u8; 32] { 53 | self.0 54 | } 55 | 56 | pub fn hash(&self) -> ChallengeHash { 57 | ChallengeHash(*Hash::new(self.0).as_bytes()) 58 | } 59 | } 60 | -------------------------------------------------------------------------------- 
/.github/workflows/netsim.yml: -------------------------------------------------------------------------------- 1 | name: netsim-CI 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | workflow_dispatch: 8 | inputs: 9 | pr_number: 10 | required: true 11 | type: string 12 | branch: 13 | required: true 14 | type: string 15 | netsim_branch: 16 | required: true 17 | type: string 18 | default: "main" 19 | report_table: 20 | required: false 21 | type: boolean 22 | default: false 23 | 24 | env: 25 | RUST_BACKTRACE: 1 26 | RUSTFLAGS: -Dwarnings 27 | MSRV: "1.66" 28 | SCCACHE_GHA_ENABLED: "true" 29 | RUSTC_WRAPPER: "sccache" 30 | IROH_FORCE_STAGING_RELAYS: "1" 31 | 32 | jobs: 33 | netsim-release: 34 | permissions: write-all 35 | if: ${{github.ref_name=='main' && github.event_name == 'push'}} 36 | uses: './.github/workflows/netsim_runner.yaml' 37 | secrets: inherit 38 | with: 39 | branch: "main" 40 | max_workers: 1 41 | netsim_branch: "main" 42 | sim_paths: "sims/iroh,sims/integration" 43 | pr_number: "" 44 | publish_metrics: true 45 | build_profile: "optimized-release" 46 | 47 | netsim-perf: 48 | permissions: write-all 49 | if: ${{github.event_name != 'push'}} 50 | uses: './.github/workflows/netsim_runner.yaml' 51 | secrets: inherit 52 | with: 53 | branch: ${{inputs.branch}} 54 | max_workers: 1 55 | netsim_branch: ${{inputs.netsim_branch}} 56 | sim_paths: "sims/iroh" 57 | pr_number: ${{inputs.pr_number}} 58 | publish_metrics: false 59 | build_profile: "optimized-release" 60 | report_table: ${{inputs.report_table}} 61 | -------------------------------------------------------------------------------- /src/util/queue.rs: -------------------------------------------------------------------------------- 1 | //! A simple asynchronous queue. 2 | 3 | use std::{ 4 | collections::VecDeque, 5 | pin::Pin, 6 | task::{Poll, Waker}, 7 | }; 8 | 9 | use futures_lite::Stream; 10 | 11 | /// A simple unbounded FIFO queue. 12 | /// 13 | /// Values are pushed into the queue, synchronously. 14 | /// The queue can be polled for the next value asynchronously. 15 | #[derive(Debug)] 16 | pub struct Queue { 17 | items: VecDeque, 18 | wakers: VecDeque, 19 | } 20 | 21 | impl Default for Queue { 22 | fn default() -> Self { 23 | Self { 24 | items: Default::default(), 25 | wakers: Default::default(), 26 | } 27 | } 28 | } 29 | 30 | impl Queue { 31 | /// Push a new item to the back of the queue. 32 | pub fn push_back(&mut self, pair: T) { 33 | self.items.push_back(pair); 34 | for waker in self.wakers.drain(..) { 35 | waker.wake(); 36 | } 37 | } 38 | 39 | /// Attempt to pop the next item from the front of the queue. 40 | /// 41 | /// Returns [`Poll::Pending`] if no items are currently in the queue. 
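    /// In that case the waker from `cx` is stored and woken on the next
    /// [`Self::push_back`], so the calling task will be polled again.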
42 | pub fn poll_pop_front(&mut self, cx: &mut std::task::Context<'_>) -> Poll> { 43 | if let Some(item) = self.items.pop_front() { 44 | Poll::Ready(Some(item)) 45 | } else { 46 | self.wakers.push_back(cx.waker().to_owned()); 47 | Poll::Pending 48 | } 49 | } 50 | } 51 | 52 | impl Stream for Queue { 53 | type Item = T; 54 | fn poll_next( 55 | self: Pin<&mut Self>, 56 | cx: &mut std::task::Context<'_>, 57 | ) -> Poll> { 58 | Self::poll_pop_front(self.get_mut(), cx) 59 | } 60 | } 61 | -------------------------------------------------------------------------------- /code_of_conduct.md: -------------------------------------------------------------------------------- 1 | # Code of Conduct 2 | 3 | Online or off, Number Zero is a harassment-free environment for everyone, regardless of gender, gender identity and expression, sexual orientation, disability, physical appearance, body size, race, age or religion or technical skill level. We do not tolerate harassment of participants in any form. 4 | 5 | Harassment includes verbal comments that reinforce social structures of domination related to gender, gender identity and expression, sexual orientation, disability, physical appearance, body size, race, age, religion, sexual images in public spaces, deliberate intimidation, stalking, following, harassing photography or recording, sustained disruption of talks or other events, inappropriate physical contact, and unwelcome sexual attention. Participants asked to stop any harassing behavior are expected to comply immediately. 6 | 7 | If a participant engages in harassing behaviour, the organizers may take any action they deem appropriate, including warning the offender or expulsion from events and online forums. 8 | 9 | If you are being harassed, notice that someone else is being harassed, or have any other concerns, please contact a member of the organizing team immediately. 10 | 11 | At offline events, organizers will identify themselves, and will help participants contact venue security or local law enforcement, provide escorts, or otherwise assist those experiencing harassment to feel safe for the duration of the event. We value your participation! 12 | 13 | This document is based on a similar code from [EDGI](https://envirodatagov.org/) and [Civic Tech Toronto](http://civictech.ca/about-us/), itself derived from the [Recurse Center’s Social Rules](https://www.recurse.com/manual#sec-environment), and the [anti-harassment policy from the Geek Feminism Wiki](http://geekfeminism.wikia.com/wiki/Conference_anti-harassment/Policy). 
14 | -------------------------------------------------------------------------------- /src/session/static_tokens.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | cell::RefCell, 3 | future::poll_fn, 4 | rc::Rc, 5 | task::{ready, Poll}, 6 | }; 7 | 8 | use crate::{ 9 | proto::{ 10 | data_model::{AuthorisationToken, AuthorisedEntry, Entry}, 11 | wgps::{DynamicToken, SetupBindStaticToken, StaticToken, StaticTokenHandle}, 12 | }, 13 | session::{channels::ChannelSenders, resource::ResourceMap, Error}, 14 | }; 15 | 16 | #[derive(Debug, Clone, Default)] 17 | pub struct StaticTokens(Rc>); 18 | 19 | #[derive(Debug, Default)] 20 | struct Inner { 21 | ours: ResourceMap, 22 | theirs: ResourceMap, 23 | } 24 | 25 | impl StaticTokens { 26 | pub fn bind_theirs(&self, token: StaticToken) { 27 | self.0.borrow_mut().theirs.bind(token); 28 | } 29 | 30 | pub async fn bind_and_send_ours( 31 | &self, 32 | static_token: StaticToken, 33 | send: &ChannelSenders, 34 | ) -> Result { 35 | let (handle, is_new) = { self.0.borrow_mut().ours.bind_if_new(static_token.clone()) }; 36 | if is_new { 37 | let msg = SetupBindStaticToken { static_token }; 38 | send.send(msg).await?; 39 | } 40 | Ok(handle) 41 | } 42 | 43 | pub async fn authorise_entry_eventually( 44 | &self, 45 | entry: Entry, 46 | static_token_handle: StaticTokenHandle, 47 | dynamic_token: DynamicToken, 48 | ) -> Result { 49 | let inner = self.0.clone(); 50 | let static_token = poll_fn(move |cx| { 51 | let mut inner = inner.borrow_mut(); 52 | let token = ready!(inner.theirs.poll_get_eventually(static_token_handle, cx)); 53 | Poll::Ready(token.clone()) 54 | }) 55 | .await; 56 | 57 | let token = AuthorisationToken::new(static_token.0, dynamic_token); 58 | let authorised_entry = AuthorisedEntry::new(entry, token)?; 59 | Ok(authorised_entry) 60 | } 61 | } 62 | -------------------------------------------------------------------------------- /src/util/codec.rs: -------------------------------------------------------------------------------- 1 | //! Traits for encoding and decoding values to and from bytes. 2 | 3 | use std::{fmt, io}; 4 | 5 | /// Trait for encoding values into bytes. 6 | pub trait Encoder: fmt::Debug { 7 | /// Returns the length (in bytes) of the encoded value. 8 | fn encoded_len(&self) -> usize; 9 | 10 | /// Encode [`Self`] into a writable buffer which implements `io::Write`. 11 | fn encode_into(&self, out: &mut W) -> anyhow::Result<()>; 12 | 13 | /// Encode [`Self`] into a vector of bytes. 14 | fn encode(&self) -> anyhow::Result> { 15 | let mut out = Vec::with_capacity(self.encoded_len()); 16 | self.encode_into(&mut out)?; 17 | Ok(out) 18 | } 19 | } 20 | 21 | /// Trait for decoding values from bytes. 22 | pub trait Decoder: Sized { 23 | /// Decode [`Self`] from a byte slice. 24 | fn decode_from(data: &[u8]) -> anyhow::Result>; 25 | } 26 | 27 | /// The outcome of [`Decoder::decode_from`] 28 | #[derive(Debug)] 29 | pub enum DecodeOutcome { 30 | /// Not enough data to decode the value. 31 | NeedMoreData, 32 | /// Decoded a value. 33 | Decoded { 34 | /// The decoded value. 35 | item: T, 36 | /// The number of bytes used for decoding the value. 
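        /// Callers should drop this many bytes from the front of their
        /// buffer before attempting to decode the next item.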
37 | consumed: usize, 38 | }, 39 | } 40 | 41 | pub fn compact_width(value: u64) -> u8 { 42 | if value < 256 { 43 | 1 44 | } else if value < 256u64.pow(2) { 45 | 2 46 | } else if value < 256u64.pow(4) { 47 | 4 48 | } else { 49 | 8 50 | } 51 | } 52 | 53 | #[derive(Debug, Clone, Copy)] 54 | pub struct CompactWidth(pub u64); 55 | 56 | impl CompactWidth { 57 | fn len(self) -> u8 { 58 | compact_width(self.0) 59 | } 60 | } 61 | 62 | impl Encoder for CompactWidth { 63 | fn encoded_len(&self) -> usize { 64 | self.len() as usize 65 | } 66 | 67 | fn encode_into(&self, out: &mut W) -> anyhow::Result<()> { 68 | match self.len() { 69 | 1 => out.write_all(&(self.0 as u8).to_be_bytes())?, 70 | 2 => out.write_all(&(self.0 as u16).to_be_bytes())?, 71 | 4 => out.write_all(&(self.0 as u32).to_be_bytes())?, 72 | 8 => out.write_all(&self.0.to_be_bytes())?, 73 | _ => unreachable!("len is always one of the above"), 74 | }; 75 | Ok(()) 76 | } 77 | } 78 | -------------------------------------------------------------------------------- /src/util/gen_stream.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | future::Future, 3 | pin::Pin, 4 | task::{Context, Poll}, 5 | }; 6 | 7 | use futures_lite::Stream; 8 | use genawaiter::{ 9 | rc::{Co, Gen}, 10 | GeneratorState, 11 | }; 12 | 13 | /// Wraps a [`Gen`] into a [`Stream`]. 14 | /// 15 | /// The stream yields the items yielded by the generator. 16 | /// The generator's final output can be retrieved via [`Self::final_output`]. 17 | #[derive(derive_more::Debug)] 18 | pub struct GenStream 19 | where 20 | Fut: Future>, 21 | { 22 | #[debug("Gen")] 23 | gen: Gen, 24 | is_complete: bool, 25 | final_output: Option, 26 | } 27 | 28 | impl GenStream 29 | where 30 | Fut: Future>, 31 | { 32 | pub fn new(producer: impl FnOnce(Co) -> Fut) -> Self { 33 | Self::from_gen(Gen::new(producer)) 34 | } 35 | 36 | pub fn from_gen(gen: Gen) -> Self { 37 | Self { 38 | gen, 39 | is_complete: false, 40 | final_output: None, 41 | } 42 | } 43 | 44 | pub fn final_output(self) -> Option { 45 | self.final_output 46 | } 47 | } 48 | 49 | impl Stream for GenStream 50 | where 51 | Fut: Future>, 52 | FinalOutput: Unpin, 53 | { 54 | type Item = Result; 55 | 56 | fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { 57 | if self.is_complete { 58 | return Poll::Ready(None); 59 | } 60 | let (item, final_output) = { 61 | let mut fut = self.gen.async_resume(); 62 | let out = std::task::ready!(Pin::new(&mut fut).poll(cx)); 63 | match out { 64 | GeneratorState::Yielded(output) => (Some(Ok(output)), None), 65 | GeneratorState::Complete(Ok(final_output)) => (None, Some(final_output)), 66 | GeneratorState::Complete(Err(err)) => (Some(Err(err)), None), 67 | } 68 | }; 69 | if matches!(item, None | Some(Err(_))) { 70 | self.is_complete = true; 71 | } 72 | if let Some(final_output) = final_output { 73 | self.final_output = Some(final_output); 74 | }; 75 | Poll::Ready(item) 76 | } 77 | } 78 | -------------------------------------------------------------------------------- /cliff.toml: -------------------------------------------------------------------------------- 1 | [changelog] 2 | # changelog header 3 | header = """ 4 | # Changelog\n 5 | All notable changes to iroh will be documented in this file.\n 6 | """ 7 | 8 | body = """ 9 | {% if version %}\ 10 | {% if previous.version %}\ 11 | ## [{{ version | trim_start_matches(pat="v") }}](/compare/{{ previous.version }}..{{ version }}) - {{ timestamp | date(format="%Y-%m-%d") }} 12 | {% else %}\ 13 | ## [{{ 
version | trim_start_matches(pat="v") }}] - {{ timestamp | date(format="%Y-%m-%d") }} 14 | {% endif %}\ 15 | {% else %}\ 16 | ## [unreleased] 17 | {% endif %}\ 18 | 19 | {% macro commit(commit) -%} 20 | - {% if commit.scope %}*({{ commit.scope }})* {% endif %}{% if commit.breaking %}[**breaking**] {% endif %}\ 21 | {{ commit.message | upper_first }} - ([{{ commit.id | truncate(length=7, end="") }}](/commit/{{ commit.id }}))\ 22 | {% endmacro -%} 23 | 24 | {% for group, commits in commits | group_by(attribute="group") %} 25 | ### {{ group | striptags | trim | upper_first }} 26 | {% for commit in commits 27 | | filter(attribute="scope") 28 | | sort(attribute="scope") %} 29 | {{ self::commit(commit=commit) }} 30 | {%- endfor -%} 31 | {% raw %}\n{% endraw %}\ 32 | {%- for commit in commits %} 33 | {%- if not commit.scope -%} 34 | {{ self::commit(commit=commit) }} 35 | {% endif -%} 36 | {% endfor -%} 37 | {% endfor %}\n 38 | """ 39 | 40 | footer = "" 41 | postprocessors = [ 42 | { pattern = '', replace = "https://github.com/n0-computer/iroh" }, 43 | { pattern = "\\(#([0-9]+)\\)", replace = "([#${1}](https://github.com/n0-computer/iroh/issues/${1}))"} 44 | ] 45 | 46 | 47 | [git] 48 | # regex for parsing and grouping commits 49 | commit_parsers = [ 50 | { message = "^feat", group = "⛰️ Features" }, 51 | { message = "^fix", group = "🐛 Bug Fixes" }, 52 | { message = "^doc", group = "📚 Documentation" }, 53 | { message = "^perf", group = "⚡ Performance" }, 54 | { message = "^refactor", group = "🚜 Refactor" }, 55 | { message = "^style", group = "🎨 Styling" }, 56 | { message = "^test", group = "🧪 Testing" }, 57 | { message = "^chore\\(release\\)", skip = true }, 58 | { message = "^chore\\(deps\\)", skip = true }, 59 | { message = "^chore\\(pr\\)", skip = true }, 60 | { message = "^chore\\(pull\\)", skip = true }, 61 | { message = "^chore|ci", group = "⚙️ Miscellaneous Tasks" }, 62 | { body = ".*security", group = "🛡️ Security" }, 63 | { message = "^revert", group = "◀️ Revert" }, 64 | ] -------------------------------------------------------------------------------- /.github/workflows/docs.yaml: -------------------------------------------------------------------------------- 1 | name: Docs Preview 2 | 3 | on: 4 | pull_request: 5 | workflow_dispatch: 6 | inputs: 7 | pr_number: 8 | required: true 9 | type: string 10 | 11 | # ensure job runs sequentially so pushing to the preview branch doesn't conflict 12 | concurrency: 13 | group: ci-docs-preview 14 | 15 | env: 16 | IROH_FORCE_STAGING_RELAYS: "1" 17 | 18 | jobs: 19 | preview_docs: 20 | permissions: write-all 21 | timeout-minutes: 30 22 | name: Docs preview 23 | if: ${{ (github.event_name == 'pull_request' || github.event_name == 'workflow_dispatch' ) && !github.event.pull_request.head.repo.fork }} 24 | runs-on: ubuntu-latest 25 | env: 26 | RUSTC_WRAPPER: "sccache" 27 | SCCACHE_GHA_ENABLED: "on" 28 | SCCACHE_CACHE_SIZE: "50G" 29 | PREVIEW_PATH: pr/${{ github.event.pull_request.number || inputs.pr_number }}/docs 30 | 31 | steps: 32 | - uses: actions/checkout@v6 33 | - uses: dtolnay/rust-toolchain@master 34 | with: 35 | toolchain: nightly-2024-11-30 36 | - name: Install sccache 37 | uses: mozilla-actions/sccache-action@v0.0.9 38 | 39 | - name: Generate Docs 40 | run: cargo doc --workspace --all-features --no-deps 41 | env: 42 | RUSTDOCFLAGS: --cfg iroh_docsrs 43 | 44 | - name: Deploy Docs to Preview Branch 45 | uses: peaceiris/actions-gh-pages@v4 46 | with: 47 | github_token: ${{ secrets.GITHUB_TOKEN }} 48 | publish_dir: ./target/doc/ 49 | destination_dir: 
${{ env.PREVIEW_PATH }} 50 | publish_branch: generated-docs-preview 51 | 52 | - name: Find Docs Comment 53 | uses: peter-evans/find-comment@v4 54 | id: fc 55 | with: 56 | issue-number: ${{ github.event.pull_request.number || inputs.pr_number }} 57 | comment-author: 'github-actions[bot]' 58 | body-includes: Documentation for this PR has been generated 59 | 60 | - name: Get current timestamp 61 | id: get_timestamp 62 | run: echo "TIMESTAMP=$(date -u +'%Y-%m-%dT%H:%M:%SZ')" >> $GITHUB_ENV 63 | 64 | - name: Create or Update Docs Comment 65 | uses: peter-evans/create-or-update-comment@v5 66 | with: 67 | issue-number: ${{ github.event.pull_request.number || inputs.pr_number }} 68 | comment-id: ${{ steps.fc.outputs.comment-id }} 69 | body: | 70 | Documentation for this PR has been generated and is available at: https://${{ github.repository_owner }}.github.io/${{ github.event.repository.name }}/${{ env.PREVIEW_PATH }}/iroh/ 71 | 72 | Last updated: ${{ env.TIMESTAMP }} 73 | edit-mode: replace 74 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 |
iroh
 2 | 
 3 | 
 4 | A minimal implementation of Willow, Meadowcap, and Confidential Sync with iroh
6 | 7 | [![Documentation](https://img.shields.io/badge/docs-latest-blue.svg?style=flat-square)](https://docs.rs/iroh-willow/) 8 | [![Crates.io](https://img.shields.io/crates/v/iroh-willow.svg?style=flat-square)](https://crates.io/crates/iroh-willow) 9 | [![downloads](https://img.shields.io/crates/d/iroh-willow.svg?style=flat-square)](https://crates.io/crates/iroh-willow) 10 | [![Chat](https://img.shields.io/discord/1161119546170687619?logo=discord&style=flat-square)](https://discord.com/invite/DpmJgtU7cW) 11 | [![Youtube](https://img.shields.io/badge/YouTube-red?logo=youtube&logoColor=white&style=flat-square)](https://www.youtube.com/@n0computer) 12 | [![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg?style=flat-square)](LICENSE-MIT) 13 | [![License: Apache 2.0](https://img.shields.io/badge/License-Apache%202.0-blue.svg?style=flat-square)](LICENSE-APACHE) 14 | [![CI](https://img.shields.io/github/actions/workflow/status/n0-computer/iroh-willow/ci.yml?branch=main&label=CI&style=flat-square)](https://github.com/n0-computer/iroh-willow/actions/workflows/ci.yml) 15 | 16 |
17 |

18 | 19 | Rust Docs 20 | 21 |

22 |
23 |
24 | 25 | Read more about willow here: https://willowprotocol.org 26 | 27 | # License 28 | 29 | This project is licensed under either of 30 | 31 | * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or 32 | http://www.apache.org/licenses/LICENSE-2.0) 33 | * MIT license ([LICENSE-MIT](LICENSE-MIT) or 34 | http://opensource.org/licenses/MIT) 35 | 36 | at your option. 37 | 38 | ### Contribution 39 | 40 | Unless you explicitly state otherwise, any contribution intentionally submitted 41 | for inclusion in this project by you, as defined in the Apache-2.0 license, 42 | shall be dual licensed as above, without any additional terms or conditions. 43 | 44 | ## License 45 | 46 | Copyright 2024 N0, INC. 47 | 48 | This project is licensed under either of 49 | 50 | * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or 51 | http://www.apache.org/licenses/LICENSE-2.0) 52 | * MIT license ([LICENSE-MIT](LICENSE-MIT) or 53 | http://opensource.org/licenses/MIT) 54 | 55 | at your option. 56 | 57 | ### Contribution 58 | 59 | Unless you explicitly state otherwise, any contribution intentionally submitted for inclusion in this project by you, as defined in the Apache-2.0 license, shall be dual licensed as above, without any additional terms or conditions. 60 | -------------------------------------------------------------------------------- /.github/workflows/test_relay_server.yml: -------------------------------------------------------------------------------- 1 | name: Deploy Test Relay Server 2 | 3 | on: 4 | workflow_dispatch: 5 | schedule: 6 | - cron: '0 4 * * *' 7 | 8 | concurrency: 9 | group: relay-${{ github.workflow }}-${{ github.ref }} 10 | cancel-in-progress: true 11 | 12 | env: 13 | RUST_BACKTRACE: 1 14 | RUSTFLAGS: -Dwarnings 15 | RUSTDOCFLAGS: -Dwarnings 16 | MSRV: "1.76" 17 | SCCACHE_CACHE_SIZE: "50G" 18 | IROH_FORCE_STAGING_RELAYS: "1" 19 | 20 | jobs: 21 | build_relay_server: 22 | runs-on: [self-hosted, linux, X64] 23 | if: github.ref_name=='main' 24 | env: 25 | # Using self-hosted runners so use local cache for sccache and 26 | # not SCCACHE_GHA_ENABLED. 
27 | RUSTC_WRAPPER: "sccache" 28 | RUST_BACKTRACE: full 29 | RUSTV: stable 30 | steps: 31 | - uses: actions/checkout@v6 32 | - name: Install rust stable 33 | uses: dtolnay/rust-toolchain@stable 34 | - name: Install sccache 35 | uses: mozilla-actions/sccache-action@v0.0.9 36 | 37 | - name: build release 38 | run: | 39 | cargo build --release --all-features --bin iroh-relay 40 | 41 | - name: Setup awscli on linux 42 | run: | 43 | curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip" 44 | unzip awscliv2.zip 45 | sudo ./aws/install --update 46 | - name: Set aws credentials 47 | run: | 48 | echo "AWS_ACCESS_KEY_ID=${{secrets.S3_ACCESS_KEY_ID}}" >> $GITHUB_ENV 49 | echo "AWS_SECRET_ACCESS_KEY=${{secrets.S3_ACCESS_KEY}}" >> $GITHUB_ENV 50 | echo "AWS_DEFAULT_REGION=us-west-2" >> $GITHUB_ENV 51 | 52 | - name: push release 53 | run: | 54 | aws s3 cp ./target/release/iroh-relay s3://vorc/iroh-relay-linux-amd64-${GITHUB_SHA::7} --no-progress 55 | 56 | - name: Set iroh-relay tag 57 | id: set_tag 58 | run: | 59 | echo ::set-output name=tag::${GITHUB_SHA::7} 60 | 61 | 62 | deploy_iroh-relay: 63 | runs-on: ubuntu-latest 64 | if: github.ref_name=='main' 65 | needs: build_relay_server 66 | steps: 67 | - uses: actions/checkout@v6 68 | - name: Run Staging Deploy Playbook 69 | uses: arqu/action-ansible-playbook@master 70 | with: 71 | playbook: redeploy-relay.yml 72 | directory: .github/ansible 73 | key: ${{ secrets.TEST_DERPER_SSH_PKEY }} 74 | inventory: ${{ secrets.TEST_DERPER_INVENTORY }} 75 | known_hosts: ${{ secrets.TEST_DERPER_KNOWN_HOSTS }} 76 | options: --extra-vars ansible_ssh_user=root --extra-vars relay_version=${{ needs.build_relay_server.outputs.set_tag.tag }} 77 | -------------------------------------------------------------------------------- /src/proto/wgps/handles.rs: -------------------------------------------------------------------------------- 1 | use serde::{Deserialize, Serialize}; 2 | 3 | /// The different resource handles employed by the WGPS. 4 | #[derive(Debug, Serialize, Deserialize, strum::Display)] 5 | pub enum HandleType { 6 | /// Resource handle for the private set intersection part of private area intersection. 7 | /// More precisely, an IntersectionHandle stores a PsiGroup member together with one of two possible states: 8 | /// * pending (waiting for the other peer to perform scalar multiplication), 9 | /// * completed (both peers performed scalar multiplication). 10 | Intersection, 11 | 12 | /// Resource handle for [`crate::proto::meadowcap::ReadAuthorisation`] that certify access to some Entries. 13 | Capability, 14 | 15 | /// Resource handle for [`crate::proto::grouping::AreaOfInterest`]s that peers wish to sync. 16 | AreaOfInterest, 17 | 18 | /// Resource handle that controls the matching from Payload transmissions to Payload requests. 19 | PayloadRequest, 20 | 21 | /// Resource handle for [`super::StaticToken`]s that peers need to transmit. 
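    /// Once bound, later messages can reference the token by this handle
    /// instead of retransmitting the token itself.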
22 | StaticToken, 23 | } 24 | 25 | pub trait IsHandle: 26 | std::fmt::Debug + std::hash::Hash + From + Into + Copy + Eq + PartialEq 27 | { 28 | fn handle_type(&self) -> HandleType; 29 | fn value(&self) -> u64; 30 | } 31 | 32 | #[derive(Debug, Serialize, Deserialize, Hash, Eq, PartialEq, Clone, Copy, derive_more::From)] 33 | pub struct AreaOfInterestHandle(u64); 34 | 35 | #[derive(Debug, Serialize, Deserialize, Hash, Eq, PartialEq, Clone, Copy, derive_more::From)] 36 | pub struct IntersectionHandle(u64); 37 | 38 | #[derive(Debug, Serialize, Deserialize, Hash, Eq, PartialEq, Clone, Copy, derive_more::From)] 39 | pub struct CapabilityHandle(u64); 40 | 41 | #[derive(Debug, Serialize, Deserialize, Hash, Eq, PartialEq, Clone, Copy, derive_more::From)] 42 | pub struct StaticTokenHandle(u64); 43 | 44 | #[derive(Debug, Hash, Eq, PartialEq, Clone, Copy, derive_more::From)] 45 | pub enum ResourceHandle { 46 | AreaOfInterest(AreaOfInterestHandle), 47 | Intersection(IntersectionHandle), 48 | Capability(CapabilityHandle), 49 | StaticToken(StaticTokenHandle), 50 | } 51 | 52 | impl IsHandle for CapabilityHandle { 53 | fn handle_type(&self) -> HandleType { 54 | HandleType::Capability 55 | } 56 | fn value(&self) -> u64 { 57 | self.0 58 | } 59 | } 60 | impl IsHandle for StaticTokenHandle { 61 | fn handle_type(&self) -> HandleType { 62 | HandleType::StaticToken 63 | } 64 | fn value(&self) -> u64 { 65 | self.0 66 | } 67 | } 68 | impl IsHandle for AreaOfInterestHandle { 69 | fn handle_type(&self) -> HandleType { 70 | HandleType::AreaOfInterest 71 | } 72 | fn value(&self) -> u64 { 73 | self.0 74 | } 75 | } 76 | impl IsHandle for IntersectionHandle { 77 | fn handle_type(&self) -> HandleType { 78 | HandleType::Intersection 79 | } 80 | fn value(&self) -> u64 { 81 | self.0 82 | } 83 | } 84 | -------------------------------------------------------------------------------- /src/proto/wgps/fingerprint.rs: -------------------------------------------------------------------------------- 1 | use std::fmt; 2 | 3 | use serde::{Deserialize, Serialize}; 4 | use willow_store::{FixedSize, LiftingCommutativeMonoid, PointRef}; 5 | 6 | use crate::{ 7 | proto::data_model::Entry, 8 | store::willow_store_glue::{ 9 | path_to_blobseq, IrohWillowParams, StoredAuthorisedEntry, StoredTimestamp, 10 | }, 11 | }; 12 | 13 | #[derive( 14 | Default, 15 | Serialize, 16 | Deserialize, 17 | Eq, 18 | PartialEq, 19 | Clone, 20 | Copy, 21 | zerocopy_derive::FromBytes, 22 | zerocopy_derive::AsBytes, 23 | zerocopy_derive::FromZeroes, 24 | )] 25 | #[repr(transparent)] 26 | pub struct Fingerprint(pub [u8; 32]); 27 | 28 | impl Fingerprint { 29 | pub(crate) fn lift_stored_entry( 30 | key: &PointRef, 31 | payload_digest: &[u8; 32], 32 | payload_size: u64, 33 | ) -> Self { 34 | let mut hasher = iroh_blake3::Hasher::default(); 35 | hasher.update(key.as_slice()); 36 | hasher.update(payload_digest); 37 | hasher.update(&payload_size.to_le_bytes()); 38 | Self(*hasher.finalize().as_bytes()) 39 | } 40 | 41 | pub fn lift_entry(entry: &Entry) -> Self { 42 | let point = willow_store::Point::::new( 43 | entry.subspace_id(), 44 | &StoredTimestamp::new(entry.timestamp()), 45 | &path_to_blobseq(entry.path()), 46 | ); 47 | Self::lift_stored_entry( 48 | &point, 49 | entry.payload_digest().0.as_bytes(), 50 | entry.payload_length(), 51 | ) 52 | } 53 | } 54 | 55 | impl FixedSize for Fingerprint { 56 | const SIZE: usize = std::mem::size_of::(); 57 | } 58 | 59 | impl LiftingCommutativeMonoid, StoredAuthorisedEntry> for Fingerprint { 60 | fn neutral() -> Self { 61 | Self([0u8; 
32]) 62 | } 63 | 64 | fn lift(key: &PointRef, value: &StoredAuthorisedEntry) -> Self { 65 | Self::lift_stored_entry(key, &value.payload_digest, value.payload_size) 66 | } 67 | 68 | fn combine(&self, other: &Self) -> Self { 69 | let mut slf = *self; 70 | slf ^= *other; 71 | slf 72 | } 73 | } 74 | 75 | impl fmt::Debug for Fingerprint { 76 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 77 | write!( 78 | f, 79 | "Fingerprint({})", 80 | data_encoding::HEXLOWER.encode(&self.0[..10]) 81 | ) 82 | } 83 | } 84 | 85 | impl Fingerprint { 86 | pub fn add_entry(&mut self, entry: &Entry) { 87 | // TODO: Don't allocate 88 | let next = Self::lift_entry(entry); 89 | *self ^= next; 90 | } 91 | 92 | pub fn add_entries<'a>(&mut self, iter: impl Iterator) { 93 | for entry in iter { 94 | self.add_entry(entry); 95 | } 96 | } 97 | 98 | pub fn from_entries<'a>(iter: impl Iterator) -> Self { 99 | let mut this = Self::default(); 100 | this.add_entries(iter); 101 | this 102 | } 103 | 104 | pub fn is_empty(&self) -> bool { 105 | *self == Self::default() 106 | } 107 | } 108 | 109 | impl std::ops::BitXorAssign for Fingerprint { 110 | fn bitxor_assign(&mut self, rhs: Self) { 111 | for (a, b) in self.0.iter_mut().zip(rhs.0.iter()) { 112 | *a ^= b; 113 | } 114 | } 115 | } 116 | -------------------------------------------------------------------------------- /src/util/stream.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | future::Future, 3 | pin::Pin, 4 | task::{Context, Poll}, 5 | }; 6 | 7 | use futures_lite::Stream; 8 | use tokio_stream::wrappers::ReceiverStream; 9 | use tokio_util::sync::{CancellationToken, WaitForCancellationFutureOwned}; 10 | 11 | /// Wrapper around [`Stream`] that takes a cancel token to cancel the stream. 12 | /// 13 | /// Once the cancel token is cancelled, this stream will continue to yield all items which are 14 | /// ready immediately and then return [`None`]. 15 | #[derive(Debug)] 16 | pub struct Cancelable { 17 | stream: S, 18 | cancelled: Pin>, 19 | is_cancelled: bool, 20 | } 21 | 22 | impl Cancelable { 23 | pub fn new(stream: S, cancel_token: CancellationToken) -> Self { 24 | Self { 25 | stream, 26 | cancelled: Box::pin(cancel_token.cancelled_owned()), 27 | is_cancelled: false, 28 | } 29 | } 30 | pub fn into_inner(self) -> S { 31 | self.stream 32 | } 33 | } 34 | 35 | impl Stream for Cancelable { 36 | type Item = S::Item; 37 | fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { 38 | match Pin::new(&mut self.stream).poll_next(cx) { 39 | Poll::Ready(r) => Poll::Ready(r), 40 | Poll::Pending => { 41 | if self.is_cancelled { 42 | return Poll::Ready(None); 43 | } 44 | match Pin::new(&mut self.cancelled).poll(cx) { 45 | Poll::Ready(()) => { 46 | self.is_cancelled = true; 47 | Poll::Ready(None) 48 | } 49 | Poll::Pending => Poll::Pending, 50 | } 51 | } 52 | } 53 | } 54 | } 55 | 56 | /// Wrapper around [`ReceiverStream`] that can be closed with a [`CancellationToken`]. 
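///
/// Once the token is cancelled, the underlying channel is closed and the
/// stream yields [`None`].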
57 | #[derive(Debug)] 58 | pub struct CancelableReceiver { 59 | receiver: ReceiverStream, 60 | cancelled: Pin>, 61 | is_cancelled: bool, 62 | } 63 | 64 | impl CancelableReceiver { 65 | pub fn new(receiver: ReceiverStream, cancel_token: CancellationToken) -> Self { 66 | let is_cancelled = cancel_token.is_cancelled(); 67 | Self { 68 | receiver, 69 | cancelled: Box::pin(cancel_token.cancelled_owned()), 70 | is_cancelled, 71 | } 72 | } 73 | 74 | pub fn into_inner(self) -> ReceiverStream { 75 | self.receiver 76 | } 77 | } 78 | 79 | impl Stream for CancelableReceiver { 80 | type Item = T; 81 | fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { 82 | match Pin::new(&mut self.receiver).poll_next(cx) { 83 | Poll::Ready(r) => Poll::Ready(r), 84 | Poll::Pending => { 85 | if !self.is_cancelled { 86 | match Pin::new(&mut self.cancelled).poll(cx) { 87 | Poll::Ready(()) => { 88 | self.receiver.close(); 89 | self.is_cancelled = true; 90 | Poll::Ready(None) 91 | } 92 | Poll::Pending => Poll::Pending, 93 | } 94 | } else { 95 | Poll::Pending 96 | } 97 | } 98 | } 99 | } 100 | } 101 | -------------------------------------------------------------------------------- /src/session/challenge.rs: -------------------------------------------------------------------------------- 1 | use super::{Error, Role}; 2 | use crate::proto::{ 3 | keys::{UserPublicKey, UserSignature}, 4 | wgps::{AccessChallenge, AccessChallengeBytes, ChallengeHash}, 5 | }; 6 | 7 | /// Data from the initial transmission 8 | /// 9 | /// This happens before the session is initialized. 10 | #[derive(Debug)] 11 | pub struct InitialTransmission { 12 | /// The [`AccessChallenge`] nonce, whose hash we sent to the remote. 13 | pub our_nonce: AccessChallenge, 14 | /// The [`ChallengeHash`] we received from the remote. 15 | pub received_commitment: ChallengeHash, 16 | /// The maximum payload size we received from the remote. 17 | pub their_max_payload_size: u64, 18 | } 19 | 20 | #[derive(Debug)] 21 | pub enum ChallengeState { 22 | Committed { 23 | our_nonce: AccessChallenge, 24 | received_commitment: ChallengeHash, 25 | }, 26 | Revealed { 27 | ours: AccessChallengeBytes, 28 | theirs: AccessChallengeBytes, 29 | }, 30 | } 31 | 32 | impl ChallengeState { 33 | pub fn reveal(&mut self, our_role: Role, their_nonce: AccessChallenge) -> Result<(), Error> { 34 | match self { 35 | Self::Committed { 36 | our_nonce, 37 | received_commitment, 38 | } => { 39 | if their_nonce.hash() != *received_commitment { 40 | return Err(Error::BrokenCommittement); 41 | } 42 | let ours = match our_role { 43 | Role::Alfie => bitwise_xor(our_nonce.to_bytes(), their_nonce.to_bytes()), 44 | Role::Betty => { 45 | bitwise_xor_complement(our_nonce.to_bytes(), their_nonce.to_bytes()) 46 | } 47 | }; 48 | let theirs = bitwise_complement(ours); 49 | *self = Self::Revealed { ours, theirs }; 50 | Ok(()) 51 | } 52 | _ => Err(Error::InvalidMessageInCurrentState), 53 | } 54 | } 55 | 56 | pub fn is_revealed(&self) -> bool { 57 | matches!(self, Self::Revealed { .. 
}) 58 | } 59 | 60 | // pub fn sign(&self, secret_key: &UserSecretKey) -> Result { 61 | // let signable = self.signable()?; 62 | // let signature = secret_key.sign(&signable); 63 | // Ok(signature) 64 | // } 65 | 66 | pub fn signable(&self) -> Result<[u8; 32], Error> { 67 | let challenge = self.get_ours()?; 68 | Ok(*challenge) 69 | } 70 | 71 | pub fn verify(&self, user_key: &UserPublicKey, signature: &UserSignature) -> Result<(), Error> { 72 | let their_challenge = self.get_theirs()?; 73 | user_key.verify(their_challenge, signature)?; 74 | Ok(()) 75 | } 76 | 77 | fn get_ours(&self) -> Result<&AccessChallengeBytes, Error> { 78 | match self { 79 | Self::Revealed { ours, .. } => Ok(ours), 80 | _ => Err(Error::InvalidMessageInCurrentState), 81 | } 82 | } 83 | 84 | fn get_theirs(&self) -> Result<&AccessChallengeBytes, Error> { 85 | match self { 86 | Self::Revealed { theirs, .. } => Ok(theirs), 87 | _ => Err(Error::InvalidMessageInCurrentState), 88 | } 89 | } 90 | } 91 | 92 | fn bitwise_xor(a: [u8; N], b: [u8; N]) -> [u8; N] { 93 | let mut res = [0u8; N]; 94 | for (i, (x1, x2)) in a.iter().zip(b.iter()).enumerate() { 95 | res[i] = x1 ^ x2; 96 | } 97 | res 98 | } 99 | 100 | fn bitwise_complement(a: [u8; N]) -> [u8; N] { 101 | let mut res = [0u8; N]; 102 | for (i, x) in a.iter().enumerate() { 103 | res[i] = !x; 104 | } 105 | res 106 | } 107 | 108 | fn bitwise_xor_complement(a: [u8; N], b: [u8; N]) -> [u8; N] { 109 | let mut res = [0u8; N]; 110 | for (i, (x1, x2)) in a.iter().zip(b.iter()).enumerate() { 111 | res[i] = !(x1 ^ x2); 112 | } 113 | res 114 | } 115 | -------------------------------------------------------------------------------- /.github/workflows/flaky.yaml: -------------------------------------------------------------------------------- 1 | # Run all tests, including flaky test. 2 | # 3 | # The default CI workflow ignores flaky tests. This workflow will run 4 | # all tests, including ignored ones. 5 | # 6 | # To use this workflow you can either: 7 | # 8 | # - Label a PR with "flaky-test", the normal CI workflow will not run 9 | # any jobs but the jobs here will be run. Note that to merge the PR 10 | # you'll need to remove the label eventually because the normal CI 11 | # jobs are required by branch protection. 12 | # 13 | # - Manually trigger the workflow, you may choose a branch for this to 14 | # run on. 15 | # 16 | # Additionally this jobs runs once a day on a schedule. 17 | # 18 | # Currently doctests are not run by this workflow. 
19 | 20 | name: Flaky CI 21 | 22 | on: 23 | pull_request: 24 | types: [ 'labeled', 'unlabeled', 'opened', 'synchronize', 'reopened' ] 25 | schedule: 26 | # 06:30 UTC every day 27 | - cron: '30 6 * * *' 28 | workflow_dispatch: 29 | inputs: 30 | branch: 31 | description: 'Branch to run on, defaults to main' 32 | required: true 33 | default: 'main' 34 | type: string 35 | 36 | concurrency: 37 | group: flaky-${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} 38 | cancel-in-progress: true 39 | 40 | env: 41 | IROH_FORCE_STAGING_RELAYS: "1" 42 | 43 | jobs: 44 | tests: 45 | if: "contains(github.event.pull_request.labels.*.name, 'flaky-test') || github.event_name == 'workflow_dispatch' || github.event_name == 'schedule'" 46 | uses: './.github/workflows/tests.yaml' 47 | with: 48 | flaky: true 49 | git-ref: ${{ inputs.branch }} 50 | notify: 51 | needs: tests 52 | if: ${{ always() }} 53 | runs-on: ubuntu-latest 54 | steps: 55 | - name: Extract test results 56 | run: | 57 | printf '${{ toJSON(needs) }}\n' 58 | result=$(echo '${{ toJSON(needs) }}' | jq -r .tests.result) 59 | echo TESTS_RESULT=$result 60 | echo "TESTS_RESULT=$result" >>"$GITHUB_ENV" 61 | - name: download nextest reports 62 | uses: actions/download-artifact@v6 63 | with: 64 | pattern: libtest_run_${{ github.run_number }}-${{ github.run_attempt }}-* 65 | merge-multiple: true 66 | path: nextest-results 67 | - name: create summary report 68 | id: make_summary 69 | run: | 70 | # prevent the glob expression in the loop to match on itself when the dir is empty 71 | shopt -s nullglob 72 | # to deal with multiline outputs it's recommended to use a random EOF, the syntax is based on 73 | # https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#multiline-strings 74 | EOF=aP51VriWCxNJ1JjvmO9i 75 | echo "summary<<$EOF" >> $GITHUB_OUTPUT 76 | echo "Flaky tests failure:" >> $GITHUB_OUTPUT 77 | echo " " >> $GITHUB_OUTPUT 78 | for report in nextest-results/*.json; do 79 | # remove the name prefix and extension, and split the parts 80 | name=$(echo ${report:16:-5} | tr _ ' ') 81 | echo $name 82 | echo "- **$name**" >> $GITHUB_OUTPUT 83 | # select the failed tests 84 | # the tests have this format "crate::module$test_name", the sed expressions remove the quotes and replace $ for :: 85 | failure=$(jq --slurp '.[] | select(.["type"] == "test" and .["event"] == "failed" ) | .["name"]' $report | sed -e 's/^"//g' -e 's/\$/::/' -e 's/"//') 86 | echo "$failure" 87 | echo "$failure" >> $GITHUB_OUTPUT 88 | done 89 | echo "" >> $GITHUB_OUTPUT 90 | echo "See https://github.com/${{ github.repository }}/actions/workflows/flaky.yaml" >> $GITHUB_OUTPUT 91 | echo "$EOF" >> $GITHUB_OUTPUT 92 | - name: Notify discord on failure 93 | uses: n0-computer/discord-webhook-notify@v1 94 | if: ${{ env.TESTS_RESULT == 'failure' || env.TESTS_RESULT == 'success' }} 95 | with: 96 | text: "Flaky tests in **${{ github.repository }}**:" 97 | severity: ${{ env.TESTS_RESULT == 'failure' && 'warn' || 'info' }} 98 | details: ${{ env.TESTS_RESULT == 'failure' && steps.make_summary.outputs.summary || 'No flaky failures!' 
}} 99 | webhookUrl: ${{ secrets.DISCORD_N0_GITHUB_CHANNEL_WEBHOOK_URL }} 100 | -------------------------------------------------------------------------------- /.github/workflows/docker.yaml: -------------------------------------------------------------------------------- 1 | name: Docker 2 | 3 | on: 4 | workflow_dispatch: 5 | inputs: 6 | release_version: 7 | description: "Release version" 8 | required: true 9 | type: string 10 | default: "" 11 | base_hash: 12 | description: "Commit hash from which to build" 13 | required: true 14 | type: string 15 | default: "" 16 | publish: 17 | description: "Publish to Docker Hub" 18 | required: true 19 | type: boolean 20 | default: false 21 | workflow_call: 22 | inputs: 23 | release_version: 24 | description: "Release version" 25 | required: true 26 | type: string 27 | default: "" 28 | base_hash: 29 | description: "Commit hash from which to build" 30 | required: true 31 | type: string 32 | default: "" 33 | publish: 34 | description: "Publish to Docker Hub" 35 | required: true 36 | type: boolean 37 | default: false 38 | 39 | env: 40 | IROH_FORCE_STAGING_RELAYS: "1" 41 | 42 | jobs: 43 | build_and_publish: 44 | timeout-minutes: 30 45 | name: Docker 46 | runs-on: [self-hosted, linux, X64] 47 | steps: 48 | - name: Checkout 49 | uses: actions/checkout@v6 50 | 51 | - name: Set up Docker Buildx 52 | uses: docker/setup-buildx-action@v3 53 | 54 | - name: Login to Docker Hub 55 | uses: docker/login-action@v3 56 | with: 57 | username: ${{ secrets.DOCKERHUB_USERNAME }} 58 | password: ${{ secrets.DOCKERHUB_TOKEN }} 59 | 60 | - name: Prep dirs 61 | run: | 62 | mkdir -p bins/linux/amd64 63 | mkdir -p bins/linux/arm64 64 | 65 | - name: Setup awscli on linux 66 | run: | 67 | curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip" 68 | unzip awscliv2.zip 69 | sudo ./aws/install --update 70 | 71 | - name: Set aws credentials 72 | run: | 73 | echo "AWS_ACCESS_KEY_ID=${{secrets.S3_ACCESS_KEY_ID}}" >> $GITHUB_ENV 74 | echo "AWS_SECRET_ACCESS_KEY=${{secrets.S3_ACCESS_KEY}}" >> $GITHUB_ENV 75 | echo "AWS_DEFAULT_REGION=us-west-2" >> $GITHUB_ENV 76 | 77 | - name: Fetch release binaries 78 | run: | 79 | aws s3 cp s3://vorc/iroh-linux-amd64-${{ inputs.base_hash }} bins/linux/amd64/iroh 80 | aws s3 cp s3://vorc/iroh-relay-linux-amd64-${{ inputs.base_hash }} bins/linux/amd64/iroh-relay 81 | aws s3 cp s3://vorc/iroh-dns-server-linux-amd64-${{ inputs.base_hash }} bins/linux/amd64/iroh-dns-server 82 | 83 | aws s3 cp s3://vorc/iroh-linux-aarch64-${{ inputs.base_hash }} bins/linux/arm64/iroh 84 | aws s3 cp s3://vorc/iroh-relay-linux-aarch64-${{ inputs.base_hash }} bins/linux/arm64/iroh-relay 85 | aws s3 cp s3://vorc/iroh-dns-server-linux-aarch64-${{ inputs.base_hash }} bins/linux/arm64/iroh-dns-server 86 | 87 | - name: Build Docker image (iroh) 88 | uses: docker/build-push-action@v6 89 | with: 90 | context: . 91 | push: ${{ inputs.publish }} 92 | tags: n0computer/iroh:latest,n0computer/iroh:${{ inputs.release_version }} 93 | target: iroh 94 | platforms: linux/amd64,linux/arm64/v8 95 | file: docker/Dockerfile.ci 96 | 97 | - name: Build Docker image (iroh-relay) 98 | uses: docker/build-push-action@v6 99 | with: 100 | context: . 
101 | push: ${{ inputs.publish }} 102 | tags: n0computer/iroh-relay:latest,n0computer/iroh-relay:${{ inputs.release_version }} 103 | target: iroh-relay 104 | platforms: linux/amd64,linux/arm64/v8 105 | file: docker/Dockerfile.ci 106 | 107 | - name: Build Docker image (iroh-dns-server) 108 | uses: docker/build-push-action@v6 109 | with: 110 | context: . 111 | push: ${{ inputs.publish }} 112 | tags: n0computer/iroh-dns-server:latest,n0computer/iroh-dns-server:${{ inputs.release_version }} 113 | target: iroh-dns-server 114 | platforms: linux/amd64,linux/arm64/v8 115 | file: docker/Dockerfile.ci -------------------------------------------------------------------------------- /src/util/pipe.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | cell::RefCell, 3 | future::poll_fn, 4 | io, 5 | rc::Rc, 6 | task::{Context, Poll, Waker}, 7 | }; 8 | 9 | use bytes::{Bytes, BytesMut}; 10 | use futures_lite::Stream; 11 | use iroh_io::AsyncStreamWriter; 12 | 13 | /// In-memory local-io async pipe between an [`AsyncStreamWriter`] and a [`Stream`] of [`Bytes`]. 14 | /// 15 | /// The pipe maintains a shared in-memory buffer of `chunk_size` bytes. 16 | /// 17 | /// [`PipeWriter`] is an [`AsyncStreamWriter`] that writes into the shared buffer. 18 | /// 19 | /// [`PipeReader`] is a [`Stream`] that emits [`Bytes`] of `chunk_size` length. The last chunk may be 20 | /// smaller than `chunk_size`. 21 | /// 22 | /// The pipe is closed once either the reader or the writer is dropped. If the reader is dropped, 23 | /// subsequent writes will fail with [`io::ErrorKind::BrokenPipe`]. 24 | // TODO: Move to iroh-io? 25 | pub fn chunked_pipe(chunk_size: usize) -> (PipeWriter, PipeReader) { 26 | let shared = Shared { 27 | buf: BytesMut::new(), 28 | chunk_size, 29 | read_waker: None, 30 | write_waker: None, 31 | closed: false, 32 | }; 33 | let shared = Rc::new(RefCell::new(shared)); 34 | let writer = PipeWriter { 35 | shared: shared.clone(), 36 | }; 37 | let reader = PipeReader { shared }; 38 | (writer, reader) 39 | } 40 | 41 | #[derive(Debug)] 42 | struct Shared { 43 | buf: BytesMut, 44 | chunk_size: usize, 45 | read_waker: Option<Waker>, 46 | write_waker: Option<Waker>, 47 | closed: bool, 48 | } 49 | 50 | impl Shared { 51 | fn poll_write(&mut self, data: &[u8], cx: &mut Context<'_>) -> Poll<io::Result<usize>> { 52 | if self.closed { 53 | return Poll::Ready(Err(io::Error::new( 54 | io::ErrorKind::BrokenPipe, 55 | "write after close", 56 | ))); 57 | } 58 | let remaining = self.chunk_size - self.buf.len(); 59 | let amount = data.len().min(remaining); 60 | if amount > 0 { 61 | self.buf.extend_from_slice(&data[..amount]); 62 | if let Some(waker) = self.read_waker.take() { 63 | waker.wake(); 64 | } 65 | Poll::Ready(Ok(amount)) 66 | } else { 67 | self.write_waker = Some(cx.waker().to_owned()); 68 | Poll::Pending 69 | } 70 | } 71 | 72 | fn poll_next(&mut self, cx: &mut Context<'_>) -> Poll<Option<io::Result<Bytes>>> { 73 | if self.buf.len() == self.chunk_size { 74 | if let Some(write_waker) = self.write_waker.take() { 75 | write_waker.wake(); 76 | } 77 | Poll::Ready(Some(Ok(self.buf.split().freeze()))) 78 | } else if self.closed && !self.buf.is_empty() { 79 | Poll::Ready(Some(Ok(self.buf.split().freeze()))) 80 | } else if self.closed { 81 | Poll::Ready(None) 82 | } else { 83 | self.read_waker = Some(cx.waker().to_owned()); 84 | Poll::Pending 85 | } 86 | } 87 | 88 | fn close(&mut self) { 89 | self.closed = true; 90 | if let Some(waker) = self.read_waker.take() { 91 | waker.wake(); 92 | } 93 | if let Some(waker) = self.write_waker.take() { 94 |
waker.wake(); 95 | } 96 | } 97 | } 98 | 99 | /// The writer returned from [`chunked_pipe`]. 100 | #[derive(Debug)] 101 | pub struct PipeWriter { 102 | shared: Rc<RefCell<Shared>>, 103 | } 104 | 105 | /// The reader returned from [`chunked_pipe`]. 106 | #[derive(Debug)] 107 | pub struct PipeReader { 108 | shared: Rc<RefCell<Shared>>, 109 | } 110 | 111 | impl Drop for PipeWriter { 112 | fn drop(&mut self) { 113 | let mut shared = self.shared.borrow_mut(); 114 | shared.close(); 115 | } 116 | } 117 | 118 | impl Drop for PipeReader { 119 | fn drop(&mut self) { 120 | let mut shared = self.shared.borrow_mut(); 121 | shared.close(); 122 | } 123 | } 124 | 125 | impl AsyncStreamWriter for PipeWriter { 126 | async fn write(&mut self, data: &[u8]) -> io::Result<()> { 127 | let mut written = 0; 128 | while written < data.len() { 129 | written += poll_fn(|cx| { 130 | let mut shared = self.shared.borrow_mut(); 131 | shared.poll_write(&data[written..], cx) 132 | }) 133 | .await?; 134 | } 135 | Ok(()) 136 | } 137 | 138 | async fn write_bytes(&mut self, data: bytes::Bytes) -> io::Result<()> { 139 | self.write(&data[..]).await 140 | } 141 | 142 | async fn sync(&mut self) -> io::Result<()> { 143 | Ok(()) 144 | } 145 | } 146 | 147 | impl Stream for PipeReader { 148 | type Item = io::Result<Bytes>; 149 | 150 | fn poll_next(self: std::pin::Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> { 151 | let mut shared = self.shared.borrow_mut(); 152 | shared.poll_next(cx) 153 | } 154 | } 155 | -------------------------------------------------------------------------------- /src/session/resource.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | collections::{hash_map, HashMap, VecDeque}, 3 | task::{Context, Poll, Waker}, 4 | }; 5 | 6 | use super::Error; 7 | use crate::proto::wgps::{IsHandle, ResourceHandle}; 8 | 9 | /// The bind scope for resources. 10 | /// 11 | /// Resources are bound by either peer. 12 | #[derive(Copy, Clone, Debug)] 13 | pub enum Scope { 14 | /// Resources bound by ourselves. 15 | Ours, 16 | /// Resources bound by the other peer. 17 | Theirs, 18 | } 19 | 20 | #[derive(Debug)] 21 | pub struct ResourceMap<H, R> { 22 | next_handle: u64, 23 | map: HashMap<H, Resource<R>>, 24 | wakers: HashMap<H, VecDeque<Waker>>, 25 | } 26 | 27 | impl<H, R> Default for ResourceMap<H, R> { 28 | fn default() -> Self { 29 | Self { 30 | next_handle: 0, 31 | map: Default::default(), 32 | wakers: Default::default(), 33 | } 34 | } 35 | } 36 | 37 | impl<H, R> ResourceMap<H, R> 38 | where 39 | H: IsHandle, 40 | { 41 | pub fn iter(&self) -> impl Iterator<Item = (&H, &R)> + '_ { 42 | self.map.iter().map(|(h, r)| (h, &r.value)) 43 | } 44 | 45 | pub fn bind(&mut self, resource: R) -> H { 46 | let handle: H = self.next_handle.into(); 47 | self.next_handle += 1; 48 | let resource = Resource::new(resource); 49 | self.map.insert(handle, resource); 50 | tracing::trace!(?handle, "bind"); 51 | if let Some(mut wakers) = self.wakers.remove(&handle) { 52 | tracing::trace!(?handle, "notify {}", wakers.len()); 53 | for waker in wakers.drain(..) { 54 | waker.wake(); 55 | } 56 | } 57 | handle 58 | } 59 | 60 | pub fn try_get(&self, handle: &H) -> Result<&R, MissingResource> { 61 | self.map 62 | .get(handle) 63 | .as_ref() 64 | .map(|r| &r.value) 65 | .ok_or_else(|| MissingResource((*handle).into())) 66 | } 67 | 68 | pub fn poll_get_eventually(&mut self, handle: H, cx: &mut Context<'_>) -> Poll<&R> { 69 | // cannot use self.get() and self.register_waker() here due to borrow checker.
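// (Returning `Poll::Ready(&R)` from a helper call would keep a shared borrow of all
// of `self` alive for the rest of the function, so the `&mut self.wakers` access in
// the `else` branch would be rejected; inlining both sides keeps the borrows disjoint.)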
70 | if let Some(resource) = self.map.get(&handle).as_ref().map(|r| &r.value) { 71 | Poll::Ready(resource) 72 | } else { 73 | self.wakers 74 | .entry(handle) 75 | .or_default() 76 | .push_back(cx.waker().to_owned()); 77 | Poll::Pending 78 | } 79 | } 80 | 81 | pub fn update(&mut self, handle: H, resource: R) -> Result<(), Error> { 82 | match self.map.entry(handle) { 83 | hash_map::Entry::Vacant(_) => Err(Error::MissingResource(handle.into())), 84 | hash_map::Entry::Occupied(mut entry) => { 85 | entry.get_mut().value = resource; 86 | Ok(()) 87 | } 88 | } 89 | } 90 | } 91 | impl<H, R> ResourceMap<H, R> 92 | where 93 | H: IsHandle, 94 | R: Eq + PartialEq, 95 | { 96 | pub fn bind_if_new(&mut self, resource: R) -> (H, bool) { 97 | // TODO: Optimize / find out if reverse index is better than find_map 98 | if let Some(handle) = self 99 | .map 100 | .iter() 101 | .find_map(|(handle, r)| (r.value == resource).then_some(handle)) 102 | { 103 | (*handle, false) 104 | } else { 105 | let handle = self.bind(resource); 106 | (handle, true) 107 | } 108 | } 109 | 110 | pub fn find(&self, resource: &R) -> Option<H> { 111 | self.map 112 | .iter() 113 | .find_map(|(handle, r)| (r.value == *resource).then_some(*handle)) 114 | } 115 | } 116 | 117 | #[derive(Debug, thiserror::Error)] 118 | #[error("missing resource {0:?}")] 119 | pub struct MissingResource(pub ResourceHandle); 120 | 121 | // #[derive(Debug)] 122 | // enum ResourceState { 123 | // Active, 124 | // WeProposedFree, 125 | // ToBeDeleted, 126 | // } 127 | 128 | #[derive(Debug)] 129 | struct Resource<V> { 130 | value: V, 131 | // state: ResourceState, 132 | // unprocessed_messages: usize, 133 | } 134 | impl<V> Resource<V> { 135 | pub fn new(value: V) -> Self { 136 | Self { 137 | value, 138 | // state: ResourceState::Active, 139 | // unprocessed_messages: 0, 140 | } 141 | } 142 | } 143 | 144 | // #[derive(Debug, Default)] 145 | // pub struct Resources { 146 | // pub ours: ScopedResources, 147 | // pub theirs: ScopedResources, 148 | // } 149 | // 150 | // impl Resources { 151 | // pub fn scope(&self, scope: Scope) -> &ScopedResources { 152 | // match scope { 153 | // Scope::Ours => &self.ours, 154 | // Scope::Theirs => &self.theirs, 155 | // } 156 | // } 157 | // 158 | // pub fn scope_mut(&mut self, scope: Scope) -> &mut ScopedResources { 159 | // match scope { 160 | // Scope::Ours => &mut self.ours, 161 | // Scope::Theirs => &mut self.theirs, 162 | // } 163 | // } 164 | // } 165 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "iroh-willow" 3 | version = "0.28.0" 4 | edition = "2021" 5 | readme = "README.md" 6 | description = "willow protocol implementation for iroh" 7 | license = "MIT/Apache-2.0" 8 | authors = ["n0 team"] 9 | repository = "https://github.com/n0-computer/iroh" 10 | 11 | # Sadly this also needs to be updated in .github/workflows/ci.yml 12 | rust-version = "1.77" 13 | 14 | [lints] 15 | workspace = true 16 | 17 | [dependencies] 18 | anyhow = "1" 19 | bytes = { version = "1.4", features = ["serde"] } 20 | curve25519-dalek = { version = "4.1.3", features = [ 21 | "digest", 22 | "rand_core", 23 | "serde", 24 | ] } 25 | derive_more = { version = "1.0.0", features = [ 26 | "debug", 27 | "deref", 28 | "display", 29 | "from", 30 | "try_into", 31 | "into", 32 | "as_ref", 33 | "try_from", 34 | ] } 35 | ed25519-dalek = { version = "2.0.0", features = ["serde", "rand_core"] } 36 | either = "1.13.0" 37 | futures-buffered =
"0.2.6" 38 | futures-concurrency = "7.6.0" 39 | futures-lite = "2.3.0" 40 | futures-util = "0.3.30" 41 | genawaiter = "0.99.1" 42 | hex = "0.4.3" 43 | iroh-base = { version = "0.34.0" } 44 | iroh-blake3 = "1.4.5" 45 | # iroh-blobs = { version = "0.34.0" } 46 | iroh-blobs = { git = "https://github.com/n0-computer/iroh-blobs", branch = "matheus23/verified-streams" } 47 | iroh-io = { version = "0.6.0", features = ["stats"] } 48 | iroh-metrics = { version = "0.32.0", optional = true } 49 | iroh = { version = "0.34.0" } 50 | meadowcap = "0.1.0" 51 | nested_enum_utils = "0.1.0" 52 | postcard = { version = "1", default-features = false, features = [ 53 | "alloc", 54 | "use-std", 55 | "experimental-derive", 56 | ] } 57 | quic-rpc = "0.15.1" 58 | quic-rpc-derive = "0.15.0" 59 | rand = "0.8.5" 60 | rand_core = "0.6.4" 61 | redb = { version = "2.0.0" } 62 | ref-cast = "1.0.23" 63 | self_cell = "1.0.4" 64 | serde = { version = "1.0.164", features = ["derive"] } 65 | serde-error = "0.1.3" 66 | sha2 = "0.10.8" 67 | strum = { version = "0.26", features = ["derive"] } 68 | syncify = "0.1.0" 69 | thiserror = "1" 70 | tokio = { version = "1", features = ["sync"] } 71 | tokio-stream = { version = "0.1.15", features = ["sync"] } 72 | tokio-util = { version = "0.7", features = ["io-util", "io"] } 73 | tracing = "0.1" 74 | ufotofu = { version = "0.4.1", features = ["std"] } 75 | willow-data-model = "0.1.0" 76 | willow-encoding = "0.1.0" 77 | willow-store = { git = "https://github.com/n0-computer/willow-store.git", branch = "main" } 78 | zerocopy = { version = "0.7", features = ["derive"] } 79 | zerocopy-derive = "0.7" 80 | data-encoding = "2.6.0" 81 | 82 | [dev-dependencies] 83 | iroh-test = { version = "0.31" } 84 | iroh = { version = "0.34", features = ["test-utils"] } 85 | rand_chacha = "0.3.1" 86 | tokio = { version = "1", features = ["sync", "macros"] } 87 | proptest = "1.2.0" 88 | tempfile = "3.4" 89 | testresult = "0.4.0" 90 | test-strategy = "0.3.1" 91 | tracing-subscriber = "0.3.18" 92 | 93 | [features] 94 | default = ["metrics"] 95 | metrics = ["iroh-metrics"] 96 | 97 | [profile.release] 98 | debug = true 99 | 100 | [profile.bench] 101 | debug = true 102 | 103 | [profile.dev-ci] 104 | inherits = 'dev' 105 | opt-level = 1 106 | 107 | [profile.optimized-release] 108 | inherits = 'release' 109 | debug = false 110 | lto = true 111 | debug-assertions = false 112 | opt-level = 3 113 | panic = 'abort' 114 | incremental = false 115 | 116 | [profile.dev.package] 117 | # optimize crypto dependencies in dev mode 118 | ed25519-dalek = { opt-level = 3 } 119 | curve25519-dalek = { opt-level = 3 } 120 | iroh-blake3 = { opt-level = 3 } 121 | 122 | [workspace.lints.rust] 123 | missing_debug_implementations = "warn" 124 | 125 | # We use this --cfg for documenting the cargo features on which an API 126 | # is available. To preview this locally use: RUSTFLAGS="--cfg 127 | # iroh_docsrs cargo +nightly doc --all-features". We use our own 128 | # iroh_docsrs instead of the common docsrs to avoid also enabling this 129 | # feature in any dependencies, because some indirect dependencies 130 | # require a feature enabled when using `--cfg docsrs` which we can not 131 | # do. To enable for a crate set `#![cfg_attr(iroh_docsrs, 132 | # feature(doc_cfg))]` in the crate. 
133 | unexpected_cfgs = { level = "warn", check-cfg = ["cfg(iroh_docsrs)"] } 134 | 135 | [workspace.lints.clippy] 136 | unused-async = "warn" 137 | 138 | [patch.crates-io] 139 | # willow-data-model = { path = "../willow-rs/data-model" } 140 | # willow-encoding = { path = "../willow-rs/encoding" } 141 | # meadowcap = { path = "../willow-rs/meadowcap" } 142 | willow-data-model = { git = "https://github.com/n0-computer/willow-rs.git", branch = "main" } 143 | willow-encoding = { git = "https://github.com/n0-computer/willow-rs.git", branch = "main" } 144 | meadowcap = { git = "https://github.com/n0-computer/willow-rs.git", branch = "main" } 145 | 146 | # iroh-base = { git = "https://github.com/n0-computer/iroh", branch = "main" } 147 | # iroh-net = { git = "https://github.com/n0-computer/iroh", branch = "main" } 148 | # iroh-metrics = { git = "https://github.com/n0-computer/iroh", branch = "main" } 149 | # iroh-test = { git = "https://github.com/n0-computer/iroh", branch = "main" } 150 | # iroh-router = { git = "https://github.com/n0-computer/iroh", branch = "main" } 151 | 152 | # iroh-blobs = { git = "https://github.com/n0-computer/iroh-blobs", branch = "matheus23/verified-streams" } 153 | -------------------------------------------------------------------------------- /src/store.rs: -------------------------------------------------------------------------------- 1 | //! Store for entries, secrets, and capabilities used in the Willow engine. 2 | //! 3 | //! The storage backend is defined in the [`Storage`] trait and its associated types. 4 | //! 5 | //! Storage implementations live in the [`memory`] and [`persistent`] modules. 6 | 7 | use anyhow::{anyhow, Context, Result}; 8 | use rand_core::CryptoRngCore; 9 | use traits::EntryStorage; 10 | 11 | pub(crate) use self::traits::EntryOrigin; 12 | use self::{ 13 | auth::{Auth, AuthError}, 14 | traits::Storage, 15 | }; 16 | use crate::{ 17 | form::{AuthForm, EntryForm, EntryOrForm, SubspaceForm, TimestampForm}, 18 | interest::{CapSelector, UserSelector}, 19 | proto::{ 20 | data_model::{AuthorisedEntry, Entry, PayloadDigest}, 21 | keys::{NamespaceId, NamespaceKind, NamespaceSecretKey, UserId}, 22 | }, 23 | store::traits::SecretStorage, 24 | util::time::system_time_now, 25 | }; 26 | 27 | pub(crate) mod auth; 28 | pub mod memory; 29 | pub mod persistent; 30 | pub mod traits; 31 | pub(crate) mod willow_store_glue; 32 | 33 | /// Storage for the Willow engine. 34 | /// 35 | /// Wraps a `Storage` instance and adds the [`Auth`] struct that uses the secret and caps storage to provide 36 | /// authentication when inserting entries.
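///
/// All local writes go through [`Store::insert_entry`] below: an [`AuthForm`] is
/// resolved to a concrete write capability via [`Auth`] before the entry is signed
/// and ingested into entry storage.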
37 | #[derive(Debug, Clone)] 38 | pub(crate) struct Store<S: Storage> { 39 | storage: S, 40 | auth: Auth<S>, 41 | } 42 | 43 | impl<S: Storage> Store<S> { 44 | pub fn new(storage: S) -> Self { 45 | Self { 46 | auth: Auth::new(storage.secrets().clone(), storage.caps().clone()), 47 | storage, 48 | } 49 | } 50 | 51 | pub fn entries(&self) -> &S::Entries { 52 | self.storage.entries() 53 | } 54 | 55 | pub fn secrets(&self) -> &S::Secrets { 56 | self.storage.secrets() 57 | } 58 | 59 | pub fn payloads(&self) -> &S::Payloads { 60 | self.storage.payloads() 61 | } 62 | 63 | pub fn auth(&self) -> &Auth<S> { 64 | &self.auth 65 | } 66 | 67 | pub async fn insert_entry( 68 | &self, 69 | entry: EntryOrForm, 70 | auth: AuthForm, 71 | ) -> Result<(AuthorisedEntry, bool)> { 72 | let user_id = auth.user_id(); 73 | let entry = match entry { 74 | EntryOrForm::Entry(entry) => Ok(entry), 75 | EntryOrForm::Form(form) => self.form_to_entry(form, user_id).await, 76 | }?; 77 | let capability = match auth { 78 | AuthForm::Exact(cap) => cap, 79 | AuthForm::Any(user_id) => { 80 | let selector = CapSelector::for_entry(&entry, UserSelector::Exact(user_id)); 81 | self.auth() 82 | .get_write_cap(&selector)? 83 | .ok_or_else(|| anyhow!("no write capability available"))? 84 | } 85 | }; 86 | let secret_key = self 87 | .secrets() 88 | .get_user(&user_id)? 89 | .context("Missing user keypair")?; 90 | 91 | // TODO(frando): This should use `authorisation_token_unchecked` if we uphold the invariant 92 | // that `user_id` is a pubkey for `secret_key`. However, that is `unsafe` at the moment 93 | // (but should not be, IMO). 94 | // Not using the `_unchecked` variant has the cost of an additional signature verification, 95 | // which is significant. 96 | let token = capability.authorisation_token(&entry, secret_key)?; 97 | let authorised_entry = AuthorisedEntry::new_unchecked(entry, token); 98 | let inserted = self 99 | .entries() 100 | .ingest_entry(&authorised_entry, EntryOrigin::Local)?; 101 | Ok((authorised_entry, inserted)) 102 | } 103 | 104 | pub fn create_namespace( 105 | &self, 106 | rng: &mut impl CryptoRngCore, 107 | kind: NamespaceKind, 108 | owner: UserId, 109 | ) -> Result<NamespaceId> { 110 | let namespace_secret = NamespaceSecretKey::generate(rng, kind); 111 | let namespace_id = namespace_secret.id(); 112 | self.secrets().insert_namespace(namespace_secret)?; 113 | self.auth().create_full_caps(namespace_id, owner)?; 114 | Ok(namespace_id) 115 | } 116 | 117 | /// Convert the form into an [`Entry`] by filling the fields with data from the environment and 118 | /// the provided [`Store`]. 119 | /// 120 | /// `user_id` must be set to the user who is authenticating the entry.
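///
/// Concretely, per the match arms below: [`TimestampForm::Now`] resolves to
/// [`system_time_now`], and [`SubspaceForm::User`] resolves to `user_id` itself,
/// so a caller only needs to spell out the fields it cares about.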
121 | async fn form_to_entry( 122 | &self, 123 | form: EntryForm, 124 | user_id: UserId, // auth: AuthForm, 125 | ) -> anyhow::Result<Entry> { 126 | let timestamp = match form.timestamp { 127 | TimestampForm::Now => system_time_now(), 128 | TimestampForm::Exact(timestamp) => timestamp, 129 | }; 130 | let subspace_id = match form.subspace_id { 131 | SubspaceForm::User => user_id, 132 | SubspaceForm::Exact(subspace) => subspace, 133 | }; 134 | let (payload_digest, payload_length) = form.payload.submit(self.payloads()).await?; 135 | let entry = Entry::new( 136 | form.namespace_id, 137 | subspace_id, 138 | form.path, 139 | timestamp, 140 | payload_length, 141 | PayloadDigest(payload_digest), 142 | ); 143 | Ok(entry) 144 | } 145 | } 146 | -------------------------------------------------------------------------------- /src/session/channels.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | marker::PhantomData, 3 | pin::Pin, 4 | task::{self, ready, Poll}, 5 | }; 6 | 7 | use futures_lite::Stream; 8 | use tracing::trace; 9 | 10 | use super::Error; 11 | use crate::{ 12 | proto::wgps::{ 13 | Channel, DataMessage, IntersectionMessage, LogicalChannel, Message, ReconciliationMessage, 14 | SetupBindAreaOfInterest, SetupBindReadCapability, SetupBindStaticToken, 15 | }, 16 | util::channel::{Receiver, Sender, WriteError}, 17 | }; 18 | 19 | #[derive(Debug)] 20 | pub struct MessageReceiver<T> { 21 | inner: Receiver<Message>, 22 | _phantom: PhantomData<T>, 23 | } 24 | 25 | impl<T: TryFrom<Message>> MessageReceiver<T> { 26 | // pub async fn recv(&mut self) -> Option<Result<T, Error>> { 27 | // poll_fn(|cx| self.poll_recv(cx)).await 28 | // } 29 | 30 | // pub fn close(&self) { 31 | // self.inner.close() 32 | // } 33 | 34 | pub fn poll_recv(&mut self, cx: &mut task::Context<'_>) -> Poll<Option<Result<T, Error>>> { 35 | let message = ready!(Pin::new(&mut self.inner).poll_next(cx)); 36 | let message = match message { 37 | None => None, 38 | Some(Err(err)) => Some(Err(err.into())), 39 | Some(Ok(message)) => { 40 | trace!(%message, "recv"); 41 | let message = message.try_into().map_err(|_| Error::WrongChannel); 42 | Some(message) 43 | } 44 | }; 45 | Poll::Ready(message) 46 | } 47 | } 48 | 49 | impl<T: TryFrom<Message> + Unpin> Stream for MessageReceiver<T> { 50 | type Item = Result<T, Error>; 51 | fn poll_next(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Option<Self::Item>> { 52 | self.get_mut().poll_recv(cx) 53 | } 54 | } 55 | 56 | impl<T: TryFrom<Message>> From<Receiver<Message>> for MessageReceiver<T> { 57 | fn from(inner: Receiver<Message>) -> Self { 58 | Self { 59 | inner, 60 | _phantom: PhantomData, 61 | } 62 | } 63 | } 64 | 65 | #[derive(Debug)] 66 | pub struct LogicalChannelReceivers { 67 | pub intersection_recv: MessageReceiver<IntersectionMessage>, 68 | pub reconciliation_recv: MessageReceiver<ReconciliationMessage>, 69 | pub static_tokens_recv: MessageReceiver<SetupBindStaticToken>, 70 | pub capability_recv: MessageReceiver<SetupBindReadCapability>, 71 | pub aoi_recv: MessageReceiver<SetupBindAreaOfInterest>, 72 | pub data_recv: MessageReceiver<DataMessage>, 73 | } 74 | 75 | impl LogicalChannelReceivers { 76 | // pub fn close(&self) { 77 | // self.intersection_recv.close(); 78 | // self.reconciliation_recv.close(); 79 | // self.static_tokens_recv.close(); 80 | // self.capability_recv.close(); 81 | // self.aoi_recv.close(); 82 | // self.data_recv.close(); 83 | // } 84 | } 85 | 86 | #[derive(Debug, Clone)] 87 | pub struct LogicalChannelSenders { 88 | pub intersection_send: Sender<Message>, 89 | pub reconciliation_send: Sender<Message>, 90 | pub static_tokens_send: Sender<Message>, 91 | pub aoi_send: Sender<Message>, 92 | pub capability_send: Sender<Message>, 93 | pub data_send: Sender<Message>, 94 | } 95 | impl LogicalChannelSenders { 96 | pub fn close(&self) { 97 | self.intersection_send.close(); 98 |
self.reconciliation_send.close(); 99 | self.static_tokens_send.close(); 100 | self.aoi_send.close(); 101 | self.capability_send.close(); 102 | self.data_send.close(); 103 | } 104 | 105 | pub fn get(&self, channel: LogicalChannel) -> &Sender<Message> { 106 | match channel { 107 | LogicalChannel::Intersection => &self.intersection_send, 108 | LogicalChannel::Reconciliation => &self.reconciliation_send, 109 | LogicalChannel::StaticToken => &self.static_tokens_send, 110 | LogicalChannel::Capability => &self.capability_send, 111 | LogicalChannel::AreaOfInterest => &self.aoi_send, 112 | LogicalChannel::Data => &self.data_send, 113 | } 114 | } 115 | } 116 | 117 | #[derive(Debug, Clone)] 118 | pub struct ChannelSenders { 119 | pub control_send: Sender<Message>, 120 | pub logical_send: LogicalChannelSenders, 121 | } 122 | 123 | #[derive(Debug)] 124 | pub struct ChannelReceivers { 125 | pub control_recv: Receiver<Message>, 126 | pub logical_recv: LogicalChannelReceivers, 127 | } 128 | 129 | #[derive(Debug)] 130 | pub struct Channels { 131 | pub send: ChannelSenders, 132 | pub recv: ChannelReceivers, 133 | } 134 | 135 | impl ChannelSenders { 136 | pub fn close_all(&self) { 137 | self.control_send.close(); 138 | self.logical_send.close(); 139 | } 140 | pub fn get(&self, channel: Channel) -> &Sender<Message> { 141 | match channel { 142 | Channel::Control => &self.control_send, 143 | Channel::Logical(channel) => self.get_logical(channel), 144 | } 145 | } 146 | 147 | pub fn get_logical(&self, channel: LogicalChannel) -> &Sender<Message> { 148 | self.logical_send.get(channel) 149 | } 150 | 151 | pub async fn send(&self, message: impl Into<Message>) -> Result<(), WriteError> { 152 | let message: Message = message.into(); 153 | let channel = message.channel(); 154 | self.get(channel).send_message(&message).await?; 155 | trace!(%message, ch=%channel.fmt_short(), "sent"); 156 | Ok(()) 157 | } 158 | } 159 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing to Iroh 2 | 3 | We'd love for you to contribute to our source code to make Iroh even better! 4 | 5 | When contributing to Iroh, you are expected to follow our [Code of Conduct][coc]. 6 | 7 | Here are some of the ways in which you can contribute: 8 | 9 | ## Discussions 10 | 11 | If you want to ask a question to understand a concept regarding Iroh, or need help working with Iroh, please check the [Discussions][discussions]. If you don't find a thread that fits your needs, feel free to create a new one. 12 | 13 | ## Issues 14 | 15 | If you found unexpected behavior while using Iroh, please browse our existing [issues][issues]. If no issues fit your case, [create a new one][newissue]. 16 | 17 | If you would like to suggest a new feature in Iroh, [create a new issue][newissue]. This helps us have meaningful conversations about design, feasibility, and general expectations of how a feature would work. If you plan to work on this yourself, we ask you to state this as well, so that you receive the guidance you need. 18 | 19 | ## Pull requests 20 | 21 | Code contributions to Iroh are greatly appreciated. Here is the general workflow you should follow: 22 | 23 | 1. **State in the associated issue your desire to work on it** 24 | 25 | If there is no issue for the work you would like to do, please open one. This helps reduce duplicated efforts and gives contributors the help and guidance they might need. 26 | 27 | 2.
**Write some code!** 28 | 29 | If this is your first contribution to Iroh, you will need to [fork][forkiroh] the repository and clone it using git. If you need help with the code you are working on, don't hesitate to ask questions in the associated issue. We will be happy to help you. 30 | 31 | 3. **Open the pull request** 32 | 33 | In general, pull requests should be opened as [a draft][draftprs]. This way, the team and community can know what work is being done, and reviewers can give early pointers on the work you are doing. Additionally, we ask you to follow these guidelines: 34 | 35 | - **General code guidelines** 36 | 37 | - When possible, please document relevant pieces of code following the [rust documentation conventions][docconventions]. For more information on how the rust documentation system works check the [rustdoc documentation][rustdoc]. 38 | - Comment your code. It will be useful for your reviewer and future contributors. 39 | 40 | - **Pull request titles** 41 | 42 | - Iroh pull request titles look like this: `type(crate): description` 43 | 44 | | **`type`** | **When to use** | 45 | |--: |-- | 46 | | `feat` | A new feature | 47 | | `test` | Changes that exclusively affect tests, either by adding new ones or correcting existing ones | 48 | | `fix` | A bug fix | 49 | | `docs` | Documentation only changes | 50 | | `refactor` | A code change that neither fixes a bug nor adds a feature | 51 | | `perf` | A code change that improves performance | 52 | | `deps` | Dependency only updates | 53 | | `chore` | Changes to the build process or auxiliary tools and libraries | 54 | 55 | 56 | **`crate`** is the rust crate containing your changes. 57 | 58 | **`description`** is a short sentence that summarizes your changes. 59 | 60 | If there is a breaking change please use a `!` in the commit message to denote this, eg. `feat(iroh)!: break the world`. 61 | 62 | - **Pull request descriptions** 63 | 64 | Once you open a pull request, you will be prompted to follow a template with three simple parts: 65 | 66 | - **Description** 67 | 68 | A summary of what your pull request achieves and a rough list of changes. 69 | 70 | - **Breaking Changes** 71 | 72 | Optional, if there are any breaking changes document them, including how to migrate older code. 73 | 74 | - **Notes & open questions** 75 | 76 | Notes, open questions and remarks about your changes. 77 | 78 | - **Checklist** 79 | 80 | - **Self review**: We ask you to thoroughly review your changes until you are happy with them. This helps speed up the review process. 81 | - **Add documentation**: If your change requires documentation updates, make sure they are properly added. 82 | - **Tests**: If your code adds a new feature, add tests for it when possible. If it fixes a bug, a regression test is recommended as well. 83 | - **Breaking Changes**: All breaking changes need to be documented. 84 | 85 | 86 | 4. **Review process** 87 | 88 | - Mark your pull request as ready for review. 89 | - If a team member in particular is guiding you, feel free to directly tag them in your pull request to get a review. Otherwise, wait for someone to pick it up. 90 | - Attend to constructive criticism and make changes when necessary. 91 | 92 | 5. **My code is ready to be merged!** 93 | 94 | Congratulations on becoming an official Iroh contributor!
95 | 96 | [coc]: https://github.com/n0-computer/iroh/blob/main/code_of_conduct.md 97 | [discussions]: https://github.com/n0-computer/iroh/discussions 98 | [issues]: https://github.com/n0-computer/iroh/issues?q=is%3Aissue+is%3Aopen+sort%3Aupdated-desc 99 | [newissue]: https://github.com/n0-computer/iroh/issues/new 100 | [forkiroh]: https://github.com/n0-computer/iroh/fork 101 | [draftprs]: https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/about-pull-requests#draft-pull-requests 102 | [rustdoc]: https://doc.rust-lang.org/rustdoc/how-to-write-documentation.html 103 | [docconventions]: https://rust-lang.github.io/rfcs/1574-more-api-documentation-conventions.html#appendix-a-full-conventions-text 104 | -------------------------------------------------------------------------------- /src/proto/wgps/channels.rs: -------------------------------------------------------------------------------- 1 | use serde::{Deserialize, Serialize}; 2 | use strum::{EnumCount, VariantArray}; 3 | 4 | use super::messages::Message; 5 | 6 | #[derive(Debug, Copy, Clone, Eq, PartialEq, Hash, derive_more::TryFrom)] 7 | pub enum Channel { 8 | Control, 9 | Logical(LogicalChannel), 10 | } 11 | 12 | impl Channel { 13 | pub const COUNT: usize = LogicalChannel::COUNT + 1; 14 | 15 | pub fn all() -> [Channel; LogicalChannel::COUNT + 1] { 16 | // TODO: do this without allocation 17 | // https://users.rust-lang.org/t/how-to-concatenate-array-literals-in-compile-time/21141/3 18 | [Self::Control] 19 | .into_iter() 20 | .chain(LogicalChannel::VARIANTS.iter().copied().map(Self::Logical)) 21 | .collect::<Vec<_>>() 22 | .try_into() 23 | .expect("static length") 24 | } 25 | 26 | pub fn fmt_short(&self) -> &'static str { 27 | match self { 28 | Channel::Control => "Ctl", 29 | Channel::Logical(ch) => ch.fmt_short(), 30 | } 31 | } 32 | 33 | pub fn id(&self) -> u8 { 34 | match self { 35 | Channel::Control => 0, 36 | Channel::Logical(ch) => ch.id(), 37 | } 38 | } 39 | 40 | pub fn from_id(id: u8) -> Result<Self, InvalidChannelId> { 41 | match id { 42 | 0 => Ok(Self::Control), 43 | _ => { 44 | let ch = LogicalChannel::from_id(id)?; 45 | Ok(Self::Logical(ch)) 46 | } 47 | } 48 | } 49 | } 50 | 51 | /// The different logical channels employed by the WGPS. 52 | #[derive( 53 | Debug, 54 | Serialize, 55 | Deserialize, 56 | Copy, 57 | Clone, 58 | Eq, 59 | PartialEq, 60 | Hash, 61 | strum::EnumIter, 62 | strum::VariantArray, 63 | strum::EnumCount, 64 | )] 65 | pub enum LogicalChannel { 66 | /// Logical channel for controlling the binding of new IntersectionHandles. 67 | Intersection, 68 | /// Logical channel for controlling the binding of new CapabilityHandles. 69 | Capability, 70 | /// Logical channel for controlling the binding of new AreaOfInterestHandles. 71 | AreaOfInterest, 72 | /// Logical channel for controlling the binding of new StaticTokenHandles. 73 | StaticToken, 74 | /// Logical channel for performing 3d range-based set reconciliation. 75 | Reconciliation, 76 | /// Logical channel for transmitting Entries and Payloads outside of 3d range-based set reconciliation. 77 | Data, 78 | // /// Logical channel for controlling the binding of new PayloadRequestHandles.
79 | // PayloadRequest, 80 | } 81 | 82 | #[derive(Debug, thiserror::Error)] 83 | #[error("invalid channel id")] 84 | pub struct InvalidChannelId; 85 | 86 | impl LogicalChannel { 87 | pub fn all() -> [LogicalChannel; LogicalChannel::COUNT] { 88 | LogicalChannel::VARIANTS 89 | .try_into() 90 | .expect("statically checked") 91 | } 92 | pub fn fmt_short(&self) -> &'static str { 93 | match self { 94 | LogicalChannel::Intersection => "Pai", 95 | LogicalChannel::Reconciliation => "Rec", 96 | LogicalChannel::StaticToken => "StT", 97 | LogicalChannel::Capability => "Cap", 98 | LogicalChannel::AreaOfInterest => "AoI", 99 | LogicalChannel::Data => "Dat", 100 | } 101 | } 102 | 103 | pub fn from_id(id: u8) -> Result<Self, InvalidChannelId> { 104 | match id { 105 | 2 => Ok(Self::Intersection), 106 | 3 => Ok(Self::AreaOfInterest), 107 | 4 => Ok(Self::Capability), 108 | 5 => Ok(Self::StaticToken), 109 | 6 => Ok(Self::Reconciliation), 110 | 7 => Ok(Self::Data), 111 | _ => Err(InvalidChannelId), 112 | } 113 | } 114 | 115 | pub fn id(&self) -> u8 { 116 | match self { 117 | LogicalChannel::Intersection => 2, 118 | LogicalChannel::AreaOfInterest => 3, 119 | LogicalChannel::Capability => 4, 120 | LogicalChannel::StaticToken => 5, 121 | LogicalChannel::Reconciliation => 6, 122 | LogicalChannel::Data => 7, 123 | } 124 | } 125 | } 126 | 127 | impl Message { 128 | pub fn channel(&self) -> Channel { 129 | match self { 130 | Message::PaiBindFragment(_) | Message::PaiReplyFragment(_) => { 131 | Channel::Logical(LogicalChannel::Intersection) 132 | } 133 | 134 | Message::SetupBindReadCapability(_) => Channel::Logical(LogicalChannel::Capability), 135 | Message::SetupBindAreaOfInterest(_) => Channel::Logical(LogicalChannel::AreaOfInterest), 136 | Message::SetupBindStaticToken(_) => Channel::Logical(LogicalChannel::StaticToken), 137 | 138 | Message::ReconciliationSendFingerprint(_) 139 | | Message::ReconciliationAnnounceEntries(_) 140 | | Message::ReconciliationSendEntry(_) 141 | | Message::ReconciliationSendPayload(_) 142 | | Message::ReconciliationTerminatePayload(_) => { 143 | Channel::Logical(LogicalChannel::Reconciliation) 144 | } 145 | 146 | Message::DataSendEntry(_) 147 | | Message::DataSendPayload(_) 148 | | Message::DataSetMetadata(_) => Channel::Logical(LogicalChannel::Data), 149 | 150 | Message::CommitmentReveal(_) 151 | | Message::PaiRequestSubspaceCapability(_) 152 | | Message::PaiReplySubspaceCapability(_) 153 | | Message::ControlIssueGuarantee(_) 154 | | Message::ControlAbsolve(_) 155 | | Message::ControlPlead(_) 156 | | Message::ControlAnnounceDropping(_) 157 | | Message::ControlApologise(_) 158 | | Message::ControlFreeHandle(_) => Channel::Control, 159 | } 160 | } 161 | } 162 | -------------------------------------------------------------------------------- /src/session/capabilities.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | cell::RefCell, 3 | future::poll_fn, 4 | rc::Rc, 5 | task::{ready, Poll, Waker}, 6 | }; 7 | 8 | use crate::{ 9 | proto::{ 10 | keys::UserSignature, 11 | meadowcap::{ReadCapability, SubspaceCapability}, 12 | wgps::{ 13 | AccessChallenge, CapabilityHandle, ChallengeHash, CommitmentReveal, IntersectionHandle, 14 | PaiReplySubspaceCapability, SetupBindReadCapability, 15 | }, 16 | }, 17 | session::{challenge::ChallengeState, resource::ResourceMap, Error, Role}, 18 | store::traits::SecretStorage, 19 | }; 20 | 21 | #[derive(Debug, Clone)] 22 | pub struct Capabilities(Rc<RefCell<Inner>>); 23 | 24 | #[derive(Debug)] 25 | struct Inner { 26 | challenge: ChallengeState, 27 |
ours: ResourceMap<CapabilityHandle, ReadCapability>, 28 | theirs: ResourceMap<CapabilityHandle, ReadCapability>, 29 | on_reveal_wakers: Vec<Waker>, 30 | } 31 | 32 | impl Capabilities { 33 | pub fn new(our_nonce: AccessChallenge, received_commitment: ChallengeHash) -> Self { 34 | let challenge = ChallengeState::Committed { 35 | our_nonce, 36 | received_commitment, 37 | }; 38 | Self(Rc::new(RefCell::new(Inner { 39 | challenge, 40 | ours: Default::default(), 41 | theirs: Default::default(), 42 | on_reveal_wakers: Default::default(), 43 | }))) 44 | } 45 | 46 | // pub fn revealed(&self) -> impl Future<Output = ()> + '_ { 47 | // std::future::poll_fn(|cx| { 48 | // let mut inner = self.0.borrow_mut(); 49 | // if inner.challenge.is_revealed() { 50 | // Poll::Ready(()) 51 | // } else { 52 | // inner.on_reveal_wakers.push(cx.waker().to_owned()); 53 | // Poll::Pending 54 | // } 55 | // }) 56 | // } 57 | 58 | pub fn is_revealed(&self) -> bool { 59 | self.0.borrow().challenge.is_revealed() 60 | } 61 | 62 | pub fn find_ours(&self, cap: &ReadCapability) -> Option<CapabilityHandle> { 63 | self.0.borrow().ours.find(cap) 64 | } 65 | 66 | pub fn sign_capability<S: SecretStorage>( 67 | &self, 68 | secret_store: &S, 69 | intersection_handle: IntersectionHandle, 70 | capability: ReadCapability, 71 | ) -> Result<SetupBindReadCapability, Error> { 72 | let inner = self.0.borrow(); 73 | let signable = inner.challenge.signable()?; 74 | let signature = secret_store.sign_user(capability.receiver(), &signable)?; 75 | Ok(SetupBindReadCapability { 76 | capability: capability.into(), 77 | handle: intersection_handle, 78 | signature, 79 | }) 80 | } 81 | 82 | pub fn bind_ours(&self, capability: ReadCapability) -> (CapabilityHandle, bool) { 83 | self.0.borrow_mut().ours.bind_if_new(capability) 84 | } 85 | 86 | pub fn validate_and_bind_theirs( 87 | &self, 88 | capability: ReadCapability, 89 | signature: UserSignature, 90 | ) -> Result<(), Error> { 91 | // TODO(Frando): I *think* meadowcap caps are always validated (no way to construct invalid ones). 92 | // capability.validate()?; 93 | let mut inner = self.0.borrow_mut(); 94 | // TODO(Frando): We should somehow remove the `Id`/`PublicKey` split. 95 | let receiver_key = capability.receiver().into_public_key()?; 96 | inner.challenge.verify(&receiver_key, &signature)?; 97 | inner.theirs.bind(capability); 98 | Ok(()) 99 | } 100 | 101 | pub async fn get_theirs_eventually(&self, handle: CapabilityHandle) -> ReadCapability { 102 | poll_fn(|cx| { 103 | let mut inner = self.0.borrow_mut(); 104 | let cap = ready!(inner.theirs.poll_get_eventually(handle, cx)); 105 | Poll::Ready(cap.clone()) 106 | }) 107 | .await 108 | } 109 | 110 | pub fn verify_subspace_cap( 111 | &self, 112 | capability: &SubspaceCapability, 113 | signature: &UserSignature, 114 | ) -> Result<(), Error> { 115 | // TODO(Frando): I *think* meadowcap caps are always validated (no way to construct invalid ones). 116 | // capability.validate()?; 117 | // TODO(Frando): We should somehow remove the `Id`/`PublicKey` split. 118 | let receiver_key = capability.receiver().into_public_key()?; 119 | self.0 120 | .borrow_mut() 121 | .challenge 122 | .verify(&receiver_key, signature)?; 123 | Ok(()) 124 | } 125 | 126 | pub fn reveal_commitment(&self) -> Result<CommitmentReveal, Error> { 127 | match self.0.borrow_mut().challenge { 128 | ChallengeState::Committed { our_nonce, ..
} => { 129 | Ok(CommitmentReveal { nonce: our_nonce }) 130 | } 131 | _ => Err(Error::InvalidMessageInCurrentState), 132 | } 133 | } 134 | 135 | pub fn received_commitment_reveal( 136 | &self, 137 | our_role: Role, 138 | their_nonce: AccessChallenge, 139 | ) -> Result<(), Error> { 140 | let mut inner = self.0.borrow_mut(); 141 | inner.challenge.reveal(our_role, their_nonce)?; 142 | for waker in inner.on_reveal_wakers.drain(..) { 143 | waker.wake(); 144 | } 145 | Ok(()) 146 | } 147 | 148 | pub fn sign_subspace_capability<S: SecretStorage>( 149 | &self, 150 | secrets: &S, 151 | cap: SubspaceCapability, 152 | handle: IntersectionHandle, 153 | ) -> Result<PaiReplySubspaceCapability, Error> { 154 | let inner = self.0.borrow(); 155 | let signable = inner.challenge.signable()?; 156 | let signature = secrets.sign_user(cap.receiver(), &signable)?; 157 | let message = PaiReplySubspaceCapability { 158 | handle, 159 | capability: cap.clone().into(), 160 | signature, 161 | }; 162 | Ok(message) 163 | } 164 | } 165 | -------------------------------------------------------------------------------- /src/session/payload.rs: -------------------------------------------------------------------------------- 1 | use std::io; 2 | 3 | use bytes::Bytes; 4 | use futures_concurrency::future::TryJoin; 5 | use futures_lite::StreamExt; 6 | use futures_util::TryFutureExt; 7 | use iroh_blobs::{ 8 | store::{MapEntry, Store as PayloadStore}, 9 | Hash, HashAndFormat, TempTag, 10 | }; 11 | use iroh_io::TokioStreamReader; 12 | use tokio::sync::mpsc; 13 | use tokio_stream::wrappers::ReceiverStream; 14 | 15 | use super::Error; 16 | use crate::{ 17 | proto::{data_model::PayloadDigest, wgps::Message}, 18 | session::channels::ChannelSenders, 19 | util::pipe::chunked_pipe, 20 | }; 21 | 22 | const CHUNK_SIZE: usize = 1024 * 32; 23 | 24 | /// Send a payload in chunks. 25 | /// 26 | /// Returns `true` if the payload was sent. 27 | /// Returns `false` if the blob is not found in `payload_store`. 28 | /// Returns an error if the store or sending on the `senders` return an error. 29 | // TODO: Include outboards. 30 | pub async fn send_payload_chunked<P: PayloadStore>( 31 | digest: PayloadDigest, 32 | payload_store: &P, 33 | senders: &ChannelSenders, 34 | offset: u64, 35 | map: impl Fn(Bytes) -> Message, 36 | ) -> Result<bool, Error> { 37 | let hash: Hash = digest.into(); 38 | let entry = payload_store 39 | .get(&hash) 40 | .await 41 | .map_err(Error::PayloadStore)?; 42 | let Some(entry) = entry else { 43 | return Ok(false); 44 | }; 45 | 46 | let (writer, mut reader) = chunked_pipe(CHUNK_SIZE); 47 | let write_stream_fut = entry 48 | .write_verifiable_stream(offset, writer) 49 | .map_err(Error::PayloadStore); 50 | let send_fut = async { 51 | while let Some(bytes) = reader.try_next().await.map_err(Error::PayloadStore)? { 52 | let msg = map(bytes); 53 | senders.send(msg).await?; 54 | } 55 | Ok(()) 56 | }; 57 | (write_stream_fut, send_fut).try_join().await?; 58 | Ok(true) 59 | } 60 | 61 | #[derive(Debug, Default)] 62 | pub struct CurrentPayload(Option<CurrentPayloadInner>); 63 | 64 | #[derive(Debug)] 65 | struct CurrentPayloadInner { 66 | payload_digest: PayloadDigest, 67 | expected_length: u64, 68 | received_length: u64, 69 | total_length: u64, 70 | offset: u64, 71 | writer: Option<PayloadWriter>, 72 | } 73 | 74 | #[derive(derive_more::Debug)] 75 | struct PayloadWriter { 76 | tag: TempTag, 77 | task: tokio::task::JoinHandle<io::Result<()>>, 78 | sender: mpsc::Sender<io::Result<Bytes>>, 79 | } 80 | 81 | impl CurrentPayload { 82 | /// Set the payload to be received.
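///
/// A small worked example of the length accounting below: with `total_length = 100`,
/// `offset = 10` and no `available_length`, the available length defaults to the
/// total, so `expected_length = 100 - 10 = 90` bytes remain to be received.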
83 | pub fn set( 84 | &mut self, 85 | payload_digest: PayloadDigest, 86 | total_length: u64, 87 | available_length: Option<u64>, 88 | offset: Option<u64>, 89 | ) -> Result<(), Error> { 90 | if self.0.is_some() { 91 | return Err(Error::InvalidMessageInCurrentState); 92 | } 93 | let offset = offset.unwrap_or(0); 94 | let available_length = available_length.unwrap_or(total_length); 95 | let expected_length = available_length - offset; 96 | self.0 = Some(CurrentPayloadInner { 97 | payload_digest, 98 | writer: None, 99 | expected_length, 100 | total_length, 101 | offset, 102 | received_length: 0, 103 | }); 104 | Ok(()) 105 | } 106 | 107 | pub async fn recv_chunk<P: PayloadStore>( 108 | &mut self, 109 | store: &P, 110 | chunk: Bytes, 111 | ) -> anyhow::Result<()> { 112 | let state = self.0.as_mut().ok_or(Error::InvalidMessageInCurrentState)?; 113 | let len = chunk.len(); 114 | let store = store.clone(); 115 | let writer = state.writer.get_or_insert_with(|| { 116 | let (tx, rx) = tokio::sync::mpsc::channel(2); 117 | let store = store.clone(); 118 | let hash: Hash = state.payload_digest.into(); 119 | let total_length = state.total_length; 120 | let offset = state.offset; 121 | let tag = store.temp_tag(HashAndFormat::raw(hash)); 122 | let mut reader = 123 | TokioStreamReader(tokio_util::io::StreamReader::new(ReceiverStream::new(rx))); 124 | let fut = async move { 125 | store 126 | .import_verifiable_stream(hash, total_length, offset, &mut reader) 127 | .await?; 128 | Ok(()) 129 | }; 130 | let task = tokio::task::spawn_local(fut); 131 | PayloadWriter { 132 | tag, 133 | task, 134 | sender: tx, 135 | } 136 | }); 137 | writer.sender.send(Ok(chunk)).await?; 138 | state.received_length += len as u64; 139 | Ok(()) 140 | } 141 | 142 | pub fn is_complete(&self) -> bool { 143 | let Some(state) = self.0.as_ref() else { 144 | return false; 145 | }; 146 | state.received_length >= state.expected_length 147 | } 148 | 149 | pub async fn finalize(&mut self) -> Result<(), Error> { 150 | let state = self.0.take().ok_or(Error::InvalidMessageInCurrentState)?; 151 | // The writer is only set if we received at least one payload chunk. 152 | if let Some(writer) = state.writer { 153 | drop(writer.sender); 154 | writer 155 | .task 156 | .await 157 | .expect("payload writer panicked") 158 | .map_err(Error::PayloadStore)?; 159 | // TODO: Make sure blobs referenced from entries are protected from GC by now. 160 | drop(writer.tag); 161 | } 162 | Ok(()) 163 | } 164 | 165 | pub fn is_active(&self) -> bool { 166 | self.0.as_ref().map(|s| s.writer.is_some()).unwrap_or(false) 167 | } 168 | 169 | pub fn ensure_none(&self) -> Result<(), Error> { 170 | if self.is_active() { 171 | Err(Error::InvalidMessageInCurrentState) 172 | } else { 173 | Ok(()) 174 | } 175 | } 176 | } 177 | -------------------------------------------------------------------------------- /src/session/error.rs: -------------------------------------------------------------------------------- 1 | use ed25519_dalek::SignatureError; 2 | use tokio::sync::mpsc; 3 | 4 | use crate::{ 5 | proto::{data_model::UnauthorisedWriteError, meadowcap::UserId, wgps::ResourceHandle}, 6 | session::{pai_finder::PaiError, resource::MissingResource}, 7 | store::traits::SecretStoreError, 8 | util::channel::{ReadError, WriteError}, 9 | }; 10 | 11 | // This is a catch-all error type for the session module.
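// Variants span local storage failures, crypto/capability verification failures,
// protocol-state violations, and channel/connection shutdown.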
12 | // TODO: Split this into multiple error types 13 | #[derive(Debug, thiserror::Error)] 14 | pub enum Error { 15 | #[error("local store failed: {0}")] 16 | Store(#[from] anyhow::Error), 17 | #[error("authentication error: {0}")] 18 | Auth(#[from] crate::store::auth::AuthError), 19 | #[error("payload store failed: {0}")] 20 | PayloadStore(std::io::Error), 21 | #[error("payload digest does not match expected digest")] 22 | PayloadDigestMismatch, 23 | #[error("payload size does not match expected size")] 24 | PayloadSizeMismatch, 25 | #[error("local store failed: {0}")] 26 | KeyStore(#[from] SecretStoreError), 27 | #[error("failed to receive data: {0}")] 28 | Receive(#[from] ReadError), 29 | #[error("failed to send data: {0}")] 30 | Write(#[from] WriteError), 31 | #[error("wrong secret key for capability")] 32 | WrongSecretKeyForCapability, 33 | #[error("missing resource {0:?}")] 34 | MissingResource(ResourceHandle), 35 | #[error("received capability is invalid")] 36 | InvalidCapability, 37 | #[error("received capability has an invalid signature")] 38 | InvalidSignature, 39 | #[error("range is not covered by the capability")] 40 | RangeOutsideCapability, 41 | #[error("received a message that is not valid in the current session state")] 42 | InvalidMessageInCurrentState, 43 | #[error("our and their area of interests refer to different namespaces")] 44 | AreaOfInterestNamespaceMismatch, 45 | #[error("our and their area of interests do not overlap")] 46 | AreaOfInterestDoesNotOverlap, 47 | #[error("received an area of interest which is not authorised")] 48 | UnauthorisedArea, 49 | #[error("received an entry which is not authorised")] 50 | UnauthorisedWrite(#[from] UnauthorisedWriteError), 51 | #[error("received an unsupported message type")] 52 | UnsupportedMessage, 53 | #[error("received a message that is intended for another channel")] 54 | WrongChannel, 55 | #[error("the received nonce does not match the received commitment")] 56 | BrokenCommittement, 57 | #[error("received an actor message for unknown session")] 58 | SessionNotFound, 59 | #[error("invalid parameters: {0}")] 60 | InvalidParameters(&'static str), 61 | #[error("reached an invalid state")] 62 | InvalidState(&'static str), 63 | #[error("actor failed to respond")] 64 | ActorFailed, 65 | #[error("missing user secret key for {0:?}")] 66 | MissingUserKey(UserId), 67 | #[error("a task failed to join")] 68 | TaskFailed(#[from] tokio::task::JoinError), 69 | #[error("no known interests for given capability")] 70 | NoKnownInterestsForCapability, 71 | #[error("private area intersection error: {0}")] 72 | Pai(#[from] PaiError), 73 | #[error("net failed: {0}")] 74 | Net(anyhow::Error), 75 | #[error("channel closed unexpectedly")] 76 | ChannelClosed, 77 | #[error("our node is shutting down")] 78 | ShuttingDown, 79 | #[error("The operation was cancelled locally")] 80 | Cancelled, 81 | #[error("Connection was closed by peer")] 82 | ConnectionClosed(#[source] anyhow::Error), 83 | #[error("Session was closed by peer")] 84 | SessionClosedByPeer, 85 | } 86 | 87 | #[derive(Debug, thiserror::Error)] 88 | #[error("channel receiver dropped")] 89 | pub struct ChannelReceiverDropped; 90 | impl From<ChannelReceiverDropped> for Error { 91 | fn from(_: ChannelReceiverDropped) -> Self { 92 | Self::ChannelClosed 93 | } 94 | } 95 | 96 | // TODO: Remove likely?
97 | // Added this to be able to implement PartialEq on EventKind for tests, 98 | // but many wrapped errors are not PartialEq, so we always return false for them. 99 | impl PartialEq for Error { 100 | fn eq(&self, other: &Self) -> bool { 101 | match (self, other) { 102 | (Self::Store(_), Self::Store(_)) => false, 103 | (Self::Auth(_), Self::Auth(_)) => false, 104 | (Self::PayloadStore(_), Self::PayloadStore(_)) => false, 105 | (Self::KeyStore(_), Self::KeyStore(_)) => false, 106 | (Self::Receive(_), Self::Receive(_)) => false, 107 | (Self::Write(_), Self::Write(_)) => false, 108 | (Self::TaskFailed(_), Self::TaskFailed(_)) => false, 109 | (Self::Pai(_), Self::Pai(_)) => false, 110 | (Self::Net(_), Self::Net(_)) => false, 111 | (Self::MissingResource(l0), Self::MissingResource(r0)) => l0 == r0, 112 | (Self::InvalidParameters(l0), Self::InvalidParameters(r0)) => l0 == r0, 113 | (Self::InvalidState(l0), Self::InvalidState(r0)) => l0 == r0, 114 | (Self::MissingUserKey(l0), Self::MissingUserKey(r0)) => l0 == r0, 115 | _ => core::mem::discriminant(self) == core::mem::discriminant(other), 116 | } 117 | } 118 | } 119 | 120 | impl Eq for Error {} 121 | 122 | // impl From<meadowcap::InvalidCapability> for Error { 123 | // fn from(_value: meadowcap::InvalidCapability) -> Self { 124 | // Self::InvalidCapability 125 | // } 126 | // } 127 | 128 | impl From<SignatureError> for Error { 129 | fn from(_value: SignatureError) -> Self { 130 | Self::InvalidSignature 131 | } 132 | } 133 | 134 | // impl From<meadowcap::InvalidParams> for Error { 135 | // fn from(_value: meadowcap::InvalidParams) -> Self { 136 | // Self::InvalidParameters("") 137 | // } 138 | // } 139 | 140 | impl From<MissingResource> for Error { 141 | fn from(value: MissingResource) -> Self { 142 | Self::MissingResource(value.0) 143 | } 144 | } 145 | 146 | impl<T> From<mpsc::error::SendError<T>> for Error { 147 | fn from(_error: mpsc::error::SendError<T>) -> Self { 148 | Self::ChannelClosed 149 | } 150 | } 151 | -------------------------------------------------------------------------------- /src/session.rs: -------------------------------------------------------------------------------- 1 | //! The `session` module contains an implementation of the Willow General Purpose Sync Protocol 2 | //! (WGPS). 3 | //! 4 | //! It exposes a few public types used to initiate sessions, and the [`intents`] module which 5 | //! contains handle, event and command types for controlling sessions. 6 | //! 7 | //! Internally, this module contains the full implementation of the protocol, which is started with 8 | //! the `run_session` function (which is not public). 9 | 10 | use std::sync::Arc; 11 | 12 | use channels::ChannelSenders; 13 | use serde::{Deserialize, Serialize}; 14 | use tokio::sync::mpsc; 15 | 16 | use crate::{ 17 | interest::Interests, 18 | session::{error::ChannelReceiverDropped, intents::Intent}, 19 | }; 20 | 21 | mod aoi_finder; 22 | mod capabilities; 23 | mod challenge; 24 | pub(crate) mod channels; 25 | mod data; 26 | mod error; 27 | pub mod intents; 28 | mod pai_finder; 29 | mod payload; 30 | mod reconciler; 31 | mod resource; 32 | mod run; 33 | mod static_tokens; 34 | 35 | pub(crate) use self::{ 36 | challenge::InitialTransmission, channels::Channels, error::Error, run::run_session, 37 | }; 38 | 39 | /// Id per session to identify store subscriptions. 40 | pub(crate) type SessionId = u64; 41 | 42 | /// To break symmetry, we refer to the peer that initiated the synchronisation session as Alfie, 43 | /// and the other peer as Betty.
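/// (The Alfie/Betty naming follows the WGPS specification's convention for the two
/// roles of a sync session.)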
44 | #[derive(Debug, Clone, Copy, Eq, PartialEq, Ord, PartialOrd)] 45 | pub enum Role { 46 | /// The peer that initiated the synchronisation session. 47 | Alfie, 48 | /// The peer that accepted the synchronisation session. 49 | Betty, 50 | } 51 | 52 | impl Role { 53 | /// Returns `true` if we initiated the session. 54 | pub fn is_alfie(&self) -> bool { 55 | matches!(self, Role::Alfie) 56 | } 57 | /// Returns `true` if we accepted the session. 58 | pub fn is_betty(&self) -> bool { 59 | matches!(self, Role::Betty) 60 | } 61 | } 62 | 63 | /// A session can either run a single reconciliation, or stay open until closed by either peer. 64 | /// 65 | /// * [`Self::Continuous`] will enable the live data channels to synchronize updates in real-time. 66 | /// * [`Self::ReconcileOnce`] will run a single reconciliation of the interests declared at session 67 | /// start, and then close the session. 68 | #[derive(Debug, Clone, Copy, Eq, PartialEq, Serialize, Deserialize)] 69 | pub enum SessionMode { 70 | /// Run a single, full reconciliation, and then quit. 71 | ReconcileOnce, 72 | /// Run reconciliation and live data mode, until intentionally closed. 73 | Continuous, 74 | } 75 | 76 | impl SessionMode { 77 | /// Returns `true` if the session runs in live mode. 78 | pub fn is_live(&self) -> bool { 79 | matches!(self, Self::Continuous) 80 | } 81 | } 82 | 83 | /// Options to initialize a session. 84 | #[derive(Debug, Clone, Serialize, Deserialize)] 85 | pub struct SessionInit { 86 | /// Selects the areas we wish to synchronize. 87 | pub interests: Interests, 88 | /// Selects the session mode (once or continuous). 89 | pub mode: SessionMode, 90 | } 91 | 92 | impl SessionInit { 93 | pub fn new(interests: impl Into<Interests>, mode: SessionMode) -> Self { 94 | let interests = interests.into(); 95 | Self { interests, mode } 96 | } 97 | 98 | /// Creates a new [`SessionInit`] with [`SessionMode::Continuous`]. 99 | pub fn continuous(interests: impl Into<Interests>) -> Self { 100 | Self::new(interests, SessionMode::Continuous) 101 | } 102 | 103 | /// Creates a new [`SessionInit`] with [`SessionMode::ReconcileOnce`]. 104 | pub fn reconcile_once(interests: impl Into<Interests>) -> Self { 105 | Self::new(interests, SessionMode::ReconcileOnce) 106 | } 107 | } 108 | 109 | /// Sender for session events 110 | #[derive(Debug, Clone)] 111 | pub(crate) struct EventSender(pub mpsc::Sender<SessionEvent>); 112 | 113 | impl EventSender { 114 | pub(crate) async fn send(&self, event: SessionEvent) -> Result<(), ChannelReceiverDropped> { 115 | self.0.send(event).await.map_err(|_| ChannelReceiverDropped) 116 | } 117 | } 118 | 119 | /// Events emitted from a session. 120 | #[derive(derive_more::Debug)] 121 | pub(crate) enum SessionEvent { 122 | Established, 123 | Complete { 124 | result: Result<(), Arc<Error>>, 125 | // TODO(Frando): Not sure if we should make use of this somewhere, maybe just remove. 126 | #[allow(unused)] 127 | we_cancelled: bool, 128 | #[debug("ChannelSenders")] 129 | senders: ChannelSenders, 130 | remaining_intents: Vec<Intent>, 131 | }, 132 | } 133 | 134 | /// Update commands for an active session. 135 | #[derive(Debug)] 136 | pub(crate) enum SessionUpdate { 137 | SubmitIntent(Intent), 138 | Abort(Error), 139 | } 140 | 141 | /// Handle to an active session. 142 | /// 143 | /// This is not made public; the only public interface is via the [`intents`] handles.
144 | #[derive(Debug)]
145 | pub(crate) struct SessionHandle {
146 | pub(crate) update_tx: mpsc::Sender<SessionUpdate>,
147 | pub(crate) event_rx: mpsc::Receiver<SessionEvent>,
148 | }
149 |
150 | impl SessionHandle {
151 | // TODO(Frando): Previously the [`SessionHandle`] was exposed through the `net` module.
152 | // Now all public interaction goes through the [`Engine`], which does not use the handle as
153 | // such, but splits it into its fields. Leaving this here for the moment in case we decide to
154 | // expose the session handle (without relying on intents) publicly.
155 |
156 | /// Waits for the session to finish.
157 | ///
158 | /// Returns the channel senders and a boolean indicating whether we cancelled the session.
159 | /// Returns an error if the session failed to complete.
160 | #[cfg(test)]
161 | pub(crate) async fn complete(&mut self) -> Result<(ChannelSenders, bool), Arc<Error>> {
162 | while let Some(event) = self.event_rx.recv().await {
163 | if let SessionEvent::Complete {
164 | result,
165 | senders,
166 | we_cancelled,
167 | ..
168 | } = event
169 | {
170 | return result.map(|()| (senders, we_cancelled));
171 | }
172 | }
173 | Err(Arc::new(Error::ActorFailed))
174 | }
175 | }
176 |
-------------------------------------------------------------------------------- /src/proto/pai.rs: --------------------------------------------------------------------------------
1 | //! Primitives for [Private Area Intersection]
2 | //!
3 | //! * Uses ristretto255 and SHA512 for `hash_into_group`.
4 | //!
5 | //! TODO: Use edwards25519 with [RFC 9380] instead.
6 | //!
7 | //! [Private Area Intersection]: https://willowprotocol.org/specs/pai/index.html
8 | //! [RFC 9380]: https://www.rfc-editor.org/rfc/rfc9380
9 |
10 | use curve25519_dalek::{ristretto::CompressedRistretto, RistrettoPoint, Scalar};
11 | use ufotofu::sync::consumer::IntoVec;
12 | use willow_encoding::sync::Encodable;
13 |
14 | use crate::proto::{
15 | data_model::{NamespaceId, Path, SubspaceId},
16 | grouping::AreaSubspace,
17 | };
18 |
19 | type ReadCapability = super::meadowcap::McCapability;
20 |
21 | #[derive(Debug, Clone, Copy, Eq, PartialEq)]
22 | pub struct PsiGroup(RistrettoPoint);
23 |
24 | #[derive(Debug, thiserror::Error)]
25 | #[error("Invalid Psi Group")]
26 | pub struct InvalidPsiGroup;
27 |
28 | impl PsiGroup {
29 | pub fn from_bytes(bytes: [u8; 32]) -> Result<Self, InvalidPsiGroup> {
30 | let compressed = CompressedRistretto(bytes);
31 | let uncompressed = compressed.decompress().ok_or(InvalidPsiGroup)?;
32 | Ok(Self(uncompressed))
33 | }
34 |
35 | pub fn to_bytes(self) -> [u8; 32] {
36 | self.0.compress().0
37 | }
38 | }
39 |
40 | #[derive(Debug, Clone, Copy, Eq, PartialEq)]
41 | pub struct PsiScalar(Scalar);
42 |
43 | #[derive(Debug)]
44 | pub struct PaiScheme;
45 |
46 | impl PaiScheme {
47 | pub fn hash_into_group(fragment: &Fragment) -> PsiGroup {
48 | let encoded = {
49 | let mut consumer = IntoVec::<u8>::new();
50 | fragment
51 | .encode(&mut consumer)
52 | .expect("encoding not to fail");
53 | consumer.into_vec()
54 | };
55 | let point = RistrettoPoint::hash_from_bytes::<sha2::Sha512>(&encoded);
56 | PsiGroup(point)
57 | }
58 |
59 | pub fn get_scalar() -> PsiScalar {
60 | PsiScalar(Scalar::random(&mut rand::thread_rng()))
61 | }
62 |
63 | pub fn scalar_mult(group: PsiGroup, scalar: PsiScalar) -> PsiGroup {
64 | PsiGroup(group.0 * scalar.0)
65 | }
66 |
67 | pub fn is_group_equal(a: &PsiGroup, b: &PsiGroup) -> bool {
68 | a == b
69 | }
70 |
71 | pub fn get_fragment_kit(cap: &ReadCapability) -> FragmentKit {
72 | let granted_area = cap.granted_area();
73 | let granted_namespace =
cap.granted_namespace(); 74 | let granted_path = granted_area.path().clone(); 75 | 76 | match granted_area.subspace() { 77 | AreaSubspace::Any => FragmentKit::Complete(*granted_namespace, granted_path), 78 | AreaSubspace::Id(granted_subspace) => { 79 | FragmentKit::Selective(*granted_namespace, *granted_subspace, granted_path) 80 | } 81 | } 82 | } 83 | } 84 | 85 | #[derive(Debug, Clone)] 86 | pub enum Fragment { 87 | Pair(FragmentPair), 88 | Triple(FragmentTriple), 89 | } 90 | 91 | impl Fragment { 92 | pub fn into_parts(self) -> (NamespaceId, AreaSubspace, Path) { 93 | match self { 94 | Fragment::Pair((namespace_id, path)) => (namespace_id, AreaSubspace::Any, path), 95 | Fragment::Triple((namespace_id, subspace_id, path)) => { 96 | (namespace_id, AreaSubspace::Id(subspace_id), path) 97 | } 98 | } 99 | } 100 | } 101 | 102 | pub type FragmentTriple = (NamespaceId, SubspaceId, Path); 103 | 104 | pub type FragmentPair = (NamespaceId, Path); 105 | 106 | #[derive(Debug, Clone, Copy)] 107 | pub enum FragmentKind { 108 | Primary, 109 | Secondary, 110 | } 111 | 112 | impl FragmentKind { 113 | pub fn is_secondary(&self) -> bool { 114 | matches!(self, FragmentKind::Secondary) 115 | } 116 | } 117 | 118 | #[derive(Debug, Clone)] 119 | pub enum FragmentSet { 120 | Complete(Vec), 121 | Selective { 122 | primary: Vec, 123 | secondary: Vec, 124 | }, 125 | } 126 | 127 | #[derive(Debug)] 128 | pub enum FragmentKit { 129 | Complete(NamespaceId, Path), 130 | Selective(NamespaceId, SubspaceId, Path), 131 | } 132 | 133 | impl FragmentKit { 134 | pub fn into_fragment_set(self) -> FragmentSet { 135 | match self { 136 | FragmentKit::Complete(namespace_id, path) => { 137 | let pairs = path 138 | .all_prefixes() 139 | .map(|prefix| (namespace_id, prefix)) 140 | .collect(); 141 | FragmentSet::Complete(pairs) 142 | } 143 | FragmentKit::Selective(namespace_id, subspace_id, path) => { 144 | let primary = path 145 | .all_prefixes() 146 | .map(|prefix| (namespace_id, subspace_id, prefix)) 147 | .collect(); 148 | let secondary = path 149 | .all_prefixes() 150 | .map(|prefix| (namespace_id, prefix)) 151 | .collect(); 152 | FragmentSet::Selective { primary, secondary } 153 | } 154 | } 155 | } 156 | } 157 | 158 | use syncify::{syncify, syncify_replace}; 159 | 160 | #[syncify(encoding_sync)] 161 | mod encoding { 162 | #[syncify_replace(use ufotofu::sync::BulkConsumer;)] 163 | use ufotofu::local_nb::BulkConsumer; 164 | #[syncify_replace(use willow_encoding::sync::Encodable;)] 165 | use willow_encoding::Encodable; 166 | 167 | use super::*; 168 | 169 | impl Encodable for Fragment { 170 | async fn encode(&self, consumer: &mut Consumer) -> Result<(), Consumer::Error> 171 | where 172 | Consumer: BulkConsumer, 173 | { 174 | match self { 175 | Fragment::Pair((namespace_id, path)) => { 176 | namespace_id.encode(consumer).await?; 177 | path.encode(consumer).await?; 178 | } 179 | Fragment::Triple((namespace_id, subspace_id, path)) => { 180 | namespace_id.encode(consumer).await?; 181 | subspace_id.encode(consumer).await?; 182 | path.encode(consumer).await?; 183 | } 184 | } 185 | Ok(()) 186 | } 187 | } 188 | } 189 | -------------------------------------------------------------------------------- /src/form.rs: -------------------------------------------------------------------------------- 1 | //! Structs that allow constructing entries and other structs where some fields may be 2 | //! automatically filled. 
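//!
//! A hedged sketch of the intended flow, using only items defined in this module (the
//! `namespace_id` and `path` values are placeholders):
//! ```ignore
//! let form = EntryForm::new_bytes(namespace_id, path, "hello world");
//! // The subspace defaults to `SubspaceForm::User`, the timestamp to `TimestampForm::Now`.
//! ```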
3 |
4 | use std::{io, path::PathBuf};
5 |
6 | use bytes::Bytes;
7 | use futures_lite::Stream;
8 | use iroh_blobs::{
9 | store::{ImportMode, MapEntry},
10 | util::progress::IgnoreProgressSender,
11 | BlobFormat, Hash,
12 | };
13 | use serde::{Deserialize, Serialize};
14 | use tokio::io::AsyncRead;
15 |
16 | use crate::proto::{
17 | data_model::{Entry, NamespaceId, Path, SubspaceId, Timestamp},
18 | keys::UserId,
19 | meadowcap::{self, WriteCapability},
20 | };
21 |
22 | /// Sources where payload data can come from.
23 | #[derive(derive_more::Debug)]
24 | pub enum PayloadForm {
25 | /// Set the payload hash directly. The blob must exist in the node's blob store, this will fail
26 | /// otherwise.
27 | Hash(Hash),
28 | /// Set the payload hash and length directly, without checking whether the blob exists in the
29 | /// node's blob store.
30 | HashUnchecked(Hash, u64),
31 | /// Import data from the provided bytes and set as payload.
32 | #[debug("Bytes({})", _0.len())]
33 | Bytes(Bytes),
34 | /// Import data from a file on the node's local file system and set as payload.
35 | File(PathBuf, ImportMode),
36 | #[debug("Stream")]
37 | /// Import data from a [`Stream`] of bytes and set as payload.
38 | Stream(Box<dyn Stream<Item = io::Result<Bytes>> + Send + Sync + Unpin>),
39 | /// Import data from an [`AsyncRead`] and set as payload.
40 | #[debug("Reader")]
41 | Reader(Box<dyn AsyncRead + Send + Sync + Unpin>),
42 | }
43 |
44 | impl PayloadForm {
45 | pub async fn submit<S: iroh_blobs::store::Store>(
46 | self,
47 | store: &S,
48 | ) -> anyhow::Result<(Hash, u64)> {
49 | let (hash, len) = match self {
50 | PayloadForm::Hash(digest) => {
51 | let entry = store.get(&digest).await?;
52 | let entry = entry.ok_or_else(|| anyhow::anyhow!("hash not found"))?;
53 | (digest, entry.size().value())
54 | }
55 | PayloadForm::HashUnchecked(digest, len) => (digest, len),
56 | PayloadForm::Bytes(bytes) => {
57 | let len = bytes.len();
58 | let temp_tag = store.import_bytes(bytes, BlobFormat::Raw).await?;
59 | (*temp_tag.hash(), len as u64)
60 | }
61 | PayloadForm::File(path, mode) => {
62 | let progress = IgnoreProgressSender::default();
63 | let (temp_tag, len) = store
64 | .import_file(path, mode, BlobFormat::Raw, progress)
65 | .await?;
66 | (*temp_tag.hash(), len)
67 | }
68 | PayloadForm::Stream(stream) => {
69 | let progress = IgnoreProgressSender::default();
70 | let (temp_tag, len) = store
71 | .import_stream(stream, BlobFormat::Raw, progress)
72 | .await?;
73 | (*temp_tag.hash(), len)
74 | }
75 | PayloadForm::Reader(reader) => {
76 | let progress = IgnoreProgressSender::default();
77 | let (temp_tag, len) = store
78 | .import_reader(reader, BlobFormat::Raw, progress)
79 | .await?;
80 | (*temp_tag.hash(), len)
81 | }
82 | };
83 | Ok((hash, len))
84 | }
85 | }
86 |
87 | /// Either an [`Entry`] or an [`EntryForm`].
88 | #[derive(Debug, derive_more::From)]
89 | pub enum EntryOrForm {
90 | Entry(Entry),
91 | Form(EntryForm),
92 | }
93 |
94 | /// Creates an entry while setting some fields automatically.
95 | #[derive(Debug)]
96 | pub struct EntryForm {
97 | pub namespace_id: NamespaceId,
98 | pub subspace_id: SubspaceForm,
99 | pub path: Path,
100 | pub timestamp: TimestampForm,
101 | pub payload: PayloadForm,
102 | }
103 |
104 | impl EntryForm {
105 | /// Creates a new [`EntryForm`] where the subspace is set to the user authenticating the entry,
106 | /// the timestamp is the current system time, and the payload is set to the provided [`Bytes`].
107 | pub fn new_bytes(namespace_id: NamespaceId, path: Path, payload: impl Into) -> Self { 108 | EntryForm { 109 | namespace_id, 110 | subspace_id: SubspaceForm::User, 111 | path, 112 | timestamp: TimestampForm::Now, 113 | payload: PayloadForm::Bytes(payload.into()), 114 | } 115 | } 116 | 117 | /// Sets the subspace for the entry. 118 | pub fn subspace(mut self, subspace: SubspaceId) -> Self { 119 | self.subspace_id = SubspaceForm::Exact(subspace); 120 | self 121 | } 122 | } 123 | 124 | /// Select which capability to use for authenticating a new entry. 125 | #[derive(Debug, Clone, Serialize, Deserialize, derive_more::From)] 126 | pub enum AuthForm { 127 | /// Use any available capability which covers the entry and whose receiver is the provided 128 | /// user. 129 | Any(UserId), 130 | /// Use the provided [`WriteCapability`]. 131 | Exact(#[serde(with = "meadowcap::serde_encoding::mc_capability")] WriteCapability), 132 | } 133 | 134 | impl AuthForm { 135 | /// Get the user id of the user who is the receiver of the capability selected by this 136 | /// [`AuthForm`]. 137 | pub fn user_id(&self) -> UserId { 138 | match self { 139 | AuthForm::Any(user) => *user, 140 | AuthForm::Exact(cap) => *cap.receiver(), 141 | } 142 | } 143 | } 144 | 145 | /// Set the subspace either to a provided [`SubspaceId`], or use the user authenticating the entry 146 | /// as subspace. 147 | #[derive(Debug, Clone, Serialize, Deserialize, Default)] 148 | pub enum SubspaceForm { 149 | /// Set the subspace to the [`UserId`] of the user authenticating the entry. 150 | #[default] 151 | User, 152 | /// Set the subspace to the provided [`SubspaceId`]. 153 | Exact(SubspaceId), 154 | } 155 | 156 | /// Set the timestamp either to the provided [`Timestamp`] or to the current system time. 157 | #[derive(Debug, Clone, Serialize, Deserialize, Default)] 158 | pub enum TimestampForm { 159 | /// Set the timestamp to the current system time. 160 | #[default] 161 | Now, 162 | /// Set the timestamp to the provided value. 163 | Exact(Timestamp), 164 | } 165 | -------------------------------------------------------------------------------- /src/session/data.rs: -------------------------------------------------------------------------------- 1 | use futures_lite::StreamExt; 2 | 3 | use super::{ 4 | aoi_finder::AoiIntersection, 5 | payload::{send_payload_chunked, CurrentPayload}, 6 | }; 7 | use crate::{ 8 | proto::{ 9 | data_model::AuthorisedEntry, 10 | wgps::{DataMessage, DataSendEntry, DataSendPayload, StaticToken}, 11 | }, 12 | session::{channels::ChannelSenders, static_tokens::StaticTokens, Error, SessionId}, 13 | store::{ 14 | traits::{EntryOrigin, EntryStorage, Storage, StoreEvent, SubscribeParams}, 15 | Store, 16 | }, 17 | util::stream::CancelableReceiver, 18 | }; 19 | 20 | #[derive(Debug)] 21 | pub enum Input { 22 | AoiIntersection(AoiIntersection), 23 | } 24 | 25 | #[derive(derive_more::Debug)] 26 | pub struct DataSender { 27 | inbox: CancelableReceiver, 28 | store: Store, 29 | send: ChannelSenders, 30 | static_tokens: StaticTokens, 31 | session_id: SessionId, 32 | } 33 | 34 | impl DataSender { 35 | pub fn new( 36 | inbox: CancelableReceiver, 37 | store: Store, 38 | send: ChannelSenders, 39 | static_tokens: StaticTokens, 40 | session_id: SessionId, 41 | ) -> Self { 42 | Self { 43 | inbox, 44 | store, 45 | send, 46 | static_tokens, 47 | session_id, 48 | } 49 | } 50 | pub async fn run(mut self) -> Result<(), Error> { 51 | let mut entry_stream = futures_concurrency::stream::StreamGroup::new(); 52 | loop { 53 | tokio::select! 
{ 54 | input = self.inbox.next() => { 55 | let Some(input) = input else { 56 | break; 57 | }; 58 | let Input::AoiIntersection(intersection) = input; 59 | let params = SubscribeParams::default().ingest_only().ignore_remote(self.session_id); 60 | // TODO: We could start at the progress id at the beginning of the session. 61 | let stream = self 62 | .store 63 | .entries() 64 | .subscribe_area( 65 | intersection.namespace, 66 | intersection.intersection.area.clone(), 67 | params, 68 | ) 69 | .filter_map(|event| match event { 70 | StoreEvent::Ingested(_id, entry, _origin) => Some(entry), 71 | // We get only Ingested events because we set ingest_only() param above. 72 | _ => unreachable!("expected only Ingested event but got another event"), 73 | }); 74 | entry_stream.insert(stream); 75 | }, 76 | entry = entry_stream.next(), if !entry_stream.is_empty() => { 77 | match entry { 78 | Some(entry) => self.send_entry(entry).await?, 79 | None => break, 80 | } 81 | } 82 | } 83 | } 84 | Ok(()) 85 | } 86 | 87 | async fn send_entry(&mut self, authorised_entry: AuthorisedEntry) -> Result<(), Error> { 88 | let (entry, token) = authorised_entry.into_parts(); 89 | let static_token: StaticToken = token.capability.into(); 90 | let dynamic_token = token.signature; 91 | // TODO: partial payloads 92 | // let available = entry.payload_length; 93 | let static_token_handle = self 94 | .static_tokens 95 | .bind_and_send_ours(static_token, &self.send) 96 | .await?; 97 | let digest = *entry.payload_digest(); 98 | let offset = 0; 99 | let msg = DataSendEntry { 100 | entry: entry.into(), 101 | static_token_handle, 102 | dynamic_token, 103 | offset, 104 | }; 105 | self.send.send(msg).await?; 106 | 107 | // TODO: only send payload if configured to do so and/or under size limit. 108 | let send_payloads = true; 109 | if send_payloads { 110 | send_payload_chunked(digest, self.store.payloads(), &self.send, offset, |bytes| { 111 | DataSendPayload { bytes }.into() 112 | }) 113 | .await?; 114 | } 115 | Ok(()) 116 | } 117 | } 118 | 119 | #[derive(derive_more::Debug)] 120 | pub struct DataReceiver { 121 | store: Store, 122 | current_payload: CurrentPayload, 123 | static_tokens: StaticTokens, 124 | session_id: SessionId, 125 | } 126 | 127 | impl DataReceiver { 128 | pub fn new(store: Store, static_tokens: StaticTokens, session_id: SessionId) -> Self { 129 | Self { 130 | store, 131 | static_tokens, 132 | session_id, 133 | current_payload: Default::default(), 134 | } 135 | } 136 | 137 | pub async fn on_message(&mut self, message: DataMessage) -> Result<(), Error> { 138 | match message { 139 | DataMessage::SendEntry(message) => self.on_send_entry(message).await?, 140 | DataMessage::SendPayload(message) => self.on_send_payload(message).await?, 141 | DataMessage::SetMetadata(_) => {} 142 | } 143 | Ok(()) 144 | } 145 | 146 | async fn on_send_entry(&mut self, message: DataSendEntry) -> Result<(), Error> { 147 | self.current_payload.ensure_none()?; 148 | let authorised_entry = self 149 | .static_tokens 150 | .authorise_entry_eventually( 151 | message.entry.into(), 152 | message.static_token_handle, 153 | message.dynamic_token, 154 | ) 155 | .await?; 156 | self.store 157 | .entries() 158 | .ingest_entry(&authorised_entry, EntryOrigin::Remote(self.session_id))?; 159 | let (entry, _token) = authorised_entry.into_parts(); 160 | // TODO: handle offset 161 | self.current_payload.set( 162 | *entry.payload_digest(), 163 | entry.payload_length(), 164 | None, 165 | Some(message.offset), 166 | )?; 167 | Ok(()) 168 | } 169 | 170 | async fn 
on_send_payload(&mut self, message: DataSendPayload) -> Result<(), Error> { 171 | self.current_payload 172 | .recv_chunk(self.store.payloads(), message.bytes) 173 | .await?; 174 | if self.current_payload.is_complete() { 175 | self.current_payload.finalize().await?; 176 | } 177 | Ok(()) 178 | } 179 | } 180 | -------------------------------------------------------------------------------- /src/store/persistent/tables.rs: -------------------------------------------------------------------------------- 1 | use std::time::Instant; 2 | 3 | use anyhow::Result; 4 | use ed25519_dalek::ed25519; 5 | use redb::{ 6 | MultimapTable, MultimapTableDefinition, ReadOnlyMultimapTable, ReadOnlyTable, ReadTransaction, 7 | Table, TableDefinition, WriteTransaction, 8 | }; 9 | use ufotofu::sync::{consumer::IntoVec, producer::FromSlice}; 10 | use willow_encoding::sync::{RelativeDecodable, RelativeEncodable}; 11 | 12 | use crate::proto::{ 13 | grouping::Area, 14 | meadowcap::{serde_encoding::SerdeReadAuthorisation, McCapability, ReadAuthorisation}, 15 | }; 16 | 17 | // These consts are here so we don't accidentally break the schema! 18 | pub type NamespaceId = [u8; 32]; 19 | pub type UserId = [u8; 32]; 20 | 21 | pub const NAMESPACE_NODES: TableDefinition = 22 | TableDefinition::new("namespace-nodes-0"); 23 | 24 | pub const AUTH_TOKENS: TableDefinition = 25 | TableDefinition::new("auth-tokens-0"); 26 | pub const AUTH_TOKEN_REFCOUNT: TableDefinition = 27 | TableDefinition::new("auth-token-refcounts-0"); 28 | 29 | pub const USER_SECRETS: TableDefinition = TableDefinition::new("user-secrets-0"); 30 | pub const NAMESPACE_SECRETS: TableDefinition = 31 | TableDefinition::new("namespaces-secrets-0"); 32 | 33 | pub const READ_CAPS: MultimapTableDefinition = 34 | MultimapTableDefinition::new("read-caps-0"); 35 | pub const WRITE_CAPS: MultimapTableDefinition = 36 | MultimapTableDefinition::new("write-caps-0"); 37 | 38 | self_cell::self_cell! 
{ 39 | struct OpenWriteInner { 40 | owner: WriteTransaction, 41 | #[covariant] 42 | dependent: Tables, 43 | } 44 | } 45 | 46 | #[derive(derive_more::Debug)] 47 | pub struct OpenWrite { 48 | #[debug("OpenWriteInner")] 49 | inner: OpenWriteInner, 50 | pub since: Instant, 51 | } 52 | 53 | impl OpenWrite { 54 | pub fn new(tx: WriteTransaction) -> Result { 55 | Ok(Self { 56 | inner: OpenWriteInner::try_new(tx, |tx| Tables::new(tx))?, 57 | since: Instant::now(), 58 | }) 59 | } 60 | 61 | pub fn read(&self) -> &Tables<'_> { 62 | self.inner.borrow_dependent() 63 | } 64 | 65 | pub fn modify(&mut self, f: impl FnOnce(&mut Tables) -> Result) -> Result { 66 | self.inner.with_dependent_mut(|_, t| f(t)) 67 | } 68 | 69 | pub fn commit(self) -> Result<()> { 70 | self.inner 71 | .into_owner() 72 | .commit() 73 | .map_err(anyhow::Error::from) 74 | } 75 | } 76 | 77 | pub struct Tables<'tx> { 78 | pub namespace_nodes: Table<'tx, NamespaceId, willow_store::NodeId>, 79 | pub auth_tokens: Table<'tx, ed25519::SignatureBytes, WriteCap>, 80 | pub auth_token_refcount: Table<'tx, ed25519::SignatureBytes, u64>, 81 | pub user_secrets: Table<'tx, UserId, [u8; 32]>, 82 | pub namespace_secrets: Table<'tx, NamespaceId, [u8; 32]>, 83 | pub read_caps: MultimapTable<'tx, NamespaceId, ReadCap>, 84 | pub write_caps: MultimapTable<'tx, NamespaceId, WriteCap>, 85 | pub node_store: willow_store::Tables<'tx>, 86 | } 87 | 88 | impl<'tx> Tables<'tx> { 89 | pub fn new(tx: &'tx WriteTransaction) -> Result { 90 | Ok(Self { 91 | namespace_nodes: tx.open_table(NAMESPACE_NODES)?, 92 | auth_tokens: tx.open_table(AUTH_TOKENS)?, 93 | auth_token_refcount: tx.open_table(AUTH_TOKEN_REFCOUNT)?, 94 | user_secrets: tx.open_table(USER_SECRETS)?, 95 | namespace_secrets: tx.open_table(NAMESPACE_SECRETS)?, 96 | read_caps: tx.open_multimap_table(READ_CAPS)?, 97 | write_caps: tx.open_multimap_table(WRITE_CAPS)?, 98 | node_store: willow_store::Tables::open(tx)?, 99 | }) 100 | } 101 | } 102 | 103 | pub struct OpenRead { 104 | pub namespace_nodes: ReadOnlyTable, 105 | pub auth_tokens: ReadOnlyTable, 106 | pub read_caps: ReadOnlyMultimapTable, 107 | pub write_caps: ReadOnlyMultimapTable, 108 | pub node_store: willow_store::Snapshot, 109 | } 110 | 111 | impl OpenRead { 112 | pub fn new(tx: &ReadTransaction) -> Result { 113 | Ok(Self { 114 | namespace_nodes: tx.open_table(NAMESPACE_NODES)?, 115 | auth_tokens: tx.open_table(AUTH_TOKENS)?, 116 | read_caps: tx.open_multimap_table(READ_CAPS)?, 117 | write_caps: tx.open_multimap_table(WRITE_CAPS)?, 118 | node_store: willow_store::Snapshot::open(tx)?, 119 | }) 120 | } 121 | } 122 | 123 | #[derive(Debug)] 124 | pub struct WriteCap(pub McCapability); 125 | 126 | impl redb::Key for WriteCap { 127 | fn compare(data1: &[u8], data2: &[u8]) -> std::cmp::Ordering { 128 | data1.cmp(data2) 129 | } 130 | } 131 | 132 | impl redb::Value for WriteCap { 133 | type SelfType<'a> 134 | = Self 135 | where 136 | Self: 'a; 137 | 138 | type AsBytes<'a> 139 | = Vec 140 | where 141 | Self: 'a; 142 | 143 | fn fixed_width() -> Option { 144 | None 145 | } 146 | 147 | fn from_bytes<'a>(data: &'a [u8]) -> Self::SelfType<'a> 148 | where 149 | Self: 'a, 150 | { 151 | let capability = 152 | McCapability::relative_decode(&Area::new_full(), &mut FromSlice::new(data)).unwrap(); 153 | WriteCap(capability) 154 | } 155 | 156 | fn as_bytes<'a, 'b: 'a>(value: &'a Self::SelfType<'b>) -> Self::AsBytes<'a> 157 | where 158 | Self: 'a, 159 | Self: 'b, 160 | { 161 | let mut consumer = IntoVec::new(); 162 | value 163 | .0 164 | .relative_encode(&Area::new_full(), 
&mut consumer) 165 | .unwrap_or_else(|e| match e {}); // infallible 166 | consumer.into_vec() 167 | } 168 | 169 | fn type_name() -> redb::TypeName { 170 | redb::TypeName::new("WriteCap") 171 | } 172 | } 173 | 174 | #[derive(Debug)] 175 | #[repr(transparent)] 176 | pub struct ReadCap(pub ReadAuthorisation); 177 | 178 | impl redb::Key for ReadCap { 179 | fn compare(data1: &[u8], data2: &[u8]) -> std::cmp::Ordering { 180 | data1.cmp(data2) 181 | } 182 | } 183 | 184 | impl redb::Value for ReadCap { 185 | type SelfType<'a> 186 | = Self 187 | where 188 | Self: 'a; 189 | 190 | type AsBytes<'a> 191 | = Vec 192 | where 193 | Self: 'a; 194 | 195 | fn fixed_width() -> Option { 196 | None 197 | } 198 | 199 | fn from_bytes<'a>(data: &'a [u8]) -> Self::SelfType<'a> 200 | where 201 | Self: 'a, 202 | { 203 | let capability: SerdeReadAuthorisation = postcard::from_bytes(data).unwrap(); 204 | ReadCap(capability.0) 205 | } 206 | 207 | fn as_bytes<'a, 'b: 'a>(value: &'a Self::SelfType<'b>) -> Self::AsBytes<'a> 208 | where 209 | Self: 'a, 210 | Self: 'b, 211 | { 212 | // TODO(matheus23): Fewer clones. 213 | postcard::to_stdvec(&SerdeReadAuthorisation(value.0.clone())).unwrap() 214 | } 215 | 216 | fn type_name() -> redb::TypeName { 217 | redb::TypeName::new("ReadCap") 218 | } 219 | } 220 | -------------------------------------------------------------------------------- /src/engine.rs: -------------------------------------------------------------------------------- 1 | //! Engine for driving a willow store and synchronisation sessions. 2 | 3 | use std::sync::{Arc, OnceLock}; 4 | 5 | use anyhow::Result; 6 | use futures_lite::future::Boxed; 7 | use futures_util::{ 8 | future::{MapErr, Shared}, 9 | FutureExt, TryFutureExt, 10 | }; 11 | use iroh::{endpoint::Connection, protocol::ProtocolHandler, Endpoint, NodeId}; 12 | use tokio::{ 13 | sync::{mpsc, oneshot}, 14 | task::JoinError, 15 | }; 16 | use tokio_util::task::AbortOnDropHandle; 17 | use tracing::{debug, error, error_span, Instrument}; 18 | 19 | use crate::{ 20 | rpc::{client::MemClient, handler::RpcHandler}, 21 | session::{ 22 | intents::{Intent, IntentHandle}, 23 | SessionInit, 24 | }, 25 | store::traits::Storage, 26 | }; 27 | 28 | mod actor; 29 | mod peer_manager; 30 | 31 | use self::peer_manager::PeerManager; 32 | pub use self::{actor::ActorHandle, peer_manager::AcceptOpts}; 33 | 34 | const PEER_MANAGER_INBOX_CAP: usize = 128; 35 | 36 | /// The [`Engine`] is the main handle onto a Willow store with networking. 37 | /// 38 | /// It runs a dedicated thread for all storage operations, and a peer manager to coordinate network 39 | /// connections to other peers. 40 | /// 41 | /// The engine does not establish any peer connections on its own. Synchronisation sessions can be 42 | /// started with [`Engine::sync_with_peer`]. 43 | #[derive(Debug, Clone)] 44 | pub struct Engine { 45 | actor_handle: ActorHandle, 46 | pub(crate) endpoint: Endpoint, 47 | peer_manager_inbox: mpsc::Sender, 48 | // `Engine` needs to be `Clone + Send`, and we need to `task.await` in its `shutdown()` impl. 49 | // So we need 50 | // - `Shared` so we can `task.await` from all `Node` clones 51 | // - `MapErr` to map the `JoinError` to a `String`, because `JoinError` is `!Clone` 52 | // - `AbortOnDropHandle` to make sure that the `task` is cancelled when all `Node`s are dropped 53 | // (`Shared` acts like an `Arc` around its inner future). 
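    // A minimal standalone sketch of the same pattern (illustrative only, assuming the
    // imports above):
    //
    //     let task = tokio::task::spawn(async { Ok::<(), String>(()) });
    //     let task = AbortOnDropHandle::new(task)
    //         .map_err(Box::new(|e: JoinError| e.to_string()) as JoinErrToStr)
    //         .shared();
    //     // Every clone of the owner can now `task.clone().await` the shared result.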
54 | peer_manager_task: Shared>, JoinErrToStr>>, 55 | rpc_handler: Arc>, 56 | } 57 | 58 | pub(crate) type JoinErrToStr = Box String + Send + Sync + 'static>; 59 | 60 | impl Engine { 61 | /// Get an in memory client to interact with the willow engine. 62 | pub fn client(&self) -> &MemClient { 63 | &self 64 | .rpc_handler 65 | .get_or_init(|| RpcHandler::new(self.clone())) 66 | .client 67 | } 68 | 69 | /// Start the Willow engine. 70 | /// 71 | /// This needs an `endpoint` to connect to other peers, and a `create_store` closure which 72 | /// returns a [`Storage`] instance. 73 | /// 74 | /// You also need to pass [`AcceptOpts`] to configure what to do with incoming connections. 75 | /// Its default implementation will accept all connections and run sync with all our interests. 76 | /// 77 | /// To actually accept connections, an [`Endpoint::accept`] loop has to be run outside of the 78 | /// engine, passing all connections that match [`crate::net::ALPN`] to the engine with 79 | /// [`Engine::handle_connection`]. 80 | /// 81 | /// The engine will spawn a dedicated storage thread, and the `create_store` closure will be called on 82 | /// this thread, so that the [`Storage`] does not have to be `Send`. 83 | pub fn spawn( 84 | endpoint: Endpoint, 85 | create_store: impl 'static + Send + FnOnce() -> S, 86 | accept_opts: AcceptOpts, 87 | ) -> Self { 88 | let me = endpoint.node_id(); 89 | let actor_handle = ActorHandle::spawn(create_store, me); 90 | let (pm_inbox_tx, pm_inbox_rx) = mpsc::channel(PEER_MANAGER_INBOX_CAP); 91 | let peer_manager = PeerManager::new( 92 | actor_handle.clone(), 93 | endpoint.clone(), 94 | pm_inbox_rx, 95 | accept_opts, 96 | ); 97 | let peer_manager_task = tokio::task::spawn( 98 | async move { peer_manager.run().await.map_err(|e| e.to_string()) } 99 | .instrument(error_span!("peer_manager", me=%me.fmt_short())), 100 | ); 101 | let peer_manager_task = AbortOnDropHandle::new(peer_manager_task) 102 | .map_err(Box::new(|e: JoinError| e.to_string()) as JoinErrToStr) 103 | .shared(); 104 | Engine { 105 | actor_handle, 106 | endpoint, 107 | peer_manager_inbox: pm_inbox_tx, 108 | peer_manager_task, 109 | rpc_handler: Default::default(), 110 | } 111 | } 112 | 113 | /// Handle an incoming connection. 114 | pub async fn handle_connection(&self, conn: Connection) -> Result<()> { 115 | self.peer_manager_inbox 116 | .send(peer_manager::Input::HandleConnection { conn }) 117 | .await?; 118 | Ok(()) 119 | } 120 | 121 | /// Synchronises with a peer. 122 | /// 123 | /// Will try to establish a connection to `peer` if there is none already, and then open a 124 | /// synchronisation session. 125 | /// 126 | /// `init` contains the initialisation options for this synchronisation intent. 127 | /// 128 | /// Returns an [`IntentHandle`] which receives events and can submit updates into the session. 129 | /// 130 | /// This can freely be called multiple times for the same peer. The engine will merge the 131 | /// intents and make sure that only a single session is opened per peer. 132 | pub async fn sync_with_peer(&self, peer: NodeId, init: SessionInit) -> Result { 133 | let (intent, handle) = Intent::new(init); 134 | self.peer_manager_inbox 135 | .send(peer_manager::Input::SubmitIntent { peer, intent }) 136 | .await?; 137 | Ok(handle) 138 | } 139 | 140 | /// Shutdown the engine. 141 | /// 142 | /// This will try to close all connections gracefully for up to 10 seconds, 143 | /// and abort them otherwise. 
144 | pub async fn shutdown(&self) -> Result<()> { 145 | debug!("shutdown engine"); 146 | let (reply, reply_rx) = oneshot::channel(); 147 | self.peer_manager_inbox 148 | .send(peer_manager::Input::Shutdown { reply }) 149 | .await?; 150 | reply_rx.await?; 151 | let res = self.peer_manager_task.clone().await; 152 | match res { 153 | Err(err) => error!(?err, "peer manager task panicked"), 154 | Ok(Err(err)) => error!(?err, "peer manager task failed"), 155 | Ok(Ok(())) => {} 156 | }; 157 | debug!("shutdown engine: peer manager terminated"); 158 | self.actor_handle.shutdown().await?; 159 | debug!("shutdown engine: willow actor terminated"); 160 | Ok(()) 161 | } 162 | } 163 | 164 | impl std::ops::Deref for Engine { 165 | type Target = ActorHandle; 166 | 167 | fn deref(&self) -> &Self::Target { 168 | &self.actor_handle 169 | } 170 | } 171 | 172 | impl ProtocolHandler for Engine { 173 | fn accept(&self, conn: Connection) -> Boxed> { 174 | let this = self.clone(); 175 | async move { this.handle_connection(conn).await }.boxed() 176 | } 177 | 178 | fn shutdown(&self) -> Boxed<()> { 179 | let this = self.clone(); 180 | async move { 181 | crate::engine::Engine::shutdown(&this).await.ok(); 182 | } 183 | .boxed() 184 | } 185 | } 186 | -------------------------------------------------------------------------------- /src/store/willow_store_glue.rs: -------------------------------------------------------------------------------- 1 | //! Code required for willow-rs and willow-store to interface together. 2 | 3 | use std::fmt::Display; 4 | 5 | use anyhow::Result; 6 | use ed25519_dalek::ed25519; 7 | use iroh_blobs::Hash; 8 | use willow_data_model::grouping::{Range, RangeEnd}; 9 | use willow_store::{ 10 | BlobSeq, BlobSeqRef, FixedSize, IsLowerBound, KeyParams, LowerBound, Point, QueryRange, 11 | QueryRange3d, TreeParams, 12 | }; 13 | 14 | use crate::proto::{ 15 | data_model::{ 16 | AuthorisationToken, AuthorisedEntry, Component, Entry, NamespaceId, Path, PayloadDigest, 17 | SubspaceId, Timestamp, 18 | }, 19 | grouping::Range3d, 20 | wgps::Fingerprint, 21 | }; 22 | 23 | #[derive( 24 | Debug, 25 | Clone, 26 | Copy, 27 | PartialEq, 28 | Eq, 29 | zerocopy_derive::FromBytes, 30 | zerocopy_derive::AsBytes, 31 | zerocopy_derive::FromZeroes, 32 | )] 33 | #[repr(C, packed)] 34 | pub(crate) struct StoredAuthorisedEntry { 35 | pub(crate) authorisation_token_id: ed25519::SignatureBytes, 36 | pub(crate) payload_digest: [u8; 32], 37 | pub(crate) payload_size: u64, 38 | } 39 | 40 | impl FixedSize for StoredAuthorisedEntry { 41 | const SIZE: usize = std::mem::size_of::(); 42 | } 43 | 44 | impl StoredAuthorisedEntry { 45 | pub fn from_authorised_entry(entry: &AuthorisedEntry) -> (Point, Self) { 46 | let point = willow_store::Point::::new( 47 | entry.entry().subspace_id(), 48 | &StoredTimestamp::new(entry.entry().timestamp()), 49 | &path_to_blobseq(entry.entry().path()), 50 | ); 51 | let entry = Self { 52 | authorisation_token_id: entry.token().signature.to_bytes(), 53 | payload_digest: *entry.entry().payload_digest().0.as_bytes(), 54 | payload_size: entry.entry().payload_length(), 55 | }; 56 | (point, entry) 57 | } 58 | 59 | pub fn into_authorised_entry( 60 | self, 61 | namespace: NamespaceId, 62 | key: &Point, 63 | auth_token: AuthorisationToken, 64 | ) -> Result { 65 | Ok(AuthorisedEntry::new( 66 | self.into_entry(namespace, key)?, 67 | auth_token, 68 | )?) 
69 | } 70 | 71 | pub fn into_entry( 72 | self, 73 | namespace: NamespaceId, 74 | key: &Point, 75 | ) -> Result { 76 | let subspace = key.x(); 77 | let timestamp = key.y(); 78 | let blobseq = key.z().to_owned(); 79 | let path = blobseq_to_path(&blobseq)?; 80 | Ok(Entry::new( 81 | namespace, 82 | *subspace, 83 | path, 84 | timestamp.timestamp(), 85 | self.payload_size, 86 | PayloadDigest(Hash::from_bytes(self.payload_digest)), 87 | )) 88 | } 89 | } 90 | 91 | /// A newtype around memory that represents a timestamp. 92 | /// 93 | /// This newtype is needed to avoid alignment issues. 94 | #[derive( 95 | Clone, 96 | Copy, 97 | Debug, 98 | PartialEq, 99 | Eq, 100 | PartialOrd, 101 | Ord, 102 | zerocopy_derive::FromBytes, 103 | zerocopy_derive::AsBytes, 104 | zerocopy_derive::FromZeroes, 105 | )] 106 | #[repr(C, packed)] 107 | pub(crate) struct StoredTimestamp([u8; 8]); 108 | 109 | impl LowerBound for StoredTimestamp { 110 | fn min_value() -> Self { 111 | Self([0u8; 8]) 112 | } 113 | } 114 | 115 | impl IsLowerBound for StoredTimestamp { 116 | fn is_min_value(&self) -> bool { 117 | self.0 == [0u8; 8] 118 | } 119 | } 120 | 121 | impl FixedSize for StoredTimestamp { 122 | const SIZE: usize = std::mem::size_of::(); 123 | } 124 | 125 | impl Display for StoredTimestamp { 126 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 127 | self.timestamp().fmt(f) 128 | } 129 | } 130 | 131 | // The `StoredTimestamp` needs to be big-endian so the derived 132 | // `Ord` instance on the inner [u8; 8] matches the ord instance 133 | // of the equivalent u64. 134 | // See also the associated proptest in this module. 135 | impl StoredTimestamp { 136 | pub(crate) fn new(ts: Timestamp) -> Self { 137 | Self(ts.to_be_bytes()) 138 | } 139 | 140 | pub(crate) fn timestamp(&self) -> Timestamp { 141 | u64::from_be_bytes(self.0) 142 | } 143 | } 144 | 145 | #[derive(Debug, Default, Clone, Copy, Ord, PartialOrd, PartialEq, Eq)] 146 | pub(crate) struct IrohWillowParams; 147 | 148 | impl TreeParams for IrohWillowParams { 149 | type V = StoredAuthorisedEntry; 150 | type M = Fingerprint; 151 | } 152 | 153 | impl KeyParams for IrohWillowParams { 154 | type X = SubspaceId; 155 | type Y = StoredTimestamp; 156 | type ZOwned = BlobSeq; 157 | type Z = BlobSeqRef; 158 | } 159 | 160 | pub(crate) fn path_to_blobseq(path: &Path) -> BlobSeq { 161 | let path_bytes = path 162 | .components() 163 | .map(|component| component.to_vec()) 164 | .collect::>(); 165 | 166 | BlobSeq::from(path_bytes) 167 | } 168 | 169 | pub(crate) fn blobseq_to_path(blobseq: &BlobSeq) -> Result { 170 | let components = blobseq 171 | .components() 172 | .map(|c| { 173 | Component::new(c) 174 | .ok_or_else(|| anyhow::anyhow!("Path component exceeded length restriction")) 175 | }) 176 | .collect::>>()?; 177 | let total_length = components.iter().map(|c| c.len()).sum::(); 178 | let path = Path::new_from_iter(total_length, &mut components.into_iter())?; 179 | Ok(path) 180 | } 181 | 182 | pub(crate) fn to_query(range3d: &Range3d) -> QueryRange3d { 183 | let path_start = path_to_blobseq(&range3d.paths().start); 184 | let path_end = match &range3d.paths().end { 185 | RangeEnd::Closed(end) => Some(path_to_blobseq(end)), 186 | RangeEnd::Open => None, 187 | }; 188 | QueryRange3d { 189 | x: to_query_range(range3d.subspaces()), 190 | y: to_query_range(&map_range(range3d.times(), |ts| StoredTimestamp::new(*ts))), 191 | z: QueryRange::new(path_start, path_end), 192 | } 193 | } 194 | 195 | pub(crate) fn to_query_range(range: &Range) -> QueryRange { 196 | 
QueryRange::new( 197 | range.start.clone(), 198 | match &range.end { 199 | RangeEnd::Closed(end) => Some(end.clone()), 200 | RangeEnd::Open => None, 201 | }, 202 | ) 203 | } 204 | 205 | pub(crate) fn to_range3d(query_range3d: QueryRange3d) -> Result { 206 | let path_max = match query_range3d.z.max { 207 | Some(max) => RangeEnd::Closed(blobseq_to_path(&max)?), 208 | None => RangeEnd::Open, 209 | }; 210 | Ok(Range3d::new( 211 | to_range(query_range3d.x), 212 | Range { 213 | start: blobseq_to_path(&query_range3d.z.min)?, 214 | end: path_max, 215 | }, 216 | Range { 217 | start: query_range3d.y.min.timestamp(), 218 | end: query_range3d 219 | .y 220 | .max 221 | .map_or(RangeEnd::Open, |ts| RangeEnd::Closed(ts.timestamp())), 222 | }, 223 | )) 224 | } 225 | 226 | fn to_range(qr: QueryRange) -> Range { 227 | Range { 228 | start: qr.min, 229 | end: qr.max.map_or(RangeEnd::Open, RangeEnd::Closed), 230 | } 231 | } 232 | 233 | pub(crate) fn map_range(range: &Range, f: impl Fn(&S) -> T) -> Range { 234 | Range { 235 | start: f(&range.start), 236 | end: match &range.end { 237 | RangeEnd::Closed(end) => RangeEnd::Closed(f(end)), 238 | RangeEnd::Open => RangeEnd::Open, 239 | }, 240 | } 241 | } 242 | 243 | #[cfg(test)] 244 | mod tests { 245 | use proptest::prop_assert_eq; 246 | use test_strategy::proptest; 247 | 248 | use super::StoredTimestamp; 249 | 250 | #[proptest] 251 | fn prop_stored_timestamp_ord_matches_u64_ord(num: u64, other: u64) { 252 | let expected = num.cmp(&other); 253 | let actual = StoredTimestamp::new(num).cmp(&StoredTimestamp::new(other)); 254 | prop_assert_eq!(expected, actual); 255 | } 256 | } 257 | -------------------------------------------------------------------------------- /.github/workflows/tests.yaml: -------------------------------------------------------------------------------- 1 | # Run all tests, with or without flaky tests. 2 | 3 | name: Tests 4 | 5 | on: 6 | workflow_call: 7 | inputs: 8 | rust-version: 9 | description: 'The version of the rust compiler to run' 10 | type: string 11 | default: 'stable' 12 | flaky: 13 | description: 'Whether to also run flaky tests' 14 | type: boolean 15 | default: false 16 | git-ref: 17 | description: 'Which git ref to checkout' 18 | type: string 19 | default: ${{ github.ref }} 20 | 21 | env: 22 | RUST_BACKTRACE: 1 23 | RUSTFLAGS: -Dwarnings 24 | RUSTDOCFLAGS: -Dwarnings 25 | SCCACHE_CACHE_SIZE: "50G" 26 | CRATES_LIST: "iroh-willow" 27 | IROH_FORCE_STAGING_RELAYS: "1" 28 | 29 | jobs: 30 | build_and_test_nix: 31 | timeout-minutes: 30 32 | name: "Tests" 33 | runs-on: ${{ matrix.runner }} 34 | strategy: 35 | fail-fast: false 36 | matrix: 37 | name: [ubuntu-latest, macOS-arm-latest] 38 | rust: [ '${{ inputs.rust-version }}' ] 39 | features: [all, none, default] 40 | include: 41 | - name: ubuntu-latest 42 | os: ubuntu-latest 43 | release-os: linux 44 | release-arch: amd64 45 | runner: [self-hosted, linux, X64] 46 | - name: macOS-arm-latest 47 | os: macOS-latest 48 | release-os: darwin 49 | release-arch: aarch64 50 | runner: [self-hosted, macOS, ARM64] 51 | env: 52 | # Using self-hosted runners so use local cache for sccache and 53 | # not SCCACHE_GHA_ENABLED. 
54 | RUSTC_WRAPPER: "sccache" 55 | steps: 56 | - name: Checkout 57 | uses: actions/checkout@v6 58 | with: 59 | ref: ${{ inputs.git-ref }} 60 | 61 | - name: Install ${{ matrix.rust }} rust 62 | uses: dtolnay/rust-toolchain@master 63 | with: 64 | toolchain: ${{ matrix.rust }} 65 | 66 | - name: Install cargo-nextest 67 | uses: taiki-e/install-action@v2 68 | with: 69 | tool: nextest@0.9.80 70 | 71 | - name: Install sccache 72 | uses: mozilla-actions/sccache-action@v0.0.9 73 | 74 | - name: Select features 75 | run: | 76 | case "${{ matrix.features }}" in 77 | all) 78 | echo "FEATURES=--all-features" >> "$GITHUB_ENV" 79 | ;; 80 | none) 81 | echo "FEATURES=--no-default-features" >> "$GITHUB_ENV" 82 | ;; 83 | default) 84 | echo "FEATURES=" >> "$GITHUB_ENV" 85 | ;; 86 | *) 87 | exit 1 88 | esac 89 | 90 | - name: check features 91 | if: ${{ ! inputs.flaky }} 92 | run: | 93 | for i in ${CRATES_LIST//,/ } 94 | do 95 | echo "Checking $i $FEATURES" 96 | if [ $i = "iroh-cli" ]; then 97 | targets="--bins" 98 | else 99 | targets="--lib --bins" 100 | fi 101 | echo cargo check -p $i $FEATURES $targets 102 | cargo check -p $i $FEATURES $targets 103 | done 104 | env: 105 | RUST_LOG: ${{ runner.debug && 'TRACE' || 'DEBUG'}} 106 | 107 | - name: build tests 108 | run: | 109 | cargo nextest run --workspace ${{ env.FEATURES }} --lib --bins --tests --no-run 110 | 111 | - name: list ignored tests 112 | run: | 113 | cargo nextest list --workspace ${{ env.FEATURES }} --lib --bins --tests --run-ignored ignored-only 114 | 115 | - name: run tests 116 | run: | 117 | mkdir -p output 118 | cargo nextest run --workspace ${{ env.FEATURES }} --lib --bins --tests --profile ci --run-ignored ${{ inputs.flaky && 'all' || 'default' }} --no-fail-fast --message-format ${{ inputs.flaky && 'libtest-json' || 'human' }} > output/${{ matrix.name }}_${{ matrix.features }}_${{ matrix.rust }}.json 119 | env: 120 | RUST_LOG: ${{ runner.debug && 'TRACE' || 'DEBUG'}} 121 | NEXTEST_EXPERIMENTAL_LIBTEST_JSON: 1 122 | 123 | - name: upload results 124 | if: ${{ failure() && inputs.flaky }} 125 | uses: actions/upload-artifact@v5 126 | with: 127 | name: libtest_run_${{ github.run_number }}-${{ github.run_attempt }}-${{ matrix.name }}_${{ matrix.features }}_${{ matrix.rust }}.json 128 | path: output 129 | retention-days: 45 130 | compression-level: 0 131 | 132 | - name: doctests 133 | if: ${{ (! inputs.flaky) && matrix.features == 'all' }} 134 | run: | 135 | if [ -n "${{ runner.debug }}" ]; then 136 | export RUST_LOG=TRACE 137 | else 138 | export RUST_LOG=DEBUG 139 | fi 140 | cargo test --workspace --all-features --doc 141 | 142 | build_and_test_windows: 143 | timeout-minutes: 30 144 | name: "Tests" 145 | runs-on: ${{ matrix.runner }} 146 | strategy: 147 | fail-fast: false 148 | matrix: 149 | name: [windows-latest] 150 | rust: [ '${{ inputs.rust-version}}' ] 151 | features: [all, none, default] 152 | target: 153 | - x86_64-pc-windows-msvc 154 | include: 155 | - name: windows-latest 156 | os: windows 157 | runner: [self-hosted, windows, x64] 158 | env: 159 | # Using self-hosted runners so use local cache for sccache and 160 | # not SCCACHE_GHA_ENABLED. 
161 | RUSTC_WRAPPER: "sccache" 162 | steps: 163 | - name: Checkout 164 | uses: actions/checkout@v6 165 | with: 166 | ref: ${{ inputs.git-ref }} 167 | 168 | - name: Install ${{ matrix.rust }} 169 | run: | 170 | rustup toolchain install ${{ matrix.rust }} 171 | rustup toolchain default ${{ matrix.rust }} 172 | rustup target add ${{ matrix.target }} 173 | rustup set default-host ${{ matrix.target }} 174 | 175 | - name: Install cargo-nextest 176 | shell: powershell 177 | run: | 178 | $tmp = New-TemporaryFile | Rename-Item -NewName { $_ -replace 'tmp$', 'zip' } -PassThru 179 | Invoke-WebRequest -OutFile $tmp https://get.nexte.st/latest/windows 180 | $outputDir = if ($Env:CARGO_HOME) { Join-Path $Env:CARGO_HOME "bin" } else { "~/.cargo/bin" } 181 | $tmp | Expand-Archive -DestinationPath $outputDir -Force 182 | $tmp | Remove-Item 183 | 184 | - name: Select features 185 | run: | 186 | switch ("${{ matrix.features }}") { 187 | "all" { 188 | echo "FEATURES=--all-features" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf8 -Append 189 | } 190 | "none" { 191 | echo "FEATURES=--no-default-features" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf8 -Append 192 | } 193 | "default" { 194 | echo "FEATURES=" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf8 -Append 195 | } 196 | default { 197 | Exit 1 198 | } 199 | } 200 | 201 | - name: Install sccache 202 | uses: mozilla-actions/sccache-action@v0.0.9 203 | 204 | - uses: msys2/setup-msys2@v2 205 | 206 | - name: build tests 207 | run: | 208 | cargo nextest run --workspace ${{ env.FEATURES }} --lib --bins --tests --target ${{ matrix.target }} --no-run 209 | 210 | - name: list ignored tests 211 | run: | 212 | cargo nextest list --workspace ${{ env.FEATURES }} --lib --bins --tests --target ${{ matrix.target }} --run-ignored ignored-only 213 | 214 | - name: tests 215 | run: | 216 | mkdir -p output 217 | cargo nextest run --workspace ${{ env.FEATURES }} --lib --bins --tests --profile ci --target ${{ matrix.target }} --run-ignored ${{ inputs.flaky && 'all' || 'default' }} --no-fail-fast --message-format ${{ inputs.flaky && 'libtest-json' || 'human' }} > output/${{ matrix.name }}_${{ matrix.features }}_${{ matrix.rust }}.json 218 | env: 219 | RUST_LOG: ${{ runner.debug && 'TRACE' || 'DEBUG'}} 220 | NEXTEST_EXPERIMENTAL_LIBTEST_JSON: 1 221 | 222 | - name: upload results 223 | if: ${{ failure() && inputs.flaky }} 224 | uses: actions/upload-artifact@v5 225 | with: 226 | name: libtest_run_${{ github.run_number }}-${{ github.run_attempt }}-${{ matrix.name }}_${{ matrix.features }}_${{ matrix.rust }}.json 227 | path: output 228 | retention-days: 1 229 | compression-level: 0 230 | -------------------------------------------------------------------------------- /src/session/aoi_finder.rs: -------------------------------------------------------------------------------- 1 | use std::collections::hash_map; 2 | 3 | use futures_lite::{Stream, StreamExt}; 4 | use genawaiter::rc::Co; 5 | 6 | use crate::{ 7 | interest::InterestMap, 8 | proto::{ 9 | grouping::{Area, AreaOfInterest}, 10 | keys::NamespaceId, 11 | meadowcap::{ReadAuthorisation, ReadCapability}, 12 | wgps::{ 13 | AreaOfInterestHandle, CapabilityHandle, IntersectionHandle, SetupBindAreaOfInterest, 14 | }, 15 | }, 16 | session::{ 17 | capabilities::Capabilities, 18 | pai_finder::PaiIntersection, 19 | resource::{ResourceMap, Scope}, 20 | Error, 21 | }, 22 | util::gen_stream::GenStream, 23 | }; 24 | 25 | /// Intersection between two areas of interest. 
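///
/// Yielded by the [`IntersectionFinder`] below whenever an area of interest bound by us
/// overlaps an area of interest bound by the remote peer within the same namespace.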
26 | #[derive(Debug, Clone)] 27 | pub struct AoiIntersection { 28 | pub our_handle: AreaOfInterestHandle, 29 | pub their_handle: AreaOfInterestHandle, 30 | pub intersection: AreaOfInterest, 31 | pub namespace: NamespaceId, 32 | } 33 | 34 | impl AoiIntersection { 35 | pub fn id(&self) -> (AreaOfInterestHandle, AreaOfInterestHandle) { 36 | (self.our_handle, self.their_handle) 37 | } 38 | 39 | pub fn area(&self) -> &Area { 40 | &self.intersection.area 41 | } 42 | } 43 | 44 | #[derive(Debug)] 45 | pub enum Input { 46 | AddInterests(InterestMap), 47 | PaiIntersection(PaiIntersection), 48 | ReceivedValidatedAoi { 49 | namespace: NamespaceId, 50 | aoi: AreaOfInterest, 51 | }, 52 | } 53 | 54 | #[derive(Debug)] 55 | pub enum Output { 56 | SendMessage(SetupBindAreaOfInterest), 57 | SubmitAuthorisation(Box), 58 | AoiIntersection(AoiIntersection), 59 | SignAndSendCapability { 60 | handle: IntersectionHandle, 61 | capability: ReadCapability, 62 | }, 63 | } 64 | 65 | #[derive(derive_more::Debug)] 66 | pub struct IntersectionFinder { 67 | #[debug("Co")] 68 | co: Co, 69 | caps: Capabilities, 70 | handles: AoiResources, 71 | interests: InterestMap, 72 | } 73 | 74 | impl IntersectionFinder { 75 | /// Run the [`IntersectionFinder`]. 76 | /// 77 | /// The returned stream is a generator, so it must be polled repeatedly to progress. 78 | pub fn run_gen( 79 | caps: Capabilities, 80 | inbox: impl Stream, 81 | ) -> impl Stream> { 82 | GenStream::new(|co| Self::new(co, caps).run(inbox)) 83 | } 84 | 85 | fn new(co: Co, caps: Capabilities) -> Self { 86 | Self { 87 | co, 88 | caps, 89 | interests: Default::default(), 90 | handles: Default::default(), 91 | } 92 | } 93 | 94 | async fn run(mut self, inbox: impl Stream) -> Result<(), Error> { 95 | tokio::pin!(inbox); 96 | while let Some(input) = inbox.next().await { 97 | match input { 98 | Input::AddInterests(interests) => self.add_interests(interests).await, 99 | Input::PaiIntersection(intersection) => { 100 | self.on_pai_intersection(intersection).await?; 101 | } 102 | Input::ReceivedValidatedAoi { namespace, aoi } => { 103 | self.handles 104 | .bind_validated(&self.co, Scope::Theirs, namespace, aoi) 105 | .await; 106 | } 107 | } 108 | } 109 | Ok(()) 110 | } 111 | 112 | async fn add_interests(&mut self, interests: InterestMap) { 113 | for (authorisation, aois) in interests.into_iter() { 114 | let namespace = authorisation.namespace(); 115 | match self.interests.entry(authorisation.clone()) { 116 | hash_map::Entry::Occupied(mut entry) => { 117 | // The authorisation is already submitted. 118 | let existing = entry.get_mut(); 119 | let capability_handle = self.caps.find_ours(authorisation.read_cap()); 120 | for aoi in aois { 121 | // If the AoI is new, and the capability is already bound, bind and send 122 | // the AoI right away. 123 | if existing.insert(aoi.clone()) { 124 | if let Some(capability_handle) = capability_handle { 125 | self.handles 126 | .bind_and_send_ours(&self.co, namespace, capability_handle, aoi) 127 | .await; 128 | } 129 | } 130 | } 131 | } 132 | hash_map::Entry::Vacant(entry) => { 133 | // The authorisation is new. Submit to the PaiFinder. 
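// (The PaiFinder answers with an `Input::PaiIntersection` once private area
// intersection succeeds for this authorisation; `on_pai_intersection` below then
// binds and sends the areas of interest stored here.)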
134 | entry.insert(aois); 135 | self.co 136 | .yield_(Output::SubmitAuthorisation(Box::new(authorisation))) 137 | .await; 138 | } 139 | } 140 | } 141 | } 142 | 143 | async fn on_pai_intersection(&mut self, intersection: PaiIntersection) -> Result<(), Error> { 144 | let PaiIntersection { 145 | authorisation, 146 | handle, 147 | } = intersection; 148 | let aois = self 149 | .interests 150 | .get(&authorisation) 151 | .ok_or(Error::NoKnownInterestsForCapability)? 152 | .clone(); 153 | let namespace = authorisation.namespace(); 154 | let (capability_handle, is_new) = self.caps.bind_ours(authorisation.read_cap().clone()); 155 | if is_new { 156 | self.co 157 | .yield_(Output::SignAndSendCapability { 158 | handle, 159 | capability: authorisation.read_cap().clone(), 160 | }) 161 | .await; 162 | } 163 | 164 | for aoi in aois.into_iter() { 165 | self.handles 166 | .bind_and_send_ours(&self.co, namespace, capability_handle, aoi) 167 | .await; 168 | } 169 | Ok(()) 170 | } 171 | } 172 | 173 | #[derive(Debug, Default)] 174 | struct AoiResources { 175 | our_handles: ResourceMap, 176 | their_handles: ResourceMap, 177 | } 178 | 179 | impl AoiResources { 180 | async fn bind_and_send_ours( 181 | &mut self, 182 | co: &Co, 183 | namespace: NamespaceId, 184 | authorisation: CapabilityHandle, 185 | aoi: AreaOfInterest, 186 | ) { 187 | self.bind_validated(co, Scope::Ours, namespace, aoi.clone()) 188 | .await; 189 | let msg = SetupBindAreaOfInterest { 190 | area_of_interest: aoi.into(), 191 | authorisation, 192 | }; 193 | co.yield_(Output::SendMessage(msg)).await; 194 | } 195 | pub async fn bind_validated( 196 | &mut self, 197 | co: &Co, 198 | scope: Scope, 199 | namespace: NamespaceId, 200 | aoi: AreaOfInterest, 201 | ) { 202 | let info = AoiInfo { 203 | aoi: aoi.clone(), 204 | namespace, 205 | }; 206 | let bound_handle = match scope { 207 | Scope::Ours => self.our_handles.bind(info), 208 | Scope::Theirs => self.their_handles.bind(info), 209 | }; 210 | 211 | let store_to_check_against = match scope { 212 | Scope::Ours => &self.their_handles, 213 | Scope::Theirs => &self.our_handles, 214 | }; 215 | 216 | // TODO: If we stored the AoIs by namespace we would need to iterate less. 217 | for (other_handle, other_aoi) in store_to_check_against.iter() { 218 | if other_aoi.namespace != namespace { 219 | continue; 220 | } 221 | let other_handle = *other_handle; 222 | // Check if we have an intersection. 223 | if let Some(intersection) = other_aoi.aoi.intersection(&aoi) { 224 | // We found an intersection! 225 | let (our_handle, their_handle) = match scope { 226 | Scope::Ours => (bound_handle, other_handle), 227 | Scope::Theirs => (other_handle, bound_handle), 228 | }; 229 | let intersection = AoiIntersection { 230 | our_handle, 231 | their_handle, 232 | intersection, 233 | namespace, 234 | }; 235 | co.yield_(Output::AoiIntersection(intersection)).await; 236 | } 237 | } 238 | } 239 | } 240 | 241 | #[derive(Debug)] 242 | struct AoiInfo { 243 | aoi: AreaOfInterest, 244 | namespace: NamespaceId, 245 | } 246 | -------------------------------------------------------------------------------- /src/proto/grouping.rs: -------------------------------------------------------------------------------- 1 | //! Utilities for Willow's entry [groupings](https://willowprotocol.org/specs/grouping-entries/index.html#grouping_entries). 
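//!
//! A hedged sketch using helpers from this module (the path bytes are placeholders):
//! ```ignore
//! let path = Path::from_bytes(&[b"blog", b"posts"])?;
//! let area = Area::new_path(path); // any subspace, any timestamp, paths prefixed by `path`
//! let range = area.to_range();     // the equivalent 3d range
//! ```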
2 | 3 | use serde::{Deserialize, Serialize}; 4 | pub use willow_data_model::grouping::{Range, RangeEnd}; 5 | use willow_data_model::SubspaceId as _; 6 | 7 | use super::data_model::{ 8 | self, Entry, Path, SubspaceId, Timestamp, MAX_COMPONENT_COUNT, MAX_COMPONENT_LENGTH, 9 | MAX_PATH_LENGTH, 10 | }; 11 | 12 | /// See [`willow_data_model::grouping::Range3d`]. 13 | pub type Range3d = willow_data_model::grouping::Range3d< 14 | MAX_COMPONENT_LENGTH, 15 | MAX_COMPONENT_COUNT, 16 | MAX_PATH_LENGTH, 17 | SubspaceId, 18 | >; 19 | 20 | /// See [`willow_data_model::grouping::Area`]. 21 | pub type Area = willow_data_model::grouping::Area< 22 | MAX_COMPONENT_LENGTH, 23 | MAX_COMPONENT_COUNT, 24 | MAX_PATH_LENGTH, 25 | SubspaceId, 26 | >; 27 | 28 | /// See [`willow_data_model::grouping::AreaSubspace`]. 29 | pub type AreaSubspace = willow_data_model::grouping::AreaSubspace; 30 | 31 | /// See [`willow_data_model::grouping::AreaOfInterest`]. 32 | pub type AreaOfInterest = willow_data_model::grouping::AreaOfInterest< 33 | MAX_COMPONENT_LENGTH, 34 | MAX_COMPONENT_COUNT, 35 | MAX_PATH_LENGTH, 36 | SubspaceId, 37 | >; 38 | 39 | /// Extension methods for [`AreaOfInterest`]. 40 | // TODO: Upstream to willow-rs as methods on [`AreaOfInterest]. 41 | pub trait AreaOfInterestExt { 42 | /// Creates a new area of interest with the specified area and no other limits. 43 | fn with_area(area: Area) -> AreaOfInterest; 44 | } 45 | 46 | impl AreaOfInterestExt for AreaOfInterest { 47 | fn with_area(area: Area) -> AreaOfInterest { 48 | AreaOfInterest { 49 | area, 50 | max_count: 0, 51 | max_size: 0, 52 | } 53 | } 54 | } 55 | 56 | /// Extension methods for [`Area`]. 57 | // TODO: Upstream to willow-rs as methods on [`Area`]. 58 | pub trait AreaExt { 59 | /// Returns `true` if the area contains `point`. 60 | fn includes_point(&self, point: &Point) -> bool; 61 | 62 | /// Creates a new area with `path` as prefix and no constraints on subspace or timestamp. 63 | fn new_path(path: Path) -> Area; 64 | 65 | /// Converts the area into a [`Range3d`]. 66 | fn to_range(&self) -> Range3d; 67 | } 68 | 69 | impl AreaExt for Area { 70 | fn includes_point(&self, point: &Point) -> bool { 71 | self.includes_area(&point.into_area()) 72 | } 73 | 74 | fn new_path(path: Path) -> Self { 75 | Self::new(AreaSubspace::Any, path, Range::full()) 76 | } 77 | 78 | fn to_range(&self) -> Range3d { 79 | let subspaces = match self.subspace() { 80 | AreaSubspace::Id(id) => match id.successor() { 81 | None => Range::new_open(*id), 82 | Some(end) => Range::new_closed(*id, end).expect("successor is bigger"), 83 | }, 84 | AreaSubspace::Any => Default::default(), 85 | }; 86 | let path = self.path(); 87 | let path_range = match path.greater_but_not_prefixed() { 88 | None => Range::new_open(path.clone()), 89 | Some(end) => Range::new_closed(path.clone(), end).expect("successor is bigger"), 90 | }; 91 | Range3d::new(subspaces, path_range, *self.times()) 92 | } 93 | } 94 | 95 | /// A single point in the 3D range space. 96 | /// 97 | /// I.e. an entry. 98 | // TODO: Upstream to willow-rs. 
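// Conceptually, `Point::into_area` below maps the point to an area pinned to its
// subspace and path, with the time range closed at `[t, t + 1)`.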
99 | #[derive(Debug, Clone, Hash, Eq, PartialEq, Serialize, Deserialize)] 100 | pub struct Point { 101 | #[serde(with = "data_model::serde_encoding::path")] 102 | pub path: Path, 103 | pub timestamp: Timestamp, 104 | pub subspace_id: SubspaceId, 105 | } 106 | 107 | impl Point { 108 | pub fn new(subspace_id: SubspaceId, path: Path, timestamp: Timestamp) -> Self { 109 | Self { 110 | subspace_id, 111 | path, 112 | timestamp, 113 | } 114 | } 115 | pub fn from_entry(entry: &Entry) -> Self { 116 | Self { 117 | path: entry.path().clone(), 118 | timestamp: entry.timestamp(), 119 | subspace_id: *entry.subspace_id(), 120 | } 121 | } 122 | 123 | pub fn into_area(&self) -> Area { 124 | let times = Range::new_closed(self.timestamp, self.timestamp + 1).expect("verified"); 125 | Area::new(AreaSubspace::Id(self.subspace_id), self.path.clone(), times) 126 | } 127 | } 128 | 129 | pub mod serde_encoding { 130 | use serde::{de, Deserialize, Deserializer, Serialize}; 131 | 132 | use super::*; 133 | use crate::util::codec2::{from_bytes_relative, to_vec_relative}; 134 | 135 | pub mod area { 136 | use super::*; 137 | pub fn serialize( 138 | area: &Area, 139 | serializer: S, 140 | ) -> Result { 141 | let previous = Area::new_full(); 142 | let encoded_area = to_vec_relative(&previous, area); 143 | encoded_area.serialize(serializer) 144 | } 145 | 146 | pub fn deserialize<'de, D>(deserializer: D) -> Result 147 | where 148 | D: Deserializer<'de>, 149 | { 150 | let relative = Area::new_full(); 151 | let encoded_area: Vec = Deserialize::deserialize(deserializer)?; 152 | let area = from_bytes_relative(&relative, &encoded_area).map_err(de::Error::custom)?; 153 | Ok(area) 154 | } 155 | } 156 | 157 | pub mod area_of_interest { 158 | use super::*; 159 | pub fn serialize( 160 | aoi: &AreaOfInterest, 161 | serializer: S, 162 | ) -> Result { 163 | let previous = Area::new_full(); 164 | let encoded_area = to_vec_relative(&previous, &aoi.area); 165 | (encoded_area, aoi.max_count, aoi.max_size).serialize(serializer) 166 | } 167 | 168 | pub fn deserialize<'de, D>(deserializer: D) -> Result 169 | where 170 | D: Deserializer<'de>, 171 | { 172 | let relative = Area::new_full(); 173 | let (encoded_area, max_count, max_size): (Vec, u64, u64) = 174 | Deserialize::deserialize(deserializer)?; 175 | let area = from_bytes_relative(&relative, &encoded_area).map_err(de::Error::custom)?; 176 | Ok(AreaOfInterest::new(area, max_count, max_size)) 177 | } 178 | } 179 | 180 | pub mod range_3d { 181 | use super::*; 182 | pub fn serialize( 183 | range: &Range3d, 184 | serializer: S, 185 | ) -> Result { 186 | let previous = Range3d::new_full(); 187 | to_vec_relative(&previous, range).serialize(serializer) 188 | } 189 | 190 | pub fn deserialize<'de, D>(deserializer: D) -> Result 191 | where 192 | D: Deserializer<'de>, 193 | { 194 | let previous = Range3d::new_full(); 195 | let bytes: Vec = Deserialize::deserialize(deserializer)?; 196 | let decoded = from_bytes_relative(&previous, &bytes).map_err(de::Error::custom)?; 197 | Ok(decoded) 198 | } 199 | } 200 | 201 | #[derive( 202 | Debug, 203 | Clone, 204 | Eq, 205 | PartialEq, 206 | derive_more::From, 207 | derive_more::Into, 208 | derive_more::Deref, 209 | Serialize, 210 | Deserialize, 211 | )] 212 | pub struct SerdeArea(#[serde(with = "area")] pub Area); 213 | 214 | #[derive( 215 | Debug, 216 | Clone, 217 | Eq, 218 | PartialEq, 219 | derive_more::From, 220 | derive_more::Into, 221 | derive_more::Deref, 222 | Serialize, 223 | Deserialize, 224 | )] 225 | pub struct SerdeAreaOfInterest(#[serde(with = 
"area_of_interest")] pub AreaOfInterest); 226 | 227 | #[derive( 228 | Debug, 229 | Clone, 230 | derive_more::From, 231 | derive_more::Into, 232 | derive_more::Deref, 233 | Serialize, 234 | Deserialize, 235 | )] 236 | pub struct SerdeRange3d(#[serde(with = "range_3d")] pub Range3d); 237 | } 238 | 239 | #[cfg(test)] 240 | mod tests { 241 | use std::collections::HashSet; 242 | 243 | use crate::proto::{ 244 | data_model::{Path, PathExt}, 245 | grouping::{Area, AreaExt}, 246 | }; 247 | 248 | #[test] 249 | fn area_eq() { 250 | let p1 = Path::from_bytes(&[b"foo", b"bar"]).unwrap(); 251 | let a1 = Area::new_path(p1); 252 | let p2 = Path::from_bytes(&[b"foo", b"bar"]).unwrap(); 253 | let a2 = Area::new_path(p2); 254 | assert_eq!(a1, a2); 255 | let mut set = HashSet::new(); 256 | set.insert(a1.clone()); 257 | set.insert(a2.clone()); 258 | assert_eq!(set.len(), 1); 259 | } 260 | } 261 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | 3 | on: 4 | pull_request: 5 | types: [ 'labeled', 'unlabeled', 'opened', 'synchronize', 'reopened' ] 6 | merge_group: 7 | push: 8 | branches: 9 | - main 10 | 11 | concurrency: 12 | group: ci-${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} 13 | cancel-in-progress: true 14 | 15 | env: 16 | RUST_BACKTRACE: 1 17 | RUSTFLAGS: -Dwarnings 18 | RUSTDOCFLAGS: -Dwarnings 19 | MSRV: "1.81" 20 | SCCACHE_CACHE_SIZE: "50G" 21 | IROH_FORCE_STAGING_RELAYS: "1" 22 | 23 | jobs: 24 | tests: 25 | name: CI Test Suite 26 | if: "github.event_name != 'pull_request' || ! contains(github.event.pull_request.labels.*.name, 'flaky-test')" 27 | uses: './.github/workflows/tests.yaml' 28 | 29 | cross_build: 30 | name: Cross Build Only 31 | if: "github.event_name != 'pull_request' || ! contains(github.event.pull_request.labels.*.name, 'flaky-test')" 32 | timeout-minutes: 30 33 | runs-on: [self-hosted, linux, X64] 34 | strategy: 35 | fail-fast: false 36 | matrix: 37 | target: 38 | # cross tests are currently broken vor armv7 and aarch64 39 | # see https://github.com/cross-rs/cross/issues/1311 40 | # - armv7-linux-androideabi 41 | # - aarch64-linux-android 42 | # Freebsd execution fails in cross 43 | # - i686-unknown-freebsd # Linking fails :/ 44 | - x86_64-unknown-freebsd 45 | # Netbsd execution fails to link in cross 46 | # - x86_64-unknown-netbsd 47 | steps: 48 | - name: Checkout 49 | uses: actions/checkout@v6 50 | with: 51 | submodules: recursive 52 | 53 | - name: Install rust stable 54 | uses: dtolnay/rust-toolchain@stable 55 | 56 | - name: Cleanup Docker 57 | continue-on-error: true 58 | run: | 59 | docker kill $(docker ps -q) 60 | 61 | # See https://github.com/cross-rs/cross/issues/1222 62 | - uses: taiki-e/install-action@cross 63 | 64 | - name: build 65 | # cross tests are currently broken vor armv7 and aarch64 66 | # see https://github.com/cross-rs/cross/issues/1311. So on 67 | # those platforms we only build but do not run tests. 68 | run: cross build --all --target ${{ matrix.target }} 69 | env: 70 | RUST_LOG: ${{ runner.debug && 'TRACE' || 'DEBUG'}} 71 | 72 | android_build: 73 | name: Android Build Only 74 | if: "github.event_name != 'pull_request' || ! 
75 |     timeout-minutes: 30
76 |     # runs-on: ubuntu-latest
77 |     runs-on: [self-hosted, linux, X64]
78 |     strategy:
79 |       fail-fast: false
80 |       matrix:
81 |         target:
82 |           - aarch64-linux-android
83 |           - armv7-linux-androideabi
84 |     steps:
85 |       - name: Checkout
86 |         uses: actions/checkout@v6
87 | 
88 |       - name: Set up Rust
89 |         uses: dtolnay/rust-toolchain@stable
90 |         with:
91 |           target: ${{ matrix.target }}
92 |       - name: Install rustup target
93 |         run: rustup target add ${{ matrix.target }}
94 | 
95 |       - name: Setup Java
96 |         uses: actions/setup-java@v5
97 |         with:
98 |           distribution: 'temurin'
99 |           java-version: '17'
100 | 
101 |       - name: Setup Android SDK
102 |         uses: android-actions/setup-android@v3
103 | 
104 |       - name: Setup Android NDK
105 |         uses: arqu/setup-ndk@main
106 |         id: setup-ndk
107 |         with:
108 |           ndk-version: r23
109 |           add-to-path: true
110 | 
111 |       - name: Build
112 |         env:
113 |           ANDROID_NDK_HOME: ${{ steps.setup-ndk.outputs.ndk-path }}
114 |         run: |
115 |           cargo install --version 3.5.4 cargo-ndk
116 |           cargo ndk --target ${{ matrix.target }} build
117 | 
118 |   cross_test:
119 |     name: Cross Test
120 |     if: "github.event_name != 'pull_request' || ! contains(github.event.pull_request.labels.*.name, 'flaky-test')"
121 |     timeout-minutes: 30
122 |     runs-on: [self-hosted, linux, X64]
123 |     strategy:
124 |       fail-fast: false
125 |       matrix:
126 |         target:
127 |           - i686-unknown-linux-gnu
128 |     steps:
129 |       - name: Checkout
130 |         uses: actions/checkout@v6
131 |         with:
132 |           submodules: recursive
133 | 
134 |       - name: Install rust stable
135 |         uses: dtolnay/rust-toolchain@stable
136 | 
137 |       - name: Cleanup Docker
138 |         continue-on-error: true
139 |         run: |
140 |           docker kill $(docker ps -q)
141 | 
142 |       # See https://github.com/cross-rs/cross/issues/1222
143 |       - uses: taiki-e/install-action@cross
144 | 
145 |       - name: test
146 |         run: cross test --all --target ${{ matrix.target }} -- --test-threads=12
147 |         env:
148 |           RUST_LOG: ${{ runner.debug && 'TRACE' || 'DEBUG' }}
149 | 
150 |   check_semver:
151 |     runs-on: ubuntu-latest
152 |     env:
153 |       RUSTC_WRAPPER: "sccache"
154 |       SCCACHE_GHA_ENABLED: "on"
155 |     steps:
156 |       - uses: actions/checkout@v6
157 |         with:
158 |           fetch-depth: 0
159 |       - name: Install sccache
160 |         uses: mozilla-actions/sccache-action@v0.0.9
161 | 
162 |       - name: Setup Environment (PR)
163 |         if: ${{ github.event_name == 'pull_request' }}
164 |         shell: bash
165 |         run: |
166 |           echo "HEAD_COMMIT_SHA=$(git rev-parse origin/${{ github.base_ref }})" >> ${GITHUB_ENV}
167 |       - name: Setup Environment (Push)
168 |         if: ${{ github.event_name == 'push' || github.event_name == 'merge_group' }}
169 |         shell: bash
170 |         run: |
171 |           echo "HEAD_COMMIT_SHA=$(git rev-parse origin/main)" >> ${GITHUB_ENV}
172 |       - name: Check semver
173 |         # uses: obi1kenobi/cargo-semver-checks-action@v2
174 |         uses: n0-computer/cargo-semver-checks-action@feat-baseline
175 |         with:
176 |           package: iroh-willow
177 |           baseline-rev: ${{ env.HEAD_COMMIT_SHA }}
178 |           use-cache: false
179 | 
180 |   check_fmt:
181 |     timeout-minutes: 30
182 |     name: Checking fmt
183 |     runs-on: ubuntu-latest
184 |     env:
185 |       RUSTC_WRAPPER: "sccache"
186 |       SCCACHE_GHA_ENABLED: "on"
187 |     steps:
188 |       - uses: actions/checkout@v6
189 |       - uses: dtolnay/rust-toolchain@stable
190 |         with:
191 |           components: rustfmt
192 |       - uses: mozilla-actions/sccache-action@v0.0.9
193 |       - uses: taiki-e/install-action@cargo-make
194 |       - run: cargo make format-check
195 | 
196 |   check_docs:
197 |     timeout-minutes: 30
198 |     name: Checking docs
199 |     runs-on: ubuntu-latest
200 |     env:
201 |       RUSTC_WRAPPER: "sccache"
202 |       SCCACHE_GHA_ENABLED: "on"
203 |     steps:
204 |       - uses: actions/checkout@v6
205 |       - uses: dtolnay/rust-toolchain@master
206 |         with:
207 |           toolchain: nightly-2024-11-30
208 |       - name: Install sccache
209 |         uses: mozilla-actions/sccache-action@v0.0.9
210 | 
211 |       - name: Docs
212 |         run: cargo doc --all-features --no-deps --document-private-items
213 |         env:
214 |           RUSTDOCFLAGS: --cfg docsrs
215 | 
216 |   clippy_check:
217 |     timeout-minutes: 30
218 |     runs-on: ubuntu-latest
219 |     env:
220 |       RUSTC_WRAPPER: "sccache"
221 |       SCCACHE_GHA_ENABLED: "on"
222 |     steps:
223 |       - uses: actions/checkout@v6
224 |       - uses: dtolnay/rust-toolchain@stable
225 |         with:
226 |           components: clippy
227 |       - name: Install sccache
228 |         uses: mozilla-actions/sccache-action@v0.0.9
229 | 
230 |       # TODO: We have a bunch of platform-dependent code, so we should
231 |       # probably run this job on the full platform matrix.
232 |       - name: clippy check (all features)
233 |         run: cargo clippy --all-features --all-targets --bins --tests --benches
234 | 
235 |       - name: clippy check (no features)
236 |         run: cargo clippy --no-default-features --lib --bins --tests
237 | 
238 |       - name: clippy check (default features)
239 |         run: cargo clippy --all-targets
240 | 
241 |   msrv:
242 |     if: "github.event_name != 'pull_request' || ! contains(github.event.pull_request.labels.*.name, 'flaky-test')"
243 |     timeout-minutes: 30
244 |     name: Minimum Supported Rust Version
245 |     runs-on: ubuntu-latest
246 |     env:
247 |       RUSTC_WRAPPER: "sccache"
248 |       SCCACHE_GHA_ENABLED: "on"
249 |     steps:
250 |       - uses: actions/checkout@v6
251 |       - uses: dtolnay/rust-toolchain@master
252 |         with:
253 |           toolchain: ${{ env.MSRV }}
254 |       - name: Install sccache
255 |         uses: mozilla-actions/sccache-action@v0.0.9
256 | 
257 |       - name: Check MSRV all features
258 |         run: |
259 |           cargo +$MSRV check --all-targets
260 | 
261 |   cargo_deny:
262 |     timeout-minutes: 30
263 |     name: cargo deny
264 |     runs-on: ubuntu-latest
265 |     steps:
266 |       - uses: actions/checkout@v6
267 |       - uses: EmbarkStudios/cargo-deny-action@v2
268 |         with:
269 |           arguments: --all-features
270 |           command: check
271 |           command-arguments: "-Dwarnings"
272 | 
273 |   codespell:
274 |     timeout-minutes: 30
275 |     runs-on: ubuntu-latest
276 |     steps:
277 |       - uses: actions/checkout@v6
278 |       - run: pip install --user codespell[toml]
279 |       - run: codespell --ignore-words-list=ans,atmost,crate,inout,ratatui,ser,stayin,swarmin,worl --skip=CHANGELOG.md
--------------------------------------------------------------------------------
/src/proto/meadowcap.rs:
--------------------------------------------------------------------------------
1 | //! The capability system of Willow.
2 | //!
3 | //! Contains an instantiation of [`meadowcap`] for use in iroh-willow.
4 | 
5 | use serde::{Deserialize, Serialize};
6 | use willow_data_model::AuthorisationToken;
7 | 
8 | use super::{
9 |     grouping::Area,
10 |     keys::{self, NamespaceSecretKey, UserSecretKey},
11 | };
12 | 
13 | pub type UserPublicKey = keys::UserPublicKey;
14 | pub type NamespacePublicKey = keys::NamespacePublicKey;
15 | pub type UserId = keys::UserId;
16 | pub type NamespaceId = keys::NamespaceId;
17 | pub type UserSignature = keys::UserSignature;
18 | pub type NamespaceSignature = keys::NamespaceSignature;
19 | 
20 | pub use meadowcap::{AccessMode, IsCommunal};
21 | 
22 | use super::data_model::{Entry, MAX_COMPONENT_COUNT, MAX_COMPONENT_LENGTH, MAX_PATH_LENGTH};
23 | 
24 | #[derive(Debug, derive_more::From, Serialize, Deserialize)]
25 | pub enum SecretKey {
26 |     User(keys::UserSecretKey),
27 |     Namespace(keys::NamespaceSecretKey),
28 | }
29 | 
30 | pub type McCapability = meadowcap::McCapability<
31 |     MAX_COMPONENT_LENGTH,
32 |     MAX_COMPONENT_COUNT,
33 |     MAX_PATH_LENGTH,
34 |     keys::NamespaceId,
35 |     keys::NamespaceSignature,
36 |     keys::UserId,
37 |     keys::UserSignature,
38 | >;
39 | 
40 | pub type McSubspaceCapability = meadowcap::McSubspaceCapability<
41 |     keys::NamespaceId,
42 |     keys::NamespaceSignature,
43 |     keys::UserId,
44 |     keys::UserSignature,
45 | >;
46 | 
47 | pub type SubspaceCapability = McSubspaceCapability;
48 | pub type ReadCapability = McCapability;
49 | pub type WriteCapability = McCapability;
50 | 
51 | pub type McAuthorisationToken = meadowcap::McAuthorisationToken<
52 |     MAX_COMPONENT_LENGTH,
53 |     MAX_COMPONENT_COUNT,
54 |     MAX_PATH_LENGTH,
55 |     keys::NamespaceId,
56 |     keys::NamespaceSignature,
57 |     keys::UserId,
58 |     keys::UserSignature,
59 | >;
60 | 
61 | pub fn is_authorised_write(entry: &Entry, token: &McAuthorisationToken) -> bool {
62 |     token.is_authorised_write(entry)
63 | }
64 | 
65 | pub type FailedDelegationError = meadowcap::FailedDelegationError<
66 |     MAX_COMPONENT_LENGTH,
67 |     MAX_COMPONENT_COUNT,
68 |     MAX_PATH_LENGTH,
69 |     keys::UserId,
70 | >;
71 | 
72 | /// Represents an authorisation to read an area of data in a Namespace.
73 | #[derive(Debug, Clone, Hash, Eq, PartialEq)]
74 | pub struct ReadAuthorisation(McCapability, Option<McSubspaceCapability>);
75 | 
76 | impl ReadAuthorisation {
77 |     pub fn new(read_cap: McCapability, subspace_cap: Option<McSubspaceCapability>) -> Self {
78 |         Self(read_cap, subspace_cap)
79 |     }
80 | 
81 |     pub fn new_owned(
82 |         namespace_secret: &NamespaceSecretKey,
83 |         user_key: UserId,
84 |     ) -> anyhow::Result<Self> {
85 |         let read_cap = McCapability::new_owned(
86 |             namespace_secret.public_key().id(),
87 |             namespace_secret,
88 |             user_key,
89 |             AccessMode::Read,
90 |         )?;
91 |         let subspace_cap = meadowcap::McSubspaceCapability::new(
92 |             namespace_secret.public_key().id(),
93 |             namespace_secret,
94 |             user_key,
95 |         )?;
96 |         Ok(Self::new(read_cap, Some(subspace_cap)))
97 |     }
98 | 
99 |     pub fn read_cap(&self) -> &McCapability {
100 |         &self.0
101 |     }
102 | 
103 |     pub fn subspace_cap(&self) -> Option<&McSubspaceCapability> {
104 |         self.1.as_ref()
105 |     }
106 | 
107 |     pub fn namespace(&self) -> NamespaceId {
108 |         *self.0.granted_namespace()
109 |     }
110 | 
111 |     pub fn delegate(
112 |         &self,
113 |         user_secret: &UserSecretKey,
114 |         new_user: UserId,
115 |         new_area: Area,
116 |     ) -> anyhow::Result<Self> {
117 |         let subspace_cap = match self.subspace_cap() {
118 |             Some(subspace_cap) if new_area.subspace().is_any() && !new_area.path().is_empty() => {
119 |                 Some(subspace_cap.delegate(user_secret, &new_user)?)
120 |             }
121 |             _ => None,
122 |         };
123 |         let read_cap = self
124 |             .read_cap()
125 |             .delegate(user_secret, &new_user, &new_area)?;
126 |         Ok(Self::new(read_cap, subspace_cap))
127 |     }
128 | }
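
To make the flow concrete, a small sketch of minting and passing on a `ReadAuthorisation` (illustrative, not from the source: the key material is assumed to come from the `keys` module, and the function names `authorise_reader`/`pass_on` are hypothetical):

```rust
use iroh_willow::proto::{
    grouping::Area,
    keys::{NamespaceSecretKey, UserSecretKey},
    meadowcap::{ReadAuthorisation, UserId},
};

// Mint a read authorisation over the full namespace for `user`.
// `new_owned` signs both the read capability and the subspace
// capability with the namespace secret.
fn authorise_reader(
    namespace_secret: &NamespaceSecretKey,
    user: UserId,
) -> anyhow::Result<ReadAuthorisation> {
    ReadAuthorisation::new_owned(namespace_secret, user)
}

// The holder can then narrow and pass on the authorisation via
// `delegate`, restricting it to `new_area` for `next_user`.
fn pass_on(
    auth: &ReadAuthorisation,
    user_secret: &UserSecretKey,
    next_user: UserId,
    new_area: Area,
) -> anyhow::Result<ReadAuthorisation> {
    auth.delegate(user_secret, next_user, new_area)
}
```
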
129 | 
130 | /// Returns `true` if `a` covers a larger area than `b`,
131 | /// or if it covers the same area with fewer delegations.
132 | pub fn is_wider_than(a: &McCapability, b: &McCapability) -> bool {
133 |     (a.granted_area().includes_area(&b.granted_area()))
134 |         || (a.granted_area() == b.granted_area() && a.delegations().len() < b.delegations().len())
135 | }
136 | 
137 | pub mod serde_encoding {
138 |     use serde::{de, Deserialize, Deserializer};
139 | 
140 |     use super::*;
141 |     use crate::{
142 |         proto::grouping::Area,
143 |         util::codec2::{from_bytes, from_bytes_relative, to_vec, to_vec_relative},
144 |     };
145 | 
146 |     pub mod read_authorisation {
147 |         use super::*;
148 |         pub fn serialize<S: serde::Serializer>(
149 |             value: &ReadAuthorisation,
150 |             serializer: S,
151 |         ) -> Result<S::Ok, S::Error> {
152 |             let encoded_cap = to_vec_relative(&Area::new_full(), value.read_cap());
153 |             let encoded_subspace_cap = value.subspace_cap().map(to_vec);
154 |             (encoded_cap, encoded_subspace_cap).serialize(serializer)
155 |         }
156 | 
157 |         pub fn deserialize<'de, D>(deserializer: D) -> Result<ReadAuthorisation, D::Error>
158 |         where
159 |             D: Deserializer<'de>,
160 |         {
161 |             let (read_cap, subspace_cap): (SerdeMcCapability, Option<SerdeMcSubspaceCapability>) =
162 |                 Deserialize::deserialize(deserializer)?;
163 |             Ok(ReadAuthorisation(
164 |                 read_cap.into(),
165 |                 subspace_cap.map(Into::into),
166 |             ))
167 |         }
168 |     }
169 | 
170 |     #[derive(
171 |         Debug,
172 |         Clone,
173 |         Eq,
174 |         PartialEq,
175 |         Hash,
176 |         derive_more::From,
177 |         derive_more::Into,
178 |         derive_more::Deref,
179 |         Serialize,
180 |         Deserialize,
181 |     )]
182 |     pub struct SerdeReadAuthorisation(#[serde(with = "read_authorisation")] pub ReadAuthorisation);
183 | 
184 |     pub mod mc_capability {
185 |         use super::*;
186 |         pub fn serialize<S: serde::Serializer>(
187 |             value: &McCapability,
188 |             serializer: S,
189 |         ) -> Result<S::Ok, S::Error> {
190 |             let previous = Area::new_full();
191 |             to_vec_relative(&previous, value).serialize(serializer)
192 |         }
193 | 
194 |         pub fn deserialize<'de, D>(deserializer: D) -> Result<McCapability, D::Error>
195 |         where
196 |             D: Deserializer<'de>,
197 |         {
198 |             let previous = Area::new_full();
199 |             let bytes: Vec<u8> = Deserialize::deserialize(deserializer)?;
200 |             let decoded = from_bytes_relative(&previous, &bytes).map_err(de::Error::custom)?;
201 |             Ok(decoded)
202 |         }
203 |     }
204 | 
205 |     #[derive(
206 |         Debug,
207 |         Clone,
208 |         Eq,
209 |         PartialEq,
210 |         Hash,
211 |         derive_more::From,
212 |         derive_more::Into,
213 |         derive_more::Deref,
214 |         Serialize,
215 |         Deserialize,
216 |     )]
217 |     pub struct SerdeMcCapability(#[serde(with = "mc_capability")] pub McCapability);
218 | 
219 |     pub mod mc_subspace_capability {
220 |         use super::*;
221 |         pub fn serialize<S: serde::Serializer>(
222 |             value: &McSubspaceCapability,
223 |             serializer: S,
224 |         ) -> Result<S::Ok, S::Error> {
225 |             to_vec(value).serialize(serializer)
226 |         }
227 | 
228 |         pub fn deserialize<'de, D>(deserializer: D) -> Result<McSubspaceCapability, D::Error>
229 |         where
230 |             D: Deserializer<'de>,
231 |         {
232 |             let bytes: Vec<u8> = Deserialize::deserialize(deserializer)?;
233 |             let decoded = from_bytes(&bytes).map_err(de::Error::custom)?;
234 |             Ok(decoded)
235 |         }
236 |     }
237 | 
238 |     #[derive(
239 |         Debug,
240 |         Clone,
241 |         derive_more::From,
242 |         derive_more::Into,
243 |         derive_more::Deref,
244 |         Serialize,
245 |         Deserialize,
246 |     )]
247 |     pub struct SerdeMcSubspaceCapability(
248 |         #[serde(with = "mc_subspace_capability")] pub McSubspaceCapability,
249 |     );
250 | 
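
As a usage sketch for `is_wider_than` above (illustrative; the selection function `widest` is hypothetical): given several capabilities for the same namespace, pick the one granting the widest area.

```rust
use iroh_willow::proto::meadowcap::{is_wider_than, McCapability};

// Select the "widest" capability: the one whose granted area includes the
// others', preferring fewer delegations when the areas are equal.
fn widest(caps: impl IntoIterator<Item = McCapability>) -> Option<McCapability> {
    let mut iter = caps.into_iter();
    let mut best = iter.next()?;
    for cap in iter {
        if is_wider_than(&cap, &best) {
            best = cap;
        }
    }
    Some(best)
}
```
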
251 |     pub mod access_mode {
252 |         use super::*;
253 |         pub fn serialize<S: serde::Serializer>(
254 |             value: &AccessMode,
255 |             serializer: S,
256 |         ) -> Result<S::Ok, S::Error> {
257 |             match value {
258 |                 AccessMode::Read => 0u8.serialize(serializer),
259 |                 AccessMode::Write => 1u8.serialize(serializer),
260 |             }
261 |         }
262 | 
263 |         pub fn deserialize<'de, D>(deserializer: D) -> Result<AccessMode, D::Error>
264 |         where
265 |             D: Deserializer<'de>,
266 |         {
267 |             let value: u8 = Deserialize::deserialize(deserializer)?;
268 |             match value {
269 |                 0 => Ok(AccessMode::Read),
270 |                 1 => Ok(AccessMode::Write),
271 |                 _ => Err(de::Error::custom("Invalid access mode")),
272 |             }
273 |         }
274 |     }
275 | }
276 | 
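
Finally, the `access_mode` helper module is meant to be plugged into message types with `#[serde(with = ...)]`. A sketch with a hypothetical request struct:

```rust
use iroh_willow::proto::meadowcap::{serde_encoding::access_mode, AccessMode};
use serde::{Deserialize, Serialize};

// Hypothetical request type. The `access_mode` module above encodes the
// mode as a single byte: 0 for read, 1 for write.
#[derive(Debug, Serialize, Deserialize)]
struct CapabilityRequest {
    #[serde(with = "access_mode")]
    mode: AccessMode,
}
```
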
--------------------------------------------------------------------------------
/.github/workflows/netsim_runner.yaml:
--------------------------------------------------------------------------------
1 | name: netsim-runner
2 | 
3 | on:
4 |   workflow_dispatch:
5 |     inputs:
6 |       branch:
7 |         required: true
8 |         type: string
9 |       max_workers:
10 |         required: true
11 |         type: number
12 |         default: 4
13 |       netsim_branch:
14 |         required: true
15 |         type: string
16 |         default: "main"
17 |       debug_logs:
18 |         required: false
19 |         type: boolean
20 |         default: false
21 |       build_profile:
22 |         required: false
23 |         type: string
24 |         default: "release"
25 |       sim_paths:
26 |         required: false
27 |         type: string
28 |         default: "sims/iroh,sims/integration"
29 |       publish_metrics:
30 |         required: false
31 |         type: boolean
32 |         default: false
33 |       visualizations:
34 |         required: false
35 |         type: boolean
36 |         default: false
37 |       pr_number:
38 |         required: false
39 |         type: string
40 |         default: ""
41 |       report_table:
42 |         required: false
43 |         type: boolean
44 |         default: false
45 |   workflow_call:
46 |     inputs:
47 |       branch:
48 |         required: true
49 |         type: string
50 |       max_workers:
51 |         required: true
52 |         type: number
53 |         default: 4
54 |       netsim_branch:
55 |         required: true
56 |         type: string
57 |         default: "main"
58 |       debug_logs:
59 |         required: false
60 |         type: boolean
61 |         default: false
62 |       build_profile:
63 |         required: false
64 |         type: string
65 |         default: "release"
66 |       sim_paths:
67 |         required: false
68 |         type: string
69 |         default: "sims/iroh,sims/integration"
70 |       publish_metrics:
71 |         required: false
72 |         type: boolean
73 |         default: false
74 |       visualizations:
75 |         required: false
76 |         type: boolean
77 |         default: false
78 |       pr_number:
79 |         required: false
80 |         type: string
81 |         default: ""
82 |       report_table:
83 |         required: false
84 |         type: boolean
85 |         default: false
86 | 
87 | env:
88 |   RUST_BACKTRACE: 1
89 |   RUSTFLAGS: -Dwarnings
90 |   MSRV: "1.66"
91 |   SCCACHE_GHA_ENABLED: "true"
92 |   RUSTC_WRAPPER: "sccache"
93 |   IROH_FORCE_STAGING_RELAYS: "1"
94 | 
95 | jobs:
96 |   netsim:
97 |     permissions: write-all
98 |     name: Netsim
99 |     timeout-minutes: 45
100 |     runs-on: [self-hosted, linux, X64]
101 |     steps:
102 |       - name: Checkout
103 |         uses: actions/checkout@v6
104 |         with:
105 |           submodules: recursive
106 |           ref: ${{ inputs.branch }}
107 | 
108 |       - name: Install rust stable
109 |         uses: dtolnay/rust-toolchain@stable
110 | 
111 |       - name: Install sccache
112 |         uses: mozilla-actions/sccache-action@v0.0.9
113 | 
114 |       - name: Build iroh
115 |         run: |
116 |           cargo build --profile ${{ inputs.build_profile }} --workspace --all-features --examples --bins
117 | 
118 |       - name: Fetch and build chuck
119 |         run: |
120 |           cd ..
121 |           rm -rf chuck
122 |           git clone --single-branch --branch ${{ inputs.netsim_branch }} https://github.com/n0-computer/chuck.git
123 |           cd chuck
124 |           cargo build --release
125 | 
126 |       - name: Install netsim deps
127 |         run: |
128 |           cd ../chuck/netsim
129 |           sudo apt update
130 |           ./setup.sh
131 |           ./cleanup.sh || true
132 | 
133 |       - name: Copy binaries to right location
134 |         run: |
135 |           cp target/${{inputs.build_profile}}/examples/* ../chuck/netsim/bins/
136 |           cp target/${{inputs.build_profile}}/iroh ../chuck/netsim/bins/iroh
137 |           cp target/${{inputs.build_profile}}/iroh-relay ../chuck/netsim/bins/iroh-relay
138 |           cp ../chuck/target/release/chuck ../chuck/netsim/bins/chuck
139 | 
140 |       - name: Get commit sha
141 |         shell: bash
142 |         run: |
143 |           echo "LAST_COMMIT_SHA=$(git rev-parse --short ${GITHUB_SHA})" >> ${GITHUB_ENV}
144 | 
145 |       - name: Run tests
146 |         id: run_tests
147 |         continue-on-error: true
148 |         run: |
149 |           cd ../chuck/netsim
150 |           # split sim_paths by comma
151 |           IFS=',' read -ra sim_paths <<< "${{ inputs.sim_paths }}"
152 |           for sim_path in "${sim_paths[@]}"; do
153 |             sudo python3 main.py ${{ inputs.debug_logs && '--debug' || ''}} ${{ inputs.visualizations && '--visualize' || ''}} --max-workers=${{ inputs.max_workers }} --integration $sim_path
154 |           done
155 | 
156 |       - name: Generate report
157 |         id: generate_report
158 |         if: always()
159 |         run: |
160 |           cd ../chuck/netsim
161 |           python3 reports_csv.py --table > report_table.txt
162 |           python3 reports_csv.py --prom --commit ${{ env.LAST_COMMIT_SHA }} > report_prom.txt
163 |           python3 reports_csv.py --metro --commit ${{ env.LAST_COMMIT_SHA }} > report_metro.txt
164 |           python3 reports_csv.py --metro --integration --commit ${{ env.LAST_COMMIT_SHA }} > report_metro_integration.txt
165 | 
166 |       - name: Upload report to S3
167 |         if: always()
168 |         run: |
169 |           export AWS_ACCESS_KEY_ID=${{secrets.S3_ACCESS_KEY_ID}}
170 |           export AWS_SECRET_ACCESS_KEY=${{secrets.S3_ACCESS_KEY}}
171 |           export AWS_DEFAULT_REGION=us-west-2
172 | 
173 |           curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip"
174 |           unzip -q awscliv2.zip
175 |           sudo ./aws/install --update
176 | 
177 |           cd ../chuck/netsim
178 | 
179 |           aws_fname=${{ env.LAST_COMMIT_SHA }}.tar.gz
180 |           tar -cvzf report.tar.gz report_prom.txt report_table.txt report_metro.txt report_metro_integration.txt logs/ report/ viz/
181 |           if [[ -n "${{ secrets.S3_BUCKET }}" ]]; then
182 |             aws s3 cp ./report.tar.gz s3://${{secrets.S3_REPORT_BUCKET}}/$aws_fname --no-progress
183 |           fi
184 | 
185 |       - name: Move report
186 |         if: always()
187 |         run: |
188 |           cp ../chuck/netsim/report.tar.gz ./report.tar.gz
189 | 
190 |       - name: Upload report artifact
191 |         if: always()
192 |         uses: actions/upload-artifact@v5
193 |         id: upload-report
194 |         with:
195 |           name: netsim-report-${{ env.LAST_COMMIT_SHA }}
196 |           path: report.tar.gz
197 |           retention-days: 3
198 |           overwrite: true
199 | 
200 |       - name: Fail Job if Tests Failed
201 |         if: ${{ steps.run_tests.outcome == 'failure' }}
202 |         run: |
203 |           echo "Tests failed. Logs are available at: ${{steps.upload-report.outputs.artifact-url}}"
204 |           exit 1
205 | 
206 |       - name: Find Docs Comment
207 |         if: ${{ inputs.pr_number != '' }}
208 |         uses: peter-evans/find-comment@v4
209 |         id: fc
210 |         with:
211 |           issue-number: ${{ inputs.pr_number }}
212 |           comment-author: 'github-actions[bot]'
213 |           body-includes: Netsim report & logs for this PR have been generated
214 | 
215 |       - name: Create or Update Docs Comment
216 |         if: ${{ inputs.pr_number != '' && !github.event.pull_request.head.repo.fork }}
217 |         uses: peter-evans/create-or-update-comment@v5
218 |         with:
219 |           issue-number: ${{ inputs.pr_number }}
220 |           comment-id: ${{ steps.fc.outputs.comment-id }}
221 |           body: |
222 |             Netsim report & logs for this PR have been generated and are available at: [LOGS](${{steps.upload-report.outputs.artifact-url}})
223 |             This report will remain available for 3 days.
224 | 
225 |             Last updated for commit: ${{ env.LAST_COMMIT_SHA }}
226 |           edit-mode: replace
227 | 
228 |       - name: Generate report table
229 |         if: ${{ inputs.pr_number != '' && inputs.report_table }}
230 |         id: generate_report_table
231 |         run: |
232 |           cd ../chuck/netsim
233 |           export NETSIM_REPORT=$(cat report_table.txt)
234 |           echo "NETSIM_REPORT<<EOFMARKER" >> ${GITHUB_OUTPUT}
235 |           echo "${NETSIM_REPORT}" >> ${GITHUB_OUTPUT}
236 |           echo "EOFMARKER" >> ${GITHUB_OUTPUT}
237 | 
238 |       - name: Echo Report Table on PR
239 |         uses: peter-evans/create-or-update-comment@v5
240 |         if: ${{ inputs.pr_number != '' && inputs.report_table && !github.event.pull_request.head.repo.fork }}
241 |         with:
242 |           issue-number: ${{ inputs.pr_number }}
243 |           body: |
244 |             `${{ inputs.branch }}.${{ env.LAST_COMMIT_SHA }}`
245 |             Perf report:
246 |             ${{ steps.generate_report_table.outputs.NETSIM_REPORT }}
247 | 
248 |       - name: Publish metrics
249 |         if: ${{ inputs.publish_metrics && !github.event.pull_request.head.repo.fork }}
250 |         run: |
251 |           cd ../chuck/netsim
252 |           d=$(cat report_metro.txt)
253 |           metro_data=$(printf "%s\n " "$d")
254 |           curl -X POST -H "Content-Type: application/json" -H "Authorization: Bearer ${{secrets.METRO_TOKEN}}" --data "$metro_data" ${{secrets.METRO_ENDPOINT}}
255 |           d=$(cat report_metro_integration.txt)
256 |           metro_data=$(printf "%s\n " "$d")
257 |           curl -X POST -H "Content-Type: application/json" -H "Authorization: Bearer ${{secrets.METRO_TOKEN}}" --data "$metro_data" ${{secrets.METRO_ENDPOINT}}
258 | 
259 |       - name: Echo metrics (debug)
260 |         run: |
261 |           cd ../chuck/netsim
262 |           d=$(cat report_metro.txt)
263 |           metro_data=$(printf "%s\n " "$d")
264 |           echo "$metro_data"
265 |           d=$(cat report_metro_integration.txt)
266 |           metro_data=$(printf "%s\n " "$d")
267 |           echo "$metro_data"
268 | 
269 |       - name: Cleanup
270 |         run: |
271 |           ./cleanup.sh || true
--------------------------------------------------------------------------------