├── .config
│   └── nextest.toml
├── .github
│   ├── dependabot.yaml
│   ├── pull_request_template.md
│   └── workflows
│       ├── beta.yaml
│       ├── ci.yaml
│       ├── cleanup.yaml
│       ├── commit.yaml
│       ├── docs.yaml
│       ├── flaky.yaml
│       └── tests.yaml
├── .gitignore
├── .img
│   └── iroh_wordmark.svg
├── CHANGELOG.md
├── Cargo.lock
├── Cargo.toml
├── LICENSE-APACHE
├── LICENSE-MIT
├── Makefile.toml
├── README.md
├── cliff.toml
├── code_of_conduct.md
├── deny.toml
├── docs
│   └── img
│       ├── get_machine.drawio
│       └── get_machine.drawio.svg
├── examples
│   ├── custom-protocol.rs
│   ├── discovery-local-network.rs
│   ├── fetch-fsm.rs
│   ├── fetch-stream.rs
│   ├── hello-world-fetch.rs
│   ├── hello-world-provide.rs
│   ├── provide-bytes.rs
│   └── transfer.rs
├── proptest-regressions
│   ├── protocol
│   │   └── range_spec.txt
│   └── provider.txt
├── release.toml
├── src
│   ├── cli.rs
│   ├── cli
│   │   └── tags.rs
│   ├── downloader.rs
│   ├── downloader
│   │   ├── get.rs
│   │   ├── invariants.rs
│   │   ├── progress.rs
│   │   ├── test.rs
│   │   └── test
│   │       ├── dialer.rs
│   │       └── getter.rs
│   ├── export.rs
│   ├── format.rs
│   ├── format
│   │   └── collection.rs
│   ├── get.rs
│   ├── get
│   │   ├── db.rs
│   │   ├── error.rs
│   │   ├── progress.rs
│   │   └── request.rs
│   ├── hash.rs
│   ├── hashseq.rs
│   ├── lib.rs
│   ├── metrics.rs
│   ├── net_protocol.rs
│   ├── protocol.rs
│   ├── protocol
│   │   └── range_spec.rs
│   ├── provider.rs
│   ├── rpc.rs
│   ├── rpc
│   │   ├── client.rs
│   │   ├── client
│   │   │   ├── blobs.rs
│   │   │   ├── blobs
│   │   │   │   └── batch.rs
│   │   │   └── tags.rs
│   │   ├── proto.rs
│   │   └── proto
│   │       ├── blobs.rs
│   │       └── tags.rs
│   ├── store.rs
│   ├── store
│   │   ├── bao_file.rs
│   │   ├── fs.rs
│   │   ├── fs
│   │   │   ├── tables.rs
│   │   │   ├── test_support.rs
│   │   │   ├── tests.rs
│   │   │   ├── util.rs
│   │   │   └── validate.rs
│   │   ├── mem.rs
│   │   ├── mutable_mem_storage.rs
│   │   ├── readonly_mem.rs
│   │   └── traits.rs
│   ├── ticket.rs
│   ├── util.rs
│   └── util
│       ├── fs.rs
│       ├── hexdump.rs
│       ├── io.rs
│       ├── local_pool.rs
│       ├── mem_or_file.rs
│       ├── progress.rs
│       └── sparse_mem_file.rs
└── tests
    ├── blobs.rs
    ├── gc.rs
    ├── rpc.rs
    └── tags.rs
/.config/nextest.toml:
--------------------------------------------------------------------------------
1 | [test-groups]
2 | run-in-isolation = { max-threads = 32 }
3 | # These are tests that must not run concurrently with other tests. All tests in
4 | # this group can take up at most 32 threads among them, and each test requires
5 | # all 32 threads, so the effect is that these tests run in isolation.
6 |
7 | [[profile.ci.overrides]]
8 | filter = 'test(::run_in_isolation::)'
9 | test-group = 'run-in-isolation'
10 | threads-required = 32
11 |
--------------------------------------------------------------------------------
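As a concrete illustration of how the filter above attaches: nextest matches `test(::run_in_isolation::)` against the fully qualified test name, so a test opts into the group simply by living in a module with that name. A minimal sketch (hypothetical test, not from this repo):

```rust
#[cfg(test)]
mod run_in_isolation {
    // The fully qualified name `crate::...::run_in_isolation::exclusive_test`
    // matches the `test(::run_in_isolation::)` filter, so under the `ci`
    // profile this test reserves all 32 threads of the group and therefore
    // never runs concurrently with any other test in the group.
    #[test]
    fn exclusive_test() {
        // ... body of a test that must run alone ...
    }
}
```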
/.github/dependabot.yaml:
--------------------------------------------------------------------------------
1 | # Keep GitHub Actions up to date with GitHub's Dependabot...
2 | # https://docs.github.com/en/code-security/dependabot/working-with-dependabot/keeping-your-actions-up-to-date-with-dependabot
3 | # https://docs.github.com/en/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file#package-ecosystem
4 | version: 2
5 | updates:
6 | - package-ecosystem: github-actions
7 | directory: /
8 | groups:
9 | github-actions:
10 | patterns:
11 | - "*" # Group all Actions updates into a single larger pull request
12 | schedule:
13 | interval: weekly
14 |
--------------------------------------------------------------------------------
/.github/pull_request_template.md:
--------------------------------------------------------------------------------
1 | ## Description
2 |
3 |
4 |
5 | ## Breaking Changes
6 |
7 |
8 |
9 | ## Notes & open questions
10 |
11 |
12 |
13 | ## Change checklist
14 |
15 | - [ ] Self-review.
16 | - [ ] Documentation updates following the [style guide](https://rust-lang.github.io/rfcs/1574-more-api-documentation-conventions.html#appendix-a-full-conventions-text), if relevant.
17 | - [ ] Tests if relevant.
18 | - [ ] All breaking changes documented.
19 |
--------------------------------------------------------------------------------
/.github/workflows/beta.yaml:
--------------------------------------------------------------------------------
1 | # Run tests using the beta Rust compiler
2 |
3 | name: Beta Rust
4 |
5 | on:
6 | schedule:
7 | # 06:50 UTC every Monday
8 | - cron: '50 6 * * 1'
9 | workflow_dispatch:
10 |
11 | concurrency:
12 | group: beta-${{ github.workflow }}-${{ github.ref }}
13 | cancel-in-progress: true
14 |
15 | env:
16 | IROH_FORCE_STAGING_RELAYS: "1"
17 |
18 | jobs:
19 | tests:
20 | uses: './.github/workflows/tests.yaml'
21 | with:
22 | rust-version: beta
23 | notify:
24 | needs: tests
25 | if: ${{ always() }}
26 | runs-on: ubuntu-latest
27 | steps:
28 | - name: Extract test results
29 | run: |
30 | printf '${{ toJSON(needs) }}\n'
31 | result=$(echo '${{ toJSON(needs) }}' | jq -r .tests.result)
32 | echo TESTS_RESULT=$result
33 | echo "TESTS_RESULT=$result" >>"$GITHUB_ENV"
34 | - name: Notify discord on failure
35 | uses: n0-computer/discord-webhook-notify@v1
36 | if: ${{ env.TESTS_RESULT == 'failure' }}
37 | with:
38 | severity: error
39 | details: |
40 | Rustc beta tests failed in **${{ github.repository }}**
41 | See https://github.com/${{ github.repository }}/actions/workflows/beta.yaml
42 | webhookUrl: ${{ secrets.DISCORD_N0_GITHUB_CHANNEL_WEBHOOK_URL }}
43 |
44 |
--------------------------------------------------------------------------------
/.github/workflows/ci.yaml:
--------------------------------------------------------------------------------
1 | name: CI
2 |
3 | on:
4 | pull_request:
5 | types: [ 'labeled', 'unlabeled', 'opened', 'synchronize', 'reopened' ]
6 | merge_group:
7 | push:
8 | branches:
9 | - main
10 |
11 | concurrency:
12 | group: ci-${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
13 | cancel-in-progress: true
14 |
15 | env:
16 | RUST_BACKTRACE: 1
17 | RUSTFLAGS: -Dwarnings
18 | RUSTDOCFLAGS: -Dwarnings
19 | MSRV: "1.81"
20 | SCCACHE_CACHE_SIZE: "50G"
21 | IROH_FORCE_STAGING_RELAYS: "1"
22 |
23 | jobs:
24 | tests:
25 | name: CI Test Suite
26 | if: "github.event_name != 'pull_request' || ! contains(github.event.pull_request.labels.*.name, 'flaky-test')"
27 | uses: './.github/workflows/tests.yaml'
28 |
29 | cross_build:
30 | name: Cross Build Only
31 | if: "github.event_name != 'pull_request' || ! contains(github.event.pull_request.labels.*.name, 'flaky-test')"
32 | timeout-minutes: 30
33 | runs-on: [self-hosted, linux, X64]
34 | strategy:
35 | fail-fast: false
36 | matrix:
37 | target:
38 | # cross tests are currently broken for armv7 and aarch64
39 | # see https://github.com/cross-rs/cross/issues/1311
40 | # - armv7-linux-androideabi
41 | # - aarch64-linux-android
42 | # Freebsd execution fails in cross
43 | # - i686-unknown-freebsd # Linking fails :/
44 | - x86_64-unknown-freebsd
45 | # Netbsd execution fails to link in cross
46 | # - x86_64-unknown-netbsd
47 | steps:
48 | - name: Checkout
49 | uses: actions/checkout@v4
50 | with:
51 | submodules: recursive
52 |
53 | - name: Install rust stable
54 | uses: dtolnay/rust-toolchain@stable
55 |
56 | - name: Cleanup Docker
57 | continue-on-error: true
58 | run: |
59 | docker kill $(docker ps -q)
60 |
61 | # See https://github.com/cross-rs/cross/issues/1222
62 | - uses: taiki-e/install-action@cross
63 |
64 | - name: build
65 | # cross tests are currently broken for armv7 and aarch64
66 | # see https://github.com/cross-rs/cross/issues/1311. So on
67 | # those platforms we only build but do not run tests.
68 | run: cross build --all --target ${{ matrix.target }}
69 | env:
70 | RUST_LOG: ${{ runner.debug && 'TRACE' || 'DEBUG'}}
71 |
72 | android_build:
73 | name: Android Build Only
74 | if: "github.event_name != 'pull_request' || ! contains(github.event.pull_request.labels.*.name, 'flaky-test')"
75 | timeout-minutes: 30
76 | # runs-on: ubuntu-latest
77 | runs-on: [self-hosted, linux, X64]
78 | strategy:
79 | fail-fast: false
80 | matrix:
81 | target:
82 | - aarch64-linux-android
83 | - armv7-linux-androideabi
84 | steps:
85 | - name: Checkout
86 | uses: actions/checkout@v4
87 |
88 | - name: Set up Rust
89 | uses: dtolnay/rust-toolchain@stable
90 | with:
91 | target: ${{ matrix.target }}
92 | - name: Install rustup target
93 | run: rustup target add ${{ matrix.target }}
94 |
95 | - name: Setup Java
96 | uses: actions/setup-java@v4
97 | with:
98 | distribution: 'temurin'
99 | java-version: '17'
100 |
101 | - name: Setup Android SDK
102 | uses: android-actions/setup-android@v3
103 |
104 | - name: Setup Android NDK
105 | uses: arqu/setup-ndk@main
106 | id: setup-ndk
107 | with:
108 | ndk-version: r23
109 | add-to-path: true
110 |
111 | - name: Build
112 | env:
113 | ANDROID_NDK_HOME: ${{ steps.setup-ndk.outputs.ndk-path }}
114 | run: |
115 | cargo install --version 3.5.4 cargo-ndk
116 | cargo ndk --target ${{ matrix.target }} build
117 |
118 | cross_test:
119 | name: Cross Test
120 | if: "github.event_name != 'pull_request' || ! contains(github.event.pull_request.labels.*.name, 'flaky-test')"
121 | timeout-minutes: 30
122 | runs-on: [self-hosted, linux, X64]
123 | strategy:
124 | fail-fast: false
125 | matrix:
126 | target:
127 | - i686-unknown-linux-gnu
128 | steps:
129 | - name: Checkout
130 | uses: actions/checkout@v4
131 | with:
132 | submodules: recursive
133 |
134 | - name: Install rust stable
135 | uses: dtolnay/rust-toolchain@stable
136 |
137 | - name: Cleanup Docker
138 | continue-on-error: true
139 | run: |
140 | docker kill $(docker ps -q)
141 |
142 | # See https://github.com/cross-rs/cross/issues/1222
143 | - uses: taiki-e/install-action@cross
144 |
145 | - name: test
146 | run: cross test --all --target ${{ matrix.target }} -- --test-threads=12
147 | env:
148 | RUST_LOG: ${{ runner.debug && 'TRACE' || 'DEBUG' }}
149 |
150 | check_semver:
151 | runs-on: ubuntu-latest
152 | env:
153 | RUSTC_WRAPPER: "sccache"
154 | SCCACHE_GHA_ENABLED: "on"
155 | steps:
156 | - uses: actions/checkout@v4
157 | with:
158 | fetch-depth: 0
159 | - name: Install sccache
160 | uses: mozilla-actions/sccache-action@v0.0.9
161 |
162 | - name: Setup Environment (PR)
163 | if: ${{ github.event_name == 'pull_request' }}
164 | shell: bash
165 | run: |
166 | echo "HEAD_COMMIT_SHA=$(git rev-parse origin/${{ github.base_ref }})" >> ${GITHUB_ENV}
167 | - name: Setup Environment (Push)
168 | if: ${{ github.event_name == 'push' || github.event_name == 'merge_group' }}
169 | shell: bash
170 | run: |
171 | echo "HEAD_COMMIT_SHA=$(git rev-parse origin/main)" >> ${GITHUB_ENV}
172 | - name: Check semver
173 | # uses: obi1kenobi/cargo-semver-checks-action@v2
174 | uses: n0-computer/cargo-semver-checks-action@feat-baseline
175 | with:
176 | package: iroh-blobs
177 | baseline-rev: ${{ env.HEAD_COMMIT_SHA }}
178 | use-cache: false
179 |
180 | check_fmt:
181 | timeout-minutes: 30
182 | name: Checking fmt
183 | runs-on: ubuntu-latest
184 | env:
185 | RUSTC_WRAPPER: "sccache"
186 | SCCACHE_GHA_ENABLED: "on"
187 | steps:
188 | - uses: actions/checkout@v4
189 | - uses: dtolnay/rust-toolchain@stable
190 | with:
191 | components: rustfmt
192 | - uses: mozilla-actions/sccache-action@v0.0.9
193 | - uses: taiki-e/install-action@cargo-make
194 | - run: cargo make format-check
195 |
196 | check_docs:
197 | timeout-minutes: 30
198 | name: Checking docs
199 | runs-on: ubuntu-latest
200 | env:
201 | RUSTC_WRAPPER: "sccache"
202 | SCCACHE_GHA_ENABLED: "on"
203 | steps:
204 | - uses: actions/checkout@v4
205 | - uses: dtolnay/rust-toolchain@master
206 | with:
207 | toolchain: nightly-2024-11-30
208 | - name: Install sccache
209 | uses: mozilla-actions/sccache-action@v0.0.9
210 |
211 | - name: Docs
212 | run: cargo doc --workspace --all-features --no-deps --document-private-items
213 | env:
214 | RUSTDOCFLAGS: --cfg docsrs
215 |
216 | clippy_check:
217 | timeout-minutes: 30
218 | runs-on: ubuntu-latest
219 | env:
220 | RUSTC_WRAPPER: "sccache"
221 | SCCACHE_GHA_ENABLED: "on"
222 | steps:
223 | - uses: actions/checkout@v4
224 | - uses: dtolnay/rust-toolchain@stable
225 | with:
226 | components: clippy
227 | - name: Install sccache
228 | uses: mozilla-actions/sccache-action@v0.0.9
229 |
230 | # TODO: We have a bunch of platform-dependent code so should
231 | # probably run this job on the full platform matrix
232 | - name: clippy check (all features)
233 | run: cargo clippy --workspace --all-features --all-targets --bins --tests --benches
234 |
235 | - name: clippy check (no features)
236 | run: cargo clippy --workspace --no-default-features --lib --bins --tests
237 |
238 | - name: clippy check (default features)
239 | run: cargo clippy --workspace --all-targets
240 |
241 | msrv:
242 | if: "github.event_name != 'pull_request' || ! contains(github.event.pull_request.labels.*.name, 'flaky-test')"
243 | timeout-minutes: 30
244 | name: Minimal Supported Rust Version
245 | runs-on: ubuntu-latest
246 | env:
247 | RUSTC_WRAPPER: "sccache"
248 | SCCACHE_GHA_ENABLED: "on"
249 | steps:
250 | - uses: actions/checkout@v4
251 | - uses: dtolnay/rust-toolchain@master
252 | with:
253 | toolchain: ${{ env.MSRV }}
254 | - name: Install sccache
255 | uses: mozilla-actions/sccache-action@v0.0.9
256 |
257 | - name: Check MSRV all features
258 | run: |
259 | cargo +$MSRV check --workspace --all-targets
260 |
261 | cargo_deny:
262 | timeout-minutes: 30
263 | name: cargo deny
264 | runs-on: ubuntu-latest
265 | steps:
266 | - uses: actions/checkout@v4
267 | - uses: EmbarkStudios/cargo-deny-action@v2
268 | with:
269 | arguments: --workspace --all-features
270 | command: check
271 | command-arguments: "-Dwarnings"
272 |
273 | codespell:
274 | timeout-minutes: 30
275 | runs-on: ubuntu-latest
276 | steps:
277 | - uses: actions/checkout@v4
278 | - run: pip install --user codespell[toml]
279 | - run: codespell --ignore-words-list=ans,atmost,crate,inout,ratatui,ser,stayin,swarmin,worl --skip=CHANGELOG.md
280 |
--------------------------------------------------------------------------------
/.github/workflows/cleanup.yaml:
--------------------------------------------------------------------------------
1 | # Clean up old generated docs previews
2 |
3 | name: Cleanup
4 |
5 | on:
6 | schedule:
7 | # 06:50 UTC every Monday
8 | - cron: '50 6 * * 1'
9 | workflow_dispatch:
10 |
11 | concurrency:
12 | group: beta-${{ github.workflow }}-${{ github.ref }}
13 | cancel-in-progress: true
14 |
15 | env:
16 | IROH_FORCE_STAGING_RELAYS: "1"
17 |
18 | jobs:
19 | clean_docs_branch:
20 | permissions:
21 | issues: write
22 | contents: write
23 | runs-on: ubuntu-latest
24 | steps:
25 | - name: Checkout
26 | uses: actions/checkout@v4
27 | with:
28 | ref: generated-docs-preview
29 | - name: Clean docs branch
30 | run: |
31 | cd pr/
32 | # keep the last 25 prs
33 | dirs=$(ls -1d [0-9]* | sort -n)
34 | total_dirs=$(echo "$dirs" | wc -l)
35 | dirs_to_remove=$(echo "$dirs" | head -n $(($total_dirs - 25)))
36 | if [ -n "$dirs_to_remove" ]; then
37 | echo "$dirs_to_remove" | xargs rm -rf
38 | fi
39 | git add .
40 | git commit -m "Cleanup old docs"
41 | git push
42 |
43 |
44 |
45 |
46 |
--------------------------------------------------------------------------------
/.github/workflows/commit.yaml:
--------------------------------------------------------------------------------
1 | name: Commits
2 |
3 | on:
4 | pull_request:
5 | branches: [main]
6 | types: [opened, edited, synchronize]
7 |
8 | env:
9 | IROH_FORCE_STAGING_RELAYS: "1"
10 |
11 | jobs:
12 | check-for-cc:
13 | runs-on: ubuntu-latest
14 | steps:
15 | - name: check-for-cc
16 | id: check-for-cc
17 | uses: agenthunt/conventional-commit-checker-action@v2.0.0
18 | with:
19 | pr-title-regex: "^(.+)(?:\\(([^)\\s]+)\\))?!?: (.+)"
20 |
--------------------------------------------------------------------------------
/.github/workflows/docs.yaml:
--------------------------------------------------------------------------------
1 | name: Docs Preview
2 |
3 | on:
4 | pull_request:
5 | workflow_dispatch:
6 | inputs:
7 | pr_number:
8 | required: true
9 | type: string
10 |
11 | # ensure job runs sequentially so pushing to the preview branch doesn't conflict
12 | concurrency:
13 | group: ci-docs-preview
14 |
15 | env:
16 | IROH_FORCE_STAGING_RELAYS: "1"
17 |
18 | jobs:
19 | preview_docs:
20 | permissions: write-all
21 | timeout-minutes: 30
22 | name: Docs preview
23 | if: ${{ (github.event_name == 'pull_request' || github.event_name == 'workflow_dispatch' ) && !github.event.pull_request.head.repo.fork }}
24 | runs-on: ubuntu-latest
25 | env:
26 | RUSTC_WRAPPER: "sccache"
27 | SCCACHE_GHA_ENABLED: "on"
28 | SCCACHE_CACHE_SIZE: "50G"
29 | PREVIEW_PATH: pr/${{ github.event.pull_request.number || inputs.pr_number }}/docs
30 |
31 | steps:
32 | - uses: actions/checkout@v4
33 | - uses: dtolnay/rust-toolchain@master
34 | with:
35 | toolchain: nightly-2024-11-30
36 | - name: Install sccache
37 | uses: mozilla-actions/sccache-action@v0.0.9
38 |
39 | - name: Generate Docs
40 | run: cargo doc --workspace --all-features --no-deps
41 | env:
42 | RUSTDOCFLAGS: --cfg iroh_docsrs
43 |
44 | - name: Deploy Docs to Preview Branch
45 | uses: peaceiris/actions-gh-pages@v4
46 | with:
47 | github_token: ${{ secrets.GITHUB_TOKEN }}
48 | publish_dir: ./target/doc/
49 | destination_dir: ${{ env.PREVIEW_PATH }}
50 | publish_branch: generated-docs-preview
51 |
52 | - name: Find Docs Comment
53 | uses: peter-evans/find-comment@v3
54 | id: fc
55 | with:
56 | issue-number: ${{ github.event.pull_request.number || inputs.pr_number }}
57 | comment-author: 'github-actions[bot]'
58 | body-includes: Documentation for this PR has been generated
59 |
60 | - name: Get current timestamp
61 | id: get_timestamp
62 | run: echo "TIMESTAMP=$(date -u +'%Y-%m-%dT%H:%M:%SZ')" >> $GITHUB_ENV
63 |
64 | - name: Create or Update Docs Comment
65 | uses: peter-evans/create-or-update-comment@v4
66 | with:
67 | issue-number: ${{ github.event.pull_request.number || inputs.pr_number }}
68 | comment-id: ${{ steps.fc.outputs.comment-id }}
69 | body: |
70 | Documentation for this PR has been generated and is available at: https://${{ github.repository_owner }}.github.io/${{ github.event.repository.name }}/${{ env.PREVIEW_PATH }}/iroh_blobs/
71 |
72 | Last updated: ${{ env.TIMESTAMP }}
73 | edit-mode: replace
74 |
--------------------------------------------------------------------------------
/.github/workflows/flaky.yaml:
--------------------------------------------------------------------------------
1 | # Run all tests, including flaky test.
2 | #
3 | # The default CI workflow ignores flaky tests. This workflow will run
4 | # all tests, including ignored ones.
5 | #
6 | # To use this workflow you can either:
7 | #
8 | # - Label a PR with "flaky-test", the normal CI workflow will not run
9 | # any jobs but the jobs here will be run. Note that to merge the PR
10 | # you'll need to remove the label eventually because the normal CI
11 | # jobs are required by branch protection.
12 | #
13 | # - Manually trigger the workflow, you may choose a branch for this to
14 | # run on.
15 | #
16 | # Additionally this job runs once a day on a schedule.
17 | #
18 | # Currently doctests are not run by this workflow.
19 |
20 | name: Flaky CI
21 |
22 | on:
23 | pull_request:
24 | types: [ 'labeled', 'unlabeled', 'opened', 'synchronize', 'reopened' ]
25 | schedule:
26 | # 06:30 UTC every day
27 | - cron: '30 6 * * *'
28 | workflow_dispatch:
29 | inputs:
30 | branch:
31 | description: 'Branch to run on, defaults to main'
32 | required: true
33 | default: 'main'
34 | type: string
35 |
36 | concurrency:
37 | group: flaky-${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
38 | cancel-in-progress: true
39 |
40 | env:
41 | IROH_FORCE_STAGING_RELAYS: "1"
42 |
43 | jobs:
44 | tests:
45 | if: "contains(github.event.pull_request.labels.*.name, 'flaky-test') || github.event_name == 'workflow_dispatch' || github.event_name == 'schedule'"
46 | uses: './.github/workflows/tests.yaml'
47 | with:
48 | flaky: true
49 | git-ref: ${{ inputs.branch }}
50 | notify:
51 | needs: tests
52 | if: ${{ always() }}
53 | runs-on: ubuntu-latest
54 | steps:
55 | - name: Extract test results
56 | run: |
57 | printf '${{ toJSON(needs) }}\n'
58 | result=$(echo '${{ toJSON(needs) }}' | jq -r .tests.result)
59 | echo TESTS_RESULT=$result
60 | echo "TESTS_RESULT=$result" >>"$GITHUB_ENV"
61 | - name: download nextest reports
62 | uses: actions/download-artifact@v4
63 | with:
64 | pattern: libtest_run_${{ github.run_number }}-${{ github.run_attempt }}-*
65 | merge-multiple: true
66 | path: nextest-results
67 | - name: create summary report
68 | id: make_summary
69 | run: |
70 | # prevent the glob expression in the loop to match on itself when the dir is empty
71 | shopt -s nullglob
72 | # to deal with multiline outputs it's recommended to use a random EOF, the syntax is based on
73 | # https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#multiline-strings
74 | EOF=aP51VriWCxNJ1JjvmO9i
75 | echo "summary<<$EOF" >> $GITHUB_OUTPUT
76 | echo "Flaky tests failure:" >> $GITHUB_OUTPUT
77 | echo " " >> $GITHUB_OUTPUT
78 | for report in nextest-results/*.json; do
79 | # remove the name prefix and extension, and split the parts
80 | name=$(echo ${report:16:-5} | tr _ ' ')
81 | echo $name
82 | echo "- **$name**" >> $GITHUB_OUTPUT
83 | # select the failed tests
84 | # the tests have this format "crate::module$test_name"; the sed expressions remove the quotes and replace $ with ::
85 | failure=$(jq --slurp '.[] | select(.["type"] == "test" and .["event"] == "failed" ) | .["name"]' $report | sed -e 's/^"//g' -e 's/\$/::/' -e 's/"//')
86 | echo "$failure"
87 | echo "$failure" >> $GITHUB_OUTPUT
88 | done
89 | echo "" >> $GITHUB_OUTPUT
90 | echo "See https://github.com/${{ github.repository }}/actions/workflows/flaky.yaml" >> $GITHUB_OUTPUT
91 | echo "$EOF" >> $GITHUB_OUTPUT
92 | - name: Notify discord on failure
93 | uses: n0-computer/discord-webhook-notify@v1
94 | if: ${{ env.TESTS_RESULT == 'failure' || env.TESTS_RESULT == 'success' }}
95 | with:
96 | text: "Flaky tests in **${{ github.repository }}**:"
97 | severity: ${{ env.TESTS_RESULT == 'failure' && 'warn' || 'info' }}
98 | details: ${{ env.TESTS_RESULT == 'failure' && steps.make_summary.outputs.summary || 'No flaky failures!' }}
99 | webhookUrl: ${{ secrets.DISCORD_N0_GITHUB_CHANNEL_WEBHOOK_URL }}
100 |
--------------------------------------------------------------------------------
/.github/workflows/tests.yaml:
--------------------------------------------------------------------------------
1 | # Run all tests, with or without flaky tests.
2 |
3 | name: Tests
4 |
5 | on:
6 | workflow_call:
7 | inputs:
8 | rust-version:
9 | description: 'The version of the rust compiler to run'
10 | type: string
11 | default: 'stable'
12 | flaky:
13 | description: 'Whether to also run flaky tests'
14 | type: boolean
15 | default: false
16 | git-ref:
17 | description: 'Which git ref to checkout'
18 | type: string
19 | default: ${{ github.ref }}
20 |
21 | env:
22 | RUST_BACKTRACE: 1
23 | RUSTFLAGS: -Dwarnings
24 | RUSTDOCFLAGS: -Dwarnings
25 | SCCACHE_CACHE_SIZE: "50G"
26 | CRATES_LIST: "iroh-blobs"
27 | IROH_FORCE_STAGING_RELAYS: "1"
28 |
29 | jobs:
30 | build_and_test_nix:
31 | timeout-minutes: 30
32 | name: "Tests"
33 | runs-on: ${{ matrix.runner }}
34 | strategy:
35 | fail-fast: false
36 | matrix:
37 | name: [ubuntu-latest, macOS-arm-latest]
38 | rust: [ '${{ inputs.rust-version }}' ]
39 | features: [all, none, default]
40 | include:
41 | - name: ubuntu-latest
42 | os: ubuntu-latest
43 | release-os: linux
44 | release-arch: amd64
45 | runner: [self-hosted, linux, X64]
46 | - name: macOS-arm-latest
47 | os: macOS-latest
48 | release-os: darwin
49 | release-arch: aarch64
50 | runner: [self-hosted, macOS, ARM64]
51 | env:
52 | # Using self-hosted runners so use local cache for sccache and
53 | # not SCCACHE_GHA_ENABLED.
54 | RUSTC_WRAPPER: "sccache"
55 | steps:
56 | - name: Checkout
57 | uses: actions/checkout@v4
58 | with:
59 | ref: ${{ inputs.git-ref }}
60 |
61 | - name: Install ${{ matrix.rust }} rust
62 | uses: dtolnay/rust-toolchain@master
63 | with:
64 | toolchain: ${{ matrix.rust }}
65 |
66 | - name: Install cargo-nextest
67 | uses: taiki-e/install-action@v2
68 | with:
69 | tool: nextest@0.9.80
70 |
71 | - name: Install sccache
72 | uses: mozilla-actions/sccache-action@v0.0.9
73 |
74 | - name: Select features
75 | run: |
76 | case "${{ matrix.features }}" in
77 | all)
78 | echo "FEATURES=--all-features" >> "$GITHUB_ENV"
79 | ;;
80 | none)
81 | echo "FEATURES=--no-default-features" >> "$GITHUB_ENV"
82 | ;;
83 | default)
84 | echo "FEATURES=" >> "$GITHUB_ENV"
85 | ;;
86 | *)
87 | exit 1
88 | esac
89 |
90 | - name: check features
91 | if: ${{ ! inputs.flaky }}
92 | run: |
93 | for i in ${CRATES_LIST//,/ }
94 | do
95 | echo "Checking $i $FEATURES"
96 | if [ $i = "iroh-cli" ]; then
97 | targets="--bins"
98 | else
99 | targets="--lib --bins"
100 | fi
101 | echo cargo check -p $i $FEATURES $targets
102 | cargo check -p $i $FEATURES $targets
103 | done
104 | env:
105 | RUST_LOG: ${{ runner.debug && 'TRACE' || 'DEBUG'}}
106 |
107 | - name: build tests
108 | run: |
109 | cargo nextest run --workspace ${{ env.FEATURES }} --lib --bins --tests --no-run
110 |
111 | - name: list ignored tests
112 | run: |
113 | cargo nextest list --workspace ${{ env.FEATURES }} --lib --bins --tests --run-ignored ignored-only
114 |
115 | - name: run tests
116 | run: |
117 | mkdir -p output
118 | cargo nextest run --workspace ${{ env.FEATURES }} --lib --bins --tests --profile ci --run-ignored ${{ inputs.flaky && 'all' || 'default' }} --no-fail-fast --message-format ${{ inputs.flaky && 'libtest-json' || 'human' }} > output/${{ matrix.name }}_${{ matrix.features }}_${{ matrix.rust }}.json
119 | env:
120 | RUST_LOG: ${{ runner.debug && 'TRACE' || 'DEBUG'}}
121 | NEXTEST_EXPERIMENTAL_LIBTEST_JSON: 1
122 |
123 | - name: upload results
124 | if: ${{ failure() && inputs.flaky }}
125 | uses: actions/upload-artifact@v4
126 | with:
127 | name: libtest_run_${{ github.run_number }}-${{ github.run_attempt }}-${{ matrix.name }}_${{ matrix.features }}_${{ matrix.rust }}.json
128 | path: output
129 | retention-days: 45
130 | compression-level: 0
131 |
132 | - name: doctests
133 | if: ${{ (! inputs.flaky) && matrix.features == 'all' }}
134 | run: |
135 | if [ -n "${{ runner.debug }}" ]; then
136 | export RUST_LOG=TRACE
137 | else
138 | export RUST_LOG=DEBUG
139 | fi
140 | cargo test --workspace --all-features --doc
141 |
142 | build_and_test_windows:
143 | timeout-minutes: 30
144 | name: "Tests"
145 | runs-on: ${{ matrix.runner }}
146 | strategy:
147 | fail-fast: false
148 | matrix:
149 | name: [windows-latest]
150 | rust: [ '${{ inputs.rust-version}}' ]
151 | features: [all, none, default]
152 | target:
153 | - x86_64-pc-windows-msvc
154 | include:
155 | - name: windows-latest
156 | os: windows
157 | runner: [self-hosted, windows, x64]
158 | env:
159 | # Using self-hosted runners so use local cache for sccache and
160 | # not SCCACHE_GHA_ENABLED.
161 | RUSTC_WRAPPER: "sccache"
162 | steps:
163 | - name: Checkout
164 | uses: actions/checkout@v4
165 | with:
166 | ref: ${{ inputs.git-ref }}
167 |
168 | - name: Install ${{ matrix.rust }}
169 | run: |
170 | rustup toolchain install ${{ matrix.rust }}
171 | rustup toolchain default ${{ matrix.rust }}
172 | rustup target add ${{ matrix.target }}
173 | rustup set default-host ${{ matrix.target }}
174 |
175 | - name: Install cargo-nextest
176 | shell: powershell
177 | run: |
178 | $tmp = New-TemporaryFile | Rename-Item -NewName { $_ -replace 'tmp$', 'zip' } -PassThru
179 | Invoke-WebRequest -OutFile $tmp https://get.nexte.st/latest/windows
180 | $outputDir = if ($Env:CARGO_HOME) { Join-Path $Env:CARGO_HOME "bin" } else { "~/.cargo/bin" }
181 | $tmp | Expand-Archive -DestinationPath $outputDir -Force
182 | $tmp | Remove-Item
183 |
184 | - name: Select features
185 | run: |
186 | switch ("${{ matrix.features }}") {
187 | "all" {
188 | echo "FEATURES=--all-features" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf8 -Append
189 | }
190 | "none" {
191 | echo "FEATURES=--no-default-features" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf8 -Append
192 | }
193 | "default" {
194 | echo "FEATURES=" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf8 -Append
195 | }
196 | default {
197 | Exit 1
198 | }
199 | }
200 |
201 | - name: Install sccache
202 | uses: mozilla-actions/sccache-action@v0.0.9
203 |
204 | - uses: msys2/setup-msys2@v2
205 |
206 | - name: build tests
207 | run: |
208 | cargo nextest run --workspace ${{ env.FEATURES }} --lib --bins --tests --target ${{ matrix.target }} --no-run
209 |
210 | - name: list ignored tests
211 | run: |
212 | cargo nextest list --workspace ${{ env.FEATURES }} --lib --bins --tests --target ${{ matrix.target }} --run-ignored ignored-only
213 |
214 | - name: tests
215 | run: |
216 | mkdir -p output
217 | cargo nextest run --workspace ${{ env.FEATURES }} --lib --bins --tests --profile ci --target ${{ matrix.target }} --run-ignored ${{ inputs.flaky && 'all' || 'default' }} --no-fail-fast --message-format ${{ inputs.flaky && 'libtest-json' || 'human' }} > output/${{ matrix.name }}_${{ matrix.features }}_${{ matrix.rust }}.json
218 | env:
219 | RUST_LOG: ${{ runner.debug && 'TRACE' || 'DEBUG'}}
220 | NEXTEST_EXPERIMENTAL_LIBTEST_JSON: 1
221 |
222 | - name: upload results
223 | if: ${{ failure() && inputs.flaky }}
224 | uses: actions/upload-artifact@v4
225 | with:
226 | name: libtest_run_${{ github.run_number }}-${{ github.run_attempt }}-${{ matrix.name }}_${{ matrix.features }}_${{ matrix.rust }}.json
227 | path: output
228 | retention-days: 1
229 | compression-level: 0
230 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | /target
2 | iroh.config.toml
3 | .vscode/*
4 |
--------------------------------------------------------------------------------
/.img/iroh_wordmark.svg:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "iroh-blobs"
3 | version = "0.35.0"
4 | edition = "2021"
5 | readme = "README.md"
6 | description = "blob and collection transfer support for iroh"
7 | license = "MIT OR Apache-2.0"
8 | authors = ["dignifiedquire ", "n0 team"]
9 | repository = "https://github.com/n0-computer/iroh-blobs"
10 | keywords = ["hashing", "quic", "blake3"]
11 |
12 | # Sadly this also needs to be updated in .github/workflows/ci.yaml
13 | rust-version = "1.81"
14 |
15 | [dependencies]
16 | anyhow = { version = "1" }
17 | async-channel = "2.3.1"
18 | bao-tree = { version = "0.15.1", features = [
19 | "tokio_fsm",
20 | "validate",
21 | ], default-features = false }
22 | blake3 = { version = "1.8" }
23 | bytes = { version = "1.7", features = ["serde"] }
24 | chrono = "0.4.31"
25 | clap = { version = "4.5.20", features = ["derive"], optional = true }
26 | data-encoding = { version = "2.3.3" }
27 | derive_more = { version = "1.0.0", features = [
28 | "debug",
29 | "display",
30 | "deref",
31 | "deref_mut",
32 | "from",
33 | "try_into",
34 | "into",
35 | ] }
36 | futures-buffered = "0.2.4"
37 | futures-lite = "2.3"
38 | futures-util = { version = "0.3.30", optional = true }
39 | genawaiter = { version = "0.99.1", features = ["futures03"] }
40 | hashlink = { version = "0.9.0", optional = true }
41 | hex = "0.4.3"
42 | indicatif = { version = "0.17.8", optional = true }
43 | iroh-base = "0.35"
44 | iroh-io = { version = "0.6.0", features = ["stats"] }
45 | iroh-metrics = { version = "0.34", default-features = false }
46 | iroh = "0.35"
47 | nested_enum_utils = { version = "0.1.0", optional = true }
48 | num_cpus = "1.15.0"
49 | oneshot = "0.1.8"
50 | parking_lot = { version = "0.12.1", optional = true }
51 | portable-atomic = { version = "1", optional = true }
52 | postcard = { version = "1", default-features = false, features = [
53 | "alloc",
54 | "use-std",
55 | "experimental-derive",
56 | ] }
57 | quic-rpc = { version = "0.20", optional = true }
58 | quic-rpc-derive = { version = "0.20", optional = true }
59 | rand = "0.8"
60 | range-collections = "0.4.0"
61 | redb = { version = "=2.4", optional = true }
62 | reflink-copy = { version = "0.1.8", optional = true }
63 | self_cell = "1.0.1"
64 | serde = { version = "1", features = ["derive"] }
65 | serde-error = "0.1.3"
66 | smallvec = { version = "1.10.0", features = ["serde", "const_new"] }
67 | strum = { version = "0.26.3", optional = true }
68 | ssh-key = { version = "0.6", optional = true, features = ["ed25519"] }
69 | tempfile = { version = "3.10.0", optional = true }
70 | thiserror = "2"
71 | tokio = { version = "1", features = ["fs"] }
72 | tokio-util = { version = "0.7", features = ["io-util", "io"] }
73 | tracing = "0.1"
74 | tracing-futures = "0.2.5"
75 | walkdir = { version = "2.5.0", optional = true }
76 |
77 | # Examples
78 | console = { version = "0.15.8", optional = true }
79 | tracing-test = "0.2.5"
80 |
81 | [dev-dependencies]
82 | http-body = "1.0"
83 | iroh = { version = "0.35", features = ["test-utils"] }
84 | quinn = { package = "iroh-quinn", version = "0.13", features = ["ring"] }
85 | futures-buffered = "0.2.4"
86 | proptest = "1.0.0"
87 | serde_json = "1.0.107"
88 | serde_test = "1.0.176"
89 | testresult = "0.4.0"
90 | tokio = { version = "1", features = ["macros", "test-util"] }
91 | tracing-subscriber = { version = "0.3", features = ["env-filter"] }
92 | rcgen = "0.13"
93 | rustls = { version = "0.23", default-features = false, features = ["ring"] }
94 | tempfile = "3.10.0"
95 | futures-util = "0.3.30"
96 | testdir = "0.9.1"
97 |
98 | [features]
99 | default = ["fs-store", "net_protocol", "rpc"]
100 | downloader = ["dep:parking_lot", "tokio-util/time", "dep:hashlink"]
101 | net_protocol = ["downloader", "dep:futures-util"]
102 | fs-store = ["dep:reflink-copy", "redb", "dep:tempfile"]
103 | metrics = ["iroh-metrics/metrics"]
104 | redb = ["dep:redb"]
105 | cli = ["rpc", "dep:clap", "dep:indicatif", "dep:console"]
106 | rpc = [
107 | "dep:quic-rpc",
108 | "dep:quic-rpc-derive",
109 | "dep:nested_enum_utils",
110 | "dep:strum",
111 | "dep:futures-util",
112 | "dep:portable-atomic",
113 | "dep:walkdir",
114 | "dep:ssh-key",
115 | "downloader",
116 | ]
117 |
118 | example-iroh = [
119 | "dep:clap",
120 | "dep:indicatif",
121 | "dep:console",
122 | "iroh/discovery-local-network"
123 | ]
124 | test = ["quic-rpc/quinn-transport", "quic-rpc/test-utils"]
125 |
126 | [package.metadata.docs.rs]
127 | all-features = true
128 | rustdoc-args = ["--cfg", "iroh_docsrs"]
129 |
130 | [[example]]
131 | name = "provide-bytes"
132 |
133 | [[example]]
134 | name = "fetch-fsm"
135 |
136 | [[example]]
137 | name = "fetch-stream"
138 |
139 | [[example]]
140 | name = "transfer"
141 | required-features = ["rpc"]
142 |
143 | [[example]]
144 | name = "hello-world-fetch"
145 | required-features = ["example-iroh"]
146 |
147 | [[example]]
148 | name = "hello-world-provide"
149 | required-features = ["example-iroh"]
150 |
151 | [[example]]
152 | name = "discovery-local-network"
153 | required-features = ["example-iroh"]
154 |
155 | [[example]]
156 | name = "custom-protocol"
157 | required-features = ["example-iroh"]
158 |
159 | [lints.rust]
160 | missing_debug_implementations = "warn"
161 |
162 | # We use this --cfg for documenting the cargo features on which an API
163 | # is available. To preview this locally use: RUSTFLAGS="--cfg
164 | # iroh_docsrs cargo +nightly doc --all-features". We use our own
165 | # iroh_docsrs instead of the common docsrs to avoid also enabling this
166 | # feature in any dependencies, because some indirect dependencies
167 | # require a feature enabled when using `--cfg docsrs` which we can not
168 | # do. To enable for a crate set `#![cfg_attr(iroh_docsrs,
169 | # feature(doc_cfg))]` in the crate.
170 | unexpected_cfgs = { level = "warn", check-cfg = ["cfg(iroh_docsrs)"] }
171 |
172 | [lints.clippy]
173 | unused-async = "warn"
174 |
175 | [profile.dev-ci]
176 | inherits = 'dev'
177 | opt-level = 1
178 |
179 | [profile.optimized-release]
180 | inherits = 'release'
181 | debug = false
182 | lto = true
183 | debug-assertions = false
184 | opt-level = 3
185 | panic = 'abort'
186 | incremental = false
187 |
--------------------------------------------------------------------------------
/LICENSE-MIT:
--------------------------------------------------------------------------------
1 | Copyright 2023 N0, INC.
2 |
3 | Permission is hereby granted, free of charge, to any
4 | person obtaining a copy of this software and associated
5 | documentation files (the "Software"), to deal in the
6 | Software without restriction, including without
7 | limitation the rights to use, copy, modify, merge,
8 | publish, distribute, sublicense, and/or sell copies of
9 | the Software, and to permit persons to whom the Software
10 | is furnished to do so, subject to the following
11 | conditions:
12 |
13 | The above copyright notice and this permission notice
14 | shall be included in all copies or substantial portions
15 | of the Software.
16 |
17 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
18 | ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
19 | TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
20 | PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
21 | SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
22 | CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
24 | IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
25 | DEALINGS IN THE SOFTWARE.
26 |
--------------------------------------------------------------------------------
/Makefile.toml:
--------------------------------------------------------------------------------
1 | # Use cargo-make to run tasks here: https://crates.io/crates/cargo-make
2 |
3 | [tasks.format]
4 | workspace = false
5 | command = "cargo"
6 | args = [
7 | "fmt",
8 | "--all",
9 | "--",
10 | "--config",
11 | "unstable_features=true",
12 | "--config",
13 | "imports_granularity=Crate,group_imports=StdExternalCrate,reorder_imports=true",
14 | ]
15 |
16 | [tasks.format-check]
17 | workspace = false
18 | command = "cargo"
19 | args = [
20 | "fmt",
21 | "--all",
22 | "--check",
23 | "--",
24 | "--config",
25 | "unstable_features=true",
26 | "--config",
27 | "imports_granularity=Crate,group_imports=StdExternalCrate,reorder_imports=true",
28 | ]
29 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # iroh-blobs
2 |
3 | This crate provides blob and blob sequence transfer support for iroh. It implements a simple request-response protocol based on BLAKE3 verified streaming.
4 |
5 | A request describes data in terms of BLAKE3 hashes and byte ranges. It is possible to request blobs or ranges of blobs, as well as entire sequences of blobs in one request.
6 |
7 | The requester opens a QUIC stream to the provider and sends the request. The provider answers with the requested data, encoded as [BLAKE3](https://github.com/BLAKE3-team/BLAKE3-specs/blob/master/blake3.pdf) verified streams, on the same QUIC stream.
8 |
9 | This crate is used together with [iroh](https://crates.io/crates/iroh). Connection establishment is left up to the user or higher level APIs.
10 |
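As a quick illustration of what a request looks like in code, a minimal sketch grounded in the `fetch-fsm.rs` example in this repo (`hash` is a placeholder you would obtain out of band, e.g. from a ticket):

```rust
use iroh_blobs::{protocol::GetRequest, Hash};

fn build_requests(hash: Hash) -> (GetRequest, GetRequest) {
    // request a single blob by its BLAKE3 hash
    let single = GetRequest::single(hash);
    // or request a whole hash sequence (e.g. a collection) in one go
    let everything = GetRequest::all(hash);
    (single, everything)
}
```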
11 | ## Concepts
12 |
13 | - **Blob:** a sequence of bytes of arbitrary size, without any metadata.
14 |
15 | - **Link:** a 32 byte BLAKE3 hash of a blob.
16 |
17 | - **HashSeq:** a blob that contains a sequence of links. Its size is a multiple of 32 (see the sketch after this list).
18 |
19 | - **Provider:** The side that provides data and answers requests. Providers wait for incoming requests from requesters.
20 |
21 | - **Requester:** The side that asks for data. It initiates requests to one or many providers.
22 |
23 |
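Since a HashSeq is nothing more than concatenated 32-byte hashes, its layout is easy to picture. A minimal illustration (the crate's own type lives in `iroh_blobs::hashseq`; this standalone function is only for exposition):

```rust
// Encode a sequence of links as a HashSeq blob: the concatenation of
// 32-byte BLAKE3 hashes, so the result's size is always a multiple of 32.
fn encode_hash_seq(links: &[[u8; 32]]) -> Vec<u8> {
    let mut blob = Vec::with_capacity(links.len() * 32);
    for link in links {
        blob.extend_from_slice(link);
    }
    blob
}
```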
24 | ## Getting started
25 |
26 | The `iroh-blobs` protocol was designed to be used in conjunction with `iroh`. [Iroh](https://docs.rs/iroh) is a networking library for making direct connections; these connections are what power the data transfers in `iroh-blobs`.
27 |
28 | Iroh provides a [`Router`](https://docs.rs/iroh/latest/iroh/protocol/struct.Router.html) that takes an [`Endpoint`](https://docs.rs/iroh/latest/iroh/endpoint/struct.Endpoint.html) and any protocols needed for the application. Similar to a router in a web server library, it runs a loop accepting incoming connections and routes them to the specific protocol handler based on the `ALPN`.
29 |
30 | Here is a basic example of how to set up `iroh-blobs` with `iroh`:
31 |
32 | ```rust
33 | use iroh::{protocol::Router, Endpoint};
34 | use iroh_blobs::{store::Store, net_protocol::Blobs};
35 |
36 | #[tokio::main]
37 | async fn main() -> anyhow::Result<()> {
38 | // create an iroh endpoint that includes the standard discovery mechanisms
39 | // we've built at number0
40 | let endpoint = Endpoint::builder().discovery_n0().bind().await?;
41 |
42 | // create an in-memory blob store
43 | // use `iroh_blobs::net_protocol::Blobs::persistent` to load or create a
44 | // persistent blob store from a path
45 | let blobs = Blobs::memory().build(&endpoint);
46 |
47 | // turn on the "rpc" feature if you need to create blobs and tags clients
48 | let blobs_client = blobs.client();
49 | let tags_client = blobs_client.tags();
50 |
51 | // build the router
52 | let router = Router::builder(endpoint)
53 | .accept(iroh_blobs::ALPN, blobs.clone())
54 | .spawn();
55 |
56 | // do fun stuff with the blobs protocol!
57 | router.shutdown().await?;
58 | drop(tags_client);
59 | Ok(())
60 | }
61 | ```
62 |
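Continuing inside `main` from the snippet above, a hedged sketch of the requester side. It follows the `download` call used in the `discovery-local-network` example in this repo; `hash` and `node_id` are placeholders you would get from the providing node:

```rust
// inside main(), after `let blobs_client = blobs.client();`
// `hash: iroh_blobs::Hash` and `node_id: iroh::PublicKey` are assumed to be
// known out of band, e.g. parsed from a ticket shared by the provider.
use futures_lite::StreamExt;
use iroh::NodeAddr;

let mut progress = blobs_client.download(hash, NodeAddr::new(node_id)).await?;
while let Some(event) = progress.next().await {
    // each event reports download progress and surfaces transfer errors
    println!("{:?}", event?);
}
```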
63 | ## Examples
64 |
65 | Examples that use `iroh-blobs` can be found in [this repo](https://github.com/n0-computer/iroh-blobs/tree/main/examples).
66 |
67 | # License
68 |
69 | This project is licensed under either of
70 |
71 | * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
72 |   <https://www.apache.org/licenses/LICENSE-2.0>)
73 | * MIT license ([LICENSE-MIT](LICENSE-MIT) or
74 |   <https://opensource.org/licenses/MIT>)
75 |
76 | at your option.
77 |
78 | ### Contribution
79 |
80 | Unless you explicitly state otherwise, any contribution intentionally submitted
81 | for inclusion in this project by you, as defined in the Apache-2.0 license,
82 | shall be dual licensed as above, without any additional terms or conditions.
83 |
--------------------------------------------------------------------------------
/cliff.toml:
--------------------------------------------------------------------------------
1 | [changelog]
2 | # changelog header
3 | header = """
4 | # Changelog\n
5 | All notable changes to iroh-blobs will be documented in this file.\n
6 | """
7 |
8 | body = """
9 | {% if version %}\
10 | {% if previous.version %}\
11 | ## [{{ version | trim_start_matches(pat="v") }}](/compare/{{ previous.version }}..{{ version }}) - {{ timestamp | date(format="%Y-%m-%d") }}
12 | {% else %}\
13 | ## [{{ version | trim_start_matches(pat="v") }}] - {{ timestamp | date(format="%Y-%m-%d") }}
14 | {% endif %}\
15 | {% else %}\
16 | ## [unreleased]
17 | {% endif %}\
18 |
19 | {% macro commit(commit) -%}
20 | - {% if commit.scope %}*({{ commit.scope }})* {% endif %}{% if commit.breaking %}[**breaking**] {% endif %}\
21 | {{ commit.message | upper_first }} - ([{{ commit.id | truncate(length=7, end="") }}](/commit/{{ commit.id }}))\
22 | {% endmacro -%}
23 |
24 | {% for group, commits in commits | group_by(attribute="group") %}
25 | ### {{ group | striptags | trim | upper_first }}
26 | {% for commit in commits
27 | | filter(attribute="scope")
28 | | sort(attribute="scope") %}
29 | {{ self::commit(commit=commit) }}
30 | {%- endfor -%}
31 | {% raw %}\n{% endraw %}\
32 | {%- for commit in commits %}
33 | {%- if not commit.scope -%}
34 | {{ self::commit(commit=commit) }}
35 | {% endif -%}
36 | {% endfor -%}
37 | {% endfor %}\n
38 | """
39 |
40 | footer = ""
41 | postprocessors = [
42 | { pattern = '', replace = "https://github.com/n0-computer/iroh-blobs" },
43 | { pattern = "\\(#([0-9]+)\\)", replace = "([#${1}](https://github.com/n0-computer/iroh-blobs/issues/${1}))"}
44 | ]
45 |
46 |
47 | [git]
48 | # regex for parsing and grouping commits
49 | commit_parsers = [
50 | { message = "^feat", group = "⛰️ Features" },
51 | { message = "^fix", group = "🐛 Bug Fixes" },
52 | { message = "^doc", group = "📚 Documentation" },
53 | { message = "^perf", group = "⚡ Performance" },
54 | { message = "^refactor", group = "🚜 Refactor" },
55 | { message = "^style", group = "🎨 Styling" },
56 | { message = "^test", group = "🧪 Testing" },
57 | { message = "^chore\\(release\\)", skip = true },
58 | { message = "^chore\\(deps\\)", skip = true },
59 | { message = "^chore\\(pr\\)", skip = true },
60 | { message = "^chore\\(pull\\)", skip = true },
61 | { message = "^chore|ci", group = "⚙️ Miscellaneous Tasks" },
62 | { body = ".*security", group = "🛡️ Security" },
63 | { message = "^revert", group = "◀️ Revert" },
64 | ]
65 |
--------------------------------------------------------------------------------
/code_of_conduct.md:
--------------------------------------------------------------------------------
1 | # Code of Conduct
2 |
3 | Online or off, Number Zero is a harassment-free environment for everyone, regardless of gender, gender identity and expression, sexual orientation, disability, physical appearance, body size, race, age or religion or technical skill level. We do not tolerate harassment of participants in any form.
4 |
5 | Harassment includes verbal comments that reinforce social structures of domination related to gender, gender identity and expression, sexual orientation, disability, physical appearance, body size, race, age, religion, sexual images in public spaces, deliberate intimidation, stalking, following, harassing photography or recording, sustained disruption of talks or other events, inappropriate physical contact, and unwelcome sexual attention. Participants asked to stop any harassing behavior are expected to comply immediately.
6 |
7 | If a participant engages in harassing behaviour, the organizers may take any action they deem appropriate, including warning the offender or expulsion from events and online forums.
8 |
9 | If you are being harassed, notice that someone else is being harassed, or have any other concerns, please contact a member of the organizing team immediately.
10 |
11 | At offline events, organizers will identify themselves, and will help participants contact venue security or local law enforcement, provide escorts, or otherwise assist those experiencing harassment to feel safe for the duration of the event. We value your participation!
12 |
13 | This document is based on a similar code from [EDGI](https://envirodatagov.org/) and [Civic Tech Toronto](http://civictech.ca/about-us/), itself derived from the [Recurse Center’s Social Rules](https://www.recurse.com/manual#sec-environment), and the [anti-harassment policy from the Geek Feminism Wiki](http://geekfeminism.wikia.com/wiki/Conference_anti-harassment/Policy).
14 |
--------------------------------------------------------------------------------
/deny.toml:
--------------------------------------------------------------------------------
1 | [advisories]
2 | ignore = [
3 | "RUSTSEC-2024-0370",
4 | "RUSTSEC-2024-0384",
5 | "RUSTSEC-2024-0436",
6 | "RUSTSEC-2023-0089",
7 | ]
8 |
9 | [bans]
10 | deny = [
11 | "aws-lc",
12 | "aws-lc-rs",
13 | "aws-lc-sys",
14 | "native-tls",
15 | "openssl",
16 | ]
17 | multiple-versions = "allow"
18 |
19 | [licenses]
20 | allow = [
21 | "Apache-2.0",
22 | "Apache-2.0 WITH LLVM-exception",
23 | "BSD-2-Clause",
24 | "BSD-3-Clause",
25 | "BSL-1.0",
26 | "ISC",
27 | "MIT",
28 | "Zlib",
29 | "MPL-2.0",
30 | "Unicode-3.0",
31 | "Unlicense",
32 | ]
33 |
34 | [[licenses.clarify]]
35 | expression = "MIT AND ISC AND OpenSSL"
36 | name = "ring"
37 |
38 | [[licenses.clarify.license-files]]
39 | hash = 3171872035
40 | path = "LICENSE"
41 |
42 | [sources]
43 | allow-git = []
44 |
--------------------------------------------------------------------------------
/examples/discovery-local-network.rs:
--------------------------------------------------------------------------------
1 | //! Example that runs an iroh node with local node discovery and no relay server
2 | //!
3 | //! Run the following command to run the "accept" side, which hosts the content:
4 | //! $ cargo run --example discovery_local_network --features="discovery-local-network" -- accept [FILE_PATH]
5 | //! Wait for output that looks like the following:
6 | //! $ cargo run --example discovery_local_network --features="discovery-local-network" -- connect [NODE_ID] [HASH] -o [FILE_PATH]
7 | //! Run that command on another machine in the same local network, replacing [FILE_PATH] with the path where you want to save the transferred content.
8 | use std::path::PathBuf;
9 |
10 | use anyhow::ensure;
11 | use clap::{Parser, Subcommand};
12 | use iroh::{
13 | discovery::mdns::MdnsDiscovery, protocol::Router, Endpoint, NodeAddr, PublicKey, RelayMode,
14 | SecretKey,
15 | };
16 | use iroh_blobs::{net_protocol::Blobs, rpc::client::blobs::WrapOption, Hash};
17 | use tracing_subscriber::{prelude::*, EnvFilter};
18 |
19 | use self::progress::show_download_progress;
20 |
21 | // set the RUST_LOG env var to one of {debug,info,warn} to see logging info
22 | pub fn setup_logging() {
23 | tracing_subscriber::registry()
24 | .with(tracing_subscriber::fmt::layer().with_writer(std::io::stderr))
25 | .with(EnvFilter::from_default_env())
26 | .try_init()
27 | .ok();
28 | }
29 |
30 | #[derive(Debug, Parser)]
31 | #[command(version, about)]
32 | pub struct Cli {
33 | #[clap(subcommand)]
34 | command: Commands,
35 | }
36 |
37 | #[derive(Subcommand, Clone, Debug)]
38 | pub enum Commands {
39 | /// Launch an iroh node and provide the content at the given path
40 | Accept {
41 | /// path to the file you want to provide
42 | path: PathBuf,
43 | },
44 | /// Get the node_id and hash string from a node running accept in the local network
45 | /// Download the content from that node.
46 | Connect {
47 | /// Node ID of a node on the local network
48 | node_id: PublicKey,
49 | /// Hash of content you want to download from the node
50 | hash: Hash,
51 | /// save the content to a file
52 | #[clap(long, short)]
53 | out: Option<PathBuf>,
54 | },
55 | }
56 |
57 | #[tokio::main]
58 | async fn main() -> anyhow::Result<()> {
59 | setup_logging();
60 | let cli = Cli::parse();
61 |
62 | let key = SecretKey::generate(rand::rngs::OsRng);
63 | let discovery = MdnsDiscovery::new(key.public())?;
64 |
65 | println!("Starting iroh node with mdns discovery...");
66 | // create a new node
67 | let endpoint = Endpoint::builder()
68 | .secret_key(key)
69 | .discovery(Box::new(discovery))
70 | .relay_mode(RelayMode::Disabled)
71 | .bind()
72 | .await?;
73 | let builder = Router::builder(endpoint);
74 | let blobs = Blobs::memory().build(builder.endpoint());
75 | let builder = builder.accept(iroh_blobs::ALPN, blobs.clone());
76 | let node = builder.spawn();
77 | let blobs_client = blobs.client();
78 |
79 | match &cli.command {
80 | Commands::Accept { path } => {
81 | if !path.is_file() {
82 | println!("Content must be a file.");
83 | node.shutdown().await?;
84 | return Ok(());
85 | }
86 | let absolute = path.canonicalize()?;
87 | println!("Adding {} as {}...", path.display(), absolute.display());
88 | let stream = blobs_client
89 | .add_from_path(
90 | absolute,
91 | true,
92 | iroh_blobs::util::SetTagOption::Auto,
93 | WrapOption::NoWrap,
94 | )
95 | .await?;
96 | let outcome = stream.finish().await?;
97 | println!("To fetch the blob:\n\tcargo run --example discovery_local_network --features=\"discovery-local-network\" -- connect {} {} -o [FILE_PATH]", node.endpoint().node_id(), outcome.hash);
98 | tokio::signal::ctrl_c().await?;
99 | node.shutdown().await?;
100 | std::process::exit(0);
101 | }
102 | Commands::Connect { node_id, hash, out } => {
103 | println!("NodeID: {}", node.endpoint().node_id());
104 | let mut stream = blobs_client
105 | .download(*hash, NodeAddr::new(*node_id))
106 | .await?;
107 | show_download_progress(*hash, &mut stream).await?;
108 | if let Some(path) = out {
109 | let absolute = std::env::current_dir()?.join(path);
110 | ensure!(!absolute.is_dir(), "output must not be a directory");
111 | tracing::info!(
112 | "exporting {hash} to {} -> {}",
113 | path.display(),
114 | absolute.display()
115 | );
116 | let stream = blobs_client
117 | .export(
118 | *hash,
119 | absolute,
120 | iroh_blobs::store::ExportFormat::Blob,
121 | iroh_blobs::store::ExportMode::Copy,
122 | )
123 | .await?;
124 | stream.await?;
125 | }
126 | }
127 | }
128 | Ok(())
129 | }
130 |
131 | mod progress {
132 | use anyhow::{bail, Result};
133 | use console::style;
134 | use futures_lite::{Stream, StreamExt};
135 | use indicatif::{
136 | HumanBytes, HumanDuration, MultiProgress, ProgressBar, ProgressDrawTarget, ProgressState,
137 | ProgressStyle,
138 | };
139 | use iroh_blobs::{
140 | get::{db::DownloadProgress, progress::BlobProgress, Stats},
141 | Hash,
142 | };
143 |
144 | pub async fn show_download_progress(
145 | hash: Hash,
146 | mut stream: impl Stream<Item = Result<DownloadProgress>> + Unpin,
147 | ) -> Result<()> {
148 | eprintln!("Fetching: {}", hash);
149 | let mp = MultiProgress::new();
150 | mp.set_draw_target(ProgressDrawTarget::stderr());
151 | let op = mp.add(make_overall_progress());
152 | let ip = mp.add(make_individual_progress());
153 | op.set_message(format!("{} Connecting ...\n", style("[1/3]").bold().dim()));
154 | let mut seq = false;
155 | while let Some(x) = stream.next().await {
156 | match x? {
157 | DownloadProgress::InitialState(state) => {
158 | if state.connected {
159 | op.set_message(format!("{} Requesting ...\n", style("[2/3]").bold().dim()));
160 | }
161 | if let Some(count) = state.root.child_count {
162 | op.set_message(format!(
163 | "{} Downloading {} blob(s)\n",
164 | style("[3/3]").bold().dim(),
165 | count + 1,
166 | ));
167 | op.set_length(count + 1);
168 | op.reset();
169 | op.set_position(state.current.map(u64::from).unwrap_or(0));
170 | seq = true;
171 | }
172 | if let Some(blob) = state.get_current() {
173 | if let Some(size) = blob.size {
174 | ip.set_length(size.value());
175 | ip.reset();
176 | match blob.progress {
177 | BlobProgress::Pending => {}
178 | BlobProgress::Progressing(offset) => ip.set_position(offset),
179 | BlobProgress::Done => ip.finish_and_clear(),
180 | }
181 | if !seq {
182 | op.finish_and_clear();
183 | }
184 | }
185 | }
186 | }
187 | DownloadProgress::FoundLocal { .. } => {}
188 | DownloadProgress::Connected => {
189 | op.set_message(format!("{} Requesting ...\n", style("[2/3]").bold().dim()));
190 | }
191 | DownloadProgress::FoundHashSeq { children, .. } => {
192 | op.set_message(format!(
193 | "{} Downloading {} blob(s)\n",
194 | style("[3/3]").bold().dim(),
195 | children + 1,
196 | ));
197 | op.set_length(children + 1);
198 | op.reset();
199 | seq = true;
200 | }
201 | DownloadProgress::Found { size, child, .. } => {
202 | if seq {
203 | op.set_position(child.into());
204 | } else {
205 | op.finish_and_clear();
206 | }
207 | ip.set_length(size);
208 | ip.reset();
209 | }
210 | DownloadProgress::Progress { offset, .. } => {
211 | ip.set_position(offset);
212 | }
213 | DownloadProgress::Done { .. } => {
214 | ip.finish_and_clear();
215 | }
216 | DownloadProgress::AllDone(Stats {
217 | bytes_read,
218 | elapsed,
219 | ..
220 | }) => {
221 | op.finish_and_clear();
222 | eprintln!(
223 | "Transferred {} in {}, {}/s",
224 | HumanBytes(bytes_read),
225 | HumanDuration(elapsed),
226 | HumanBytes((bytes_read as f64 / elapsed.as_secs_f64()) as u64)
227 | );
228 | break;
229 | }
230 | DownloadProgress::Abort(e) => {
231 | bail!("download aborted: {}", e);
232 | }
233 | }
234 | }
235 | Ok(())
236 | }
237 | fn make_overall_progress() -> ProgressBar {
238 | let pb = ProgressBar::hidden();
239 | pb.enable_steady_tick(std::time::Duration::from_millis(100));
240 | pb.set_style(
241 | ProgressStyle::with_template(
242 | "{msg}{spinner:.green} [{elapsed_precise}] [{wide_bar:.cyan/blue}] {pos}/{len}",
243 | )
244 | .unwrap()
245 | .progress_chars("#>-"),
246 | );
247 | pb
248 | }
249 |
250 | fn make_individual_progress() -> ProgressBar {
251 | let pb = ProgressBar::hidden();
252 | pb.enable_steady_tick(std::time::Duration::from_millis(100));
253 | pb.set_style(
254 | ProgressStyle::with_template("{msg}{spinner:.green} [{elapsed_precise}] [{wide_bar:.cyan/blue}] {bytes}/{total_bytes} ({eta})")
255 | .unwrap()
256 | .with_key(
257 | "eta",
258 | |state: &ProgressState, w: &mut dyn std::fmt::Write| {
259 | write!(w, "{:.1}s", state.eta().as_secs_f64()).unwrap()
260 | },
261 | )
262 | .progress_chars("#>-"),
263 | );
264 | pb
265 | }
266 | }
267 |
--------------------------------------------------------------------------------
/examples/fetch-fsm.rs:
--------------------------------------------------------------------------------
1 | //! An example of how to download a single blob or collection from a node and write it to stdout using the `get` finite state machine directly.
2 | //!
3 | //! Since this example disables relays (`RelayMode::Disabled`), it does not do any holepunching, and so will only work locally or between two processes that have public IP addresses.
4 | //!
5 | //! Run the provide-bytes example first. It will give instructions on how to run this example properly.
6 | use std::str::FromStr;
7 |
8 | use anyhow::{Context, Result};
9 | use iroh_blobs::{
10 | get::fsm::{AtInitial, ConnectedNext, EndBlobNext},
11 | hashseq::HashSeq,
12 | protocol::GetRequest,
13 | BlobFormat,
14 | };
15 | use iroh_io::ConcatenateSliceWriter;
16 | use tracing_subscriber::{prelude::*, EnvFilter};
17 |
18 | const EXAMPLE_ALPN: &[u8] = b"n0/iroh/examples/bytes/0";
19 |
20 | // set the RUST_LOG env var to one of {debug,info,warn} to see logging info
21 | pub fn setup_logging() {
22 | tracing_subscriber::registry()
23 | .with(tracing_subscriber::fmt::layer().with_writer(std::io::stderr))
24 | .with(EnvFilter::from_default_env())
25 | .try_init()
26 | .ok();
27 | }
28 |
29 | #[tokio::main]
30 | async fn main() -> Result<()> {
31 | println!("\nfetch fsm example!");
32 | setup_logging();
33 | let args: Vec<_> = std::env::args().collect();
34 | if args.len() != 2 {
35 | anyhow::bail!("usage: fetch-fsm [TICKET]");
36 | }
37 | let ticket =
38 | iroh_blobs::ticket::BlobTicket::from_str(&args[1]).context("unable to parse [TICKET]")?;
39 |
40 | let (node, hash, format) = ticket.into_parts();
41 |
42 | // create an endpoint to listen for incoming connections
43 | let endpoint = iroh::Endpoint::builder()
44 | .relay_mode(iroh::RelayMode::Disabled)
45 | .alpns(vec![EXAMPLE_ALPN.into()])
46 | .bind()
47 | .await?;
48 | println!(
49 | "\nlistening on {:?}",
50 | endpoint.node_addr().await?.direct_addresses
51 | );
52 | println!("fetching hash {hash} from {:?}", node.node_id);
53 |
54 | // connect
55 | let connection = endpoint.connect(node, EXAMPLE_ALPN).await?;
56 |
57 | match format {
58 | BlobFormat::HashSeq => {
59 | // create a request for a collection
60 | let request = GetRequest::all(hash);
61 | // create the initial state of the finite state machine
62 | let initial = iroh_blobs::get::fsm::start(connection, request);
63 |
64 | write_collection(initial).await
65 | }
66 | BlobFormat::Raw => {
67 | // create a request for a single blob
68 | let request = GetRequest::single(hash);
69 | // create the initial state of the finite state machine
70 | let initial = iroh_blobs::get::fsm::start(connection, request);
71 |
72 | write_blob(initial).await
73 | }
74 | }
75 | }
76 |
77 | async fn write_blob(initial: AtInitial) -> Result<()> {
78 | // connect (create a stream pair)
79 | let connected = initial.next().await?;
80 |
81 | // we expect a start root message, since we requested a single blob
82 | let ConnectedNext::StartRoot(start_root) = connected.next().await? else {
83 | panic!("expected start root")
84 | };
85 | // we can just call next to proceed to the header, since we know the root hash
86 | let header = start_root.next();
87 |
88 | // we need to wrap stdout in a struct that implements AsyncSliceWriter. Since we can not
89 | // seek in stdout we use ConcatenateSliceWriter which just concatenates all the writes.
90 | let writer = ConcatenateSliceWriter::new(tokio::io::stdout());
91 |
92 | // make the spacing nicer in the terminal
93 | println!();
94 | // use the utility function write_all to write the entire blob
95 | let end = header.write_all(writer).await?;
96 |
97 | // we requested a single blob, so we expect to enter the closing state
98 | let EndBlobNext::Closing(closing) = end.next() else {
99 | panic!("expected closing")
100 | };
101 |
102 | // close the connection and get the stats
103 | let _stats = closing.next().await?;
104 | Ok(())
105 | }
106 |
107 | async fn write_collection(initial: AtInitial) -> Result<()> {
108 | // connect
109 | let connected = initial.next().await?;
110 | // read the first bytes
111 | let ConnectedNext::StartRoot(start_root) = connected.next().await? else {
112 | anyhow::bail!("failed to parse collection");
113 | };
114 | // check that we requested the whole collection
115 | if !start_root.ranges().is_all() {
116 | anyhow::bail!("collection was not requested completely");
117 | }
118 |
119 | // move to the header
120 | let header: iroh_blobs::get::fsm::AtBlobHeader = start_root.next();
121 | let (root_end, hashes_bytes) = header.concatenate_into_vec().await?;
122 | let next = root_end.next();
123 | let EndBlobNext::MoreChildren(at_meta) = next else {
124 | anyhow::bail!("missing meta blob, got {next:?}");
125 | };
126 | // parse the hashes from the hash sequence bytes
127 | let hashes = HashSeq::try_from(bytes::Bytes::from(hashes_bytes))
128 | .context("failed to parse hashes")?
129 | .into_iter()
130 | .collect::<Vec<_>>();
131 | let meta_hash = hashes.first().context("missing meta hash")?;
132 |
133 | let (meta_end, _meta_bytes) = at_meta.next(*meta_hash).concatenate_into_vec().await?;
134 | let mut curr = meta_end.next();
135 | let closing = loop {
136 | match curr {
137 | EndBlobNext::MoreChildren(more) => {
138 | let Some(hash) = hashes.get(more.child_offset() as usize) else {
139 | break more.finish();
140 | };
141 | let header = more.next(*hash);
142 |
143 | // we need to wrap stdout in a struct that implements AsyncSliceWriter. Since we can not
144 | // seek in stdout we use ConcatenateSliceWriter which just concatenates all the writes.
145 | let writer = ConcatenateSliceWriter::new(tokio::io::stdout());
146 |
147 | // use the utility function write_all to write the entire blob
148 | let end = header.write_all(writer).await?;
149 | println!();
150 | curr = end.next();
151 | }
152 | EndBlobNext::Closing(closing) => {
153 | break closing;
154 | }
155 | }
156 | };
157 | // close the connection
158 | let _stats = closing.next().await?;
159 | Ok(())
160 | }
161 |
--------------------------------------------------------------------------------
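
The `get` FSM used above is a typestate API: each protocol phase (`AtInitial`, `ConnectedNext`, `AtBlobHeader`, `EndBlobNext`, ...) is a distinct type, and each `next()` consumes the current state, so an out-of-order transition fails to compile rather than at runtime. A minimal, self-contained sketch of that pattern with toy types (not the iroh-blobs API itself):

```rust
// Each state is its own type; transitions consume `self`, so the only
// way through the API is the valid sequence of states.
struct Initial;
struct Connected;
struct Closing;

impl Initial {
    fn next(self) -> Connected {
        Connected
    }
}
impl Connected {
    fn next(self) -> Closing {
        Closing
    }
}
impl Closing {
    fn finish(self) -> &'static str {
        "done"
    }
}

fn main() {
    // Initial -> Connected -> Closing; skipping a step would not compile.
    let done = Initial.next().next().finish();
    println!("{done}");
}
```
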
/examples/fetch-stream.rs:
--------------------------------------------------------------------------------
1 | //! An example of how to download a single blob or collection from a node and write it to stdout, using a helper method to turn the `get` finite state machine into a stream.
2 | //!
3 | //! Since this example disables relays (`RelayMode::Disabled`), it does not do any holepunching, and so will only work locally or between two processes that have public IP addresses.
4 | //!
5 | //! Run the provide-bytes example first. It will give instructions on how to run this example properly.
6 | use std::{io, str::FromStr};
7 |
8 | use anyhow::{Context, Result};
9 | use bao_tree::io::fsm::BaoContentItem;
10 | use bytes::Bytes;
11 | use futures_lite::{Stream, StreamExt};
12 | use genawaiter::sync::{Co, Gen};
13 | use iroh_blobs::{
14 | get::fsm::{AtInitial, BlobContentNext, ConnectedNext, EndBlobNext},
15 | hashseq::HashSeq,
16 | protocol::GetRequest,
17 | BlobFormat,
18 | };
19 | use tokio::io::AsyncWriteExt;
20 | use tracing_subscriber::{prelude::*, EnvFilter};
21 |
22 | const EXAMPLE_ALPN: &[u8] = b"n0/iroh/examples/bytes/0";
23 |
24 | // set the RUST_LOG env var to one of {debug,info,warn} to see logging info
25 | pub fn setup_logging() {
26 | tracing_subscriber::registry()
27 | .with(tracing_subscriber::fmt::layer().with_writer(std::io::stderr))
28 | .with(EnvFilter::from_default_env())
29 | .try_init()
30 | .ok();
31 | }
32 |
33 | #[tokio::main]
34 | async fn main() -> Result<()> {
35 | println!("\nfetch stream example!");
36 | setup_logging();
37 | let args: Vec<_> = std::env::args().collect();
38 | if args.len() != 2 {
39 | anyhow::bail!("usage: fetch-stream [TICKET]");
40 | }
41 | let ticket =
42 | iroh_blobs::ticket::BlobTicket::from_str(&args[1]).context("unable to parse [TICKET]")?;
43 |
44 | let (node, hash, format) = ticket.into_parts();
45 |
46 | // create an endpoint to listen for incoming connections
47 | let endpoint = iroh::Endpoint::builder()
48 | .relay_mode(iroh::RelayMode::Disabled)
49 | .alpns(vec![EXAMPLE_ALPN.into()])
50 | .bind()
51 | .await?;
52 | println!(
53 | "\nlistening on {:?}",
54 | endpoint.node_addr().await?.direct_addresses
55 | );
56 | println!("fetching hash {hash} from {:?}", node.node_id);
57 |
58 | // connect
59 | let connection = endpoint.connect(node, EXAMPLE_ALPN).await?;
60 |
61 | let mut stream = match format {
62 | BlobFormat::HashSeq => {
63 | // create a request for a collection
64 | let request = GetRequest::all(hash);
65 |
66 | // create the initial state of the finite state machine
67 | let initial = iroh_blobs::get::fsm::start(connection, request);
68 |
69 | // create a stream that yields all the data of the blob
70 | stream_children(initial).boxed_local()
71 | }
72 | BlobFormat::Raw => {
73 | // create a request for a single blob
74 | let request = GetRequest::single(hash);
75 |
76 | // create the initial state of the finite state machine
77 | let initial = iroh_blobs::get::fsm::start(connection, request);
78 |
79 | // create a stream that yields all the data of the blob
80 | stream_blob(initial).boxed_local()
81 | }
82 | };
83 | while let Some(item) = stream.next().await {
84 | let item = item?;
85 | tokio::io::stdout().write_all(&item).await?;
86 | println!();
87 | }
88 | Ok(())
89 | }
90 |
91 | /// Stream the response for a request for a single blob.
92 | ///
93 | /// If the request was for a part of the blob, this will stream just the requested
94 | /// blocks.
95 | ///
96 | /// This will stream the root blob and close the connection.
97 | fn stream_blob(initial: AtInitial) -> impl Stream<Item = io::Result<Bytes>> + 'static {
98 | async fn inner(initial: AtInitial, co: &Co<io::Result<Bytes>>) -> io::Result<()> {
99 | // connect
100 | let connected = initial.next().await?;
101 | // read the first bytes
102 | let ConnectedNext::StartRoot(start_root) = connected.next().await? else {
103 | return Err(io::Error::new(io::ErrorKind::Other, "expected start root"));
104 | };
105 | // move to the header
106 | let header = start_root.next();
107 | // get the size of the content
108 | let (mut content, _size) = header.next().await?;
109 | // manually loop over the content and yield all data
110 | let done = loop {
111 | match content.next().await {
112 | BlobContentNext::More((next, data)) => {
113 | if let BaoContentItem::Leaf(leaf) = data? {
114 | // yield the data
115 | co.yield_(Ok(leaf.data)).await;
116 | }
117 | content = next;
118 | }
119 | BlobContentNext::Done(done) => {
120 | // we are done with the root blob
121 | break done;
122 | }
123 | }
124 | };
125 | // close the connection even if there is more data
126 | let closing = match done.next() {
127 | EndBlobNext::Closing(closing) => closing,
128 | EndBlobNext::MoreChildren(more) => more.finish(),
129 | };
130 | // close the connection
131 | let _stats = closing.next().await?;
132 | Ok(())
133 | }
134 |
135 | Gen::new(|co| async move {
136 | if let Err(e) = inner(initial, &co).await {
137 | co.yield_(Err(e)).await;
138 | }
139 | })
140 | }
141 |
142 | /// Stream the response for a request for an iroh collection and its children.
143 | ///
144 | /// If the request was for a part of the children, this will stream just the requested
145 | /// blocks.
146 | ///
147 | /// The root blob is not streamed. It must be fully included in the response.
148 | fn stream_children(initial: AtInitial) -> impl Stream<Item = io::Result<Bytes>> + 'static {
149 | async fn inner(initial: AtInitial, co: &Co<io::Result<Bytes>>) -> io::Result<()> {
150 | // connect
151 | let connected = initial.next().await?;
152 | // read the first bytes
153 | let ConnectedNext::StartRoot(start_root) = connected.next().await? else {
154 | return Err(io::Error::new(
155 | io::ErrorKind::Other,
156 | "failed to parse collection",
157 | ));
158 | };
159 | // check that we requested the whole collection
160 | if !start_root.ranges().is_all() {
161 | return Err(io::Error::new(
162 | io::ErrorKind::Other,
163 | "collection was not requested completely",
164 | ));
165 | }
166 | // move to the header
167 | let header: iroh_blobs::get::fsm::AtBlobHeader = start_root.next();
168 | let (root_end, hashes_bytes) = header.concatenate_into_vec().await?;
169 |
170 | // parse the hashes from the hash sequence bytes
171 | let hashes = HashSeq::try_from(bytes::Bytes::from(hashes_bytes))
172 | .map_err(|e| {
173 | io::Error::new(io::ErrorKind::Other, format!("failed to parse hashes: {e}"))
174 | })?
175 | .into_iter()
176 | .collect::<Vec<_>>();
177 |
178 | let next = root_end.next();
179 | let EndBlobNext::MoreChildren(at_meta) = next else {
180 | return Err(io::Error::new(io::ErrorKind::Other, "missing meta blob"));
181 | };
182 | let meta_hash = hashes
183 | .first()
184 | .ok_or_else(|| io::Error::new(io::ErrorKind::Other, "missing meta link"))?;
185 | let (meta_end, _meta_bytes) = at_meta.next(*meta_hash).concatenate_into_vec().await?;
186 | let mut curr = meta_end.next();
187 | let closing = loop {
188 | match curr {
189 | EndBlobNext::MoreChildren(more) => {
190 | let Some(hash) = hashes.get(more.child_offset() as usize) else {
191 | break more.finish();
192 | };
193 | let header = more.next(*hash);
194 | let (mut content, _size) = header.next().await?;
195 | // manually loop over the content and yield all data
196 | let done = loop {
197 | match content.next().await {
198 | BlobContentNext::More((next, data)) => {
199 | if let BaoContentItem::Leaf(leaf) = data? {
200 | // yield the data
201 | co.yield_(Ok(leaf.data)).await;
202 | }
203 | content = next;
204 | }
205 | BlobContentNext::Done(done) => {
206 | // we are done with this child blob
207 | break done;
208 | }
209 | }
210 | };
211 | curr = done.next();
212 | }
213 | EndBlobNext::Closing(closing) => {
214 | break closing;
215 | }
216 | }
217 | };
218 | // close the connection
219 | let _stats = closing.next().await?;
220 | Ok(())
221 | }
222 |
223 | Gen::new(|co| async move {
224 | if let Err(e) = inner(initial, &co).await {
225 | co.yield_(Err(e)).await;
226 | }
227 | })
228 | }
229 |
--------------------------------------------------------------------------------
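
`stream_blob` and `stream_children` above bridge the imperative FSM walk into a `Stream` via `genawaiter::sync::Gen`: the async closure receives a `Co` handle and yields items as it produces them. A minimal, self-contained sketch of the same bridging, assuming genawaiter's `futures03` feature for the `Stream` impl (which the example's `boxed_local()` calls also rely on):

```rust
use futures_lite::{Stream, StreamExt};
use genawaiter::sync::{Co, Gen};

// Yield 0..n as a stream; the same shape as stream_blob/stream_children,
// just without the protocol state machine inside.
fn count(n: u32) -> impl Stream<Item = u32> {
    Gen::new(move |co: Co<u32>| async move {
        for i in 0..n {
            // hand one item to the consumer, then suspend until polled again
            co.yield_(i).await;
        }
    })
}

#[tokio::main]
async fn main() {
    let mut stream = count(3).boxed_local();
    while let Some(i) = stream.next().await {
        println!("{i}");
    }
}
```
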
/examples/hello-world-fetch.rs:
--------------------------------------------------------------------------------
1 | //! An example that fetches an iroh blob and prints the contents.
2 | //! Will only work with blobs and collections that contain text, and is meant as a companion to the `hello-world-provide` example.
3 | //!
4 | //! This example uses an in-memory database and a random node id.
5 | //! Run the `hello-world-provide` example first, which will print instructions on how to run this example.
6 | use std::{env, str::FromStr};
7 |
8 | use anyhow::{bail, ensure, Context, Result};
9 | use iroh::{protocol::Router, Endpoint};
10 | use iroh_blobs::{net_protocol::Blobs, ticket::BlobTicket, BlobFormat};
11 | use tracing_subscriber::{prelude::*, EnvFilter};
12 |
13 | // set the RUST_LOG env var to one of {debug,info,warn} to see logging info
14 | pub fn setup_logging() {
15 | tracing_subscriber::registry()
16 | .with(tracing_subscriber::fmt::layer().with_writer(std::io::stderr))
17 | .with(EnvFilter::from_default_env())
18 | .try_init()
19 | .ok();
20 | }
21 |
22 | #[tokio::main]
23 | async fn main() -> Result<()> {
24 | setup_logging();
25 | println!("\n'Hello World' fetch example!");
26 | // get the ticket
27 | let args: Vec<String> = env::args().collect();
28 |
29 | if args.len() != 2 {
30 | bail!("expected one argument [BLOB_TICKET]\n\nGet a ticket by running the follow command in a separate terminal:\n\n`cargo run --example hello-world-provide`");
31 | }
32 |
33 | // deserialize ticket string into a ticket
34 | let ticket =
35 | BlobTicket::from_str(&args[1]).context("failed parsing blob ticket\n\nGet a ticket by running the following command in a separate terminal:\n\n`cargo run --example hello-world-provide`")?;
36 |
37 | // create a new node
38 | let endpoint = Endpoint::builder().bind().await?;
39 | let builder = Router::builder(endpoint);
40 | let blobs = Blobs::memory().build(builder.endpoint());
41 | let builder = builder.accept(iroh_blobs::ALPN, blobs.clone());
42 | let node = builder.spawn();
43 | let blobs_client = blobs.client();
44 |
45 | println!("fetching hash: {}", ticket.hash());
46 | println!("node id: {}", node.endpoint().node_id());
47 | println!("node listening addresses:");
48 | let addrs = node.endpoint().node_addr().await?;
49 | for addr in addrs.direct_addresses() {
50 | println!("\t{:?}", addr);
51 | }
52 | println!(
53 | "node relay server url: {:?}",
54 | node.endpoint()
55 | .home_relay()
56 | .get()?
57 | .expect("a default relay url should be provided")
58 | .to_string()
59 | );
60 |
61 | // If the `BlobFormat` is `Raw`, we have the hash for a single blob, and simply need to read the blob using the `blobs` API on the client to get the content.
62 | ensure!(
63 | ticket.format() == BlobFormat::Raw,
64 | "'Hello World' example expects to fetch a single blob, but the ticket indicates a collection.",
65 | );
66 |
67 | // `download` returns a stream of `DownloadProgress` events. You can iterate through these updates to get progress
68 | // on the state of your download.
69 | let download_stream = blobs_client
70 | .download(ticket.hash(), ticket.node_addr().clone())
71 | .await?;
72 |
73 | // You can also just `await` the stream, which will poll the `DownloadProgress` stream for you.
74 | let outcome = download_stream.await.context("unable to download hash")?;
75 |
76 | println!(
77 | "\ndownloaded {} bytes from node {}",
78 | outcome.downloaded_size,
79 | ticket.node_addr().node_id
80 | );
81 |
82 | // Get the content we have just fetched from the iroh database.
83 |
84 | let bytes = blobs_client.read_to_bytes(ticket.hash()).await?;
85 | let s = std::str::from_utf8(&bytes).context("unable to parse blob as utf-8 string")?;
86 | println!("{s}");
87 |
88 | Ok(())
89 | }
90 |
--------------------------------------------------------------------------------
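
Instead of `await`ing the stream returned by `download` for the final outcome, the individual `DownloadProgress` events can be consumed one by one, as the CLI code elsewhere in this crate does. A hedged sketch under those assumptions (variant names as used in `iroh_blobs::get::db::DownloadProgress`; the `progress` parameter stands in for the stream returned by `blobs_client.download(..)`):

```rust
use anyhow::Result;
use futures_lite::{Stream, StreamExt};
use iroh_blobs::get::db::DownloadProgress;

// Report progress events until the download completes or the stream ends.
async fn report(mut progress: impl Stream<Item = Result<DownloadProgress>> + Unpin) -> Result<()> {
    while let Some(event) = progress.next().await {
        match event? {
            DownloadProgress::Connected => println!("connected to provider"),
            DownloadProgress::Found { size, .. } => println!("found blob of {size} bytes"),
            DownloadProgress::Progress { offset, .. } => println!("transferred {offset} bytes"),
            DownloadProgress::AllDone(stats) => {
                println!("done, read {} bytes", stats.bytes_read);
                break;
            }
            // ignore the remaining events in this sketch
            _ => {}
        }
    }
    Ok(())
}
```
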
/examples/hello-world-provide.rs:
--------------------------------------------------------------------------------
1 | //! The smallest possible example to spin up a node and serve a single blob.
2 | //!
3 | //! This example uses an in-memory database and a random node id.
4 | //! Run this example from the project root:
5 | //! $ cargo run --example hello-world-provide
6 | use iroh::{protocol::Router, Endpoint};
7 | use iroh_blobs::{net_protocol::Blobs, ticket::BlobTicket};
8 | use tracing_subscriber::{prelude::*, EnvFilter};
9 |
10 | // set the RUST_LOG env var to one of {debug,info,warn} to see logging info
11 | pub fn setup_logging() {
12 | tracing_subscriber::registry()
13 | .with(tracing_subscriber::fmt::layer().with_writer(std::io::stderr))
14 | .with(EnvFilter::from_default_env())
15 | .try_init()
16 | .ok();
17 | }
18 |
19 | #[tokio::main]
20 | async fn main() -> anyhow::Result<()> {
21 | setup_logging();
22 | println!("'Hello World' provide example!");
23 |
24 | // create a new node
25 | let endpoint = Endpoint::builder().bind().await?;
26 | let builder = Router::builder(endpoint);
27 | let blobs = Blobs::memory().build(builder.endpoint());
28 | let builder = builder.accept(iroh_blobs::ALPN, blobs.clone());
29 | let blobs_client = blobs.client();
30 | let node = builder.spawn();
31 |
32 | // add some data and remember the hash
33 | let res = blobs_client.add_bytes("Hello, world!").await?;
34 |
35 | // create a ticket
36 | let addr = node.endpoint().node_addr().await?;
37 | let ticket = BlobTicket::new(addr, res.hash, res.format)?;
38 |
39 | // print some info about the node
40 | println!("serving hash: {}", ticket.hash());
41 | println!("node id: {}", ticket.node_addr().node_id);
42 | println!("node listening addresses:");
43 | for addr in ticket.node_addr().direct_addresses() {
44 | println!("\t{:?}", addr);
45 | }
46 | println!(
47 | "node relay server url: {:?}",
48 | ticket
49 | .node_addr()
50 | .relay_url()
51 | .expect("a default relay url should be provided")
52 | .to_string()
53 | );
54 | // print the ticket, containing all the above information
55 | println!("\nin another terminal, run:");
56 | println!("\t cargo run --example hello-world-fetch {}", ticket);
57 | // block until SIGINT is received (ctrl+c)
58 | tokio::signal::ctrl_c().await?;
59 | node.shutdown().await?;
60 | Ok(())
61 | }
62 |
--------------------------------------------------------------------------------
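
The printed ticket is a single self-describing string: it bundles the node address, the hash, and the `BlobFormat`. The fetch side reverses the construction with `FromStr` and `into_parts`, as the fetch examples above show. A small sketch of the round trip, using only names that appear in those examples:

```rust
use std::str::FromStr;

use iroh_blobs::ticket::BlobTicket;

// Parse a ticket string (as printed by the provide side) and take it apart.
fn inspect(ticket_str: &str) -> anyhow::Result<()> {
    let ticket = BlobTicket::from_str(ticket_str)?;
    // split into the parts a fetcher needs: where to dial, what to ask for,
    // and whether it is a raw blob or a hash sequence
    let (node_addr, hash, format) = ticket.into_parts();
    println!("would fetch {hash} ({format:?}) from {}", node_addr.node_id);
    Ok(())
}
```
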
/examples/provide-bytes.rs:
--------------------------------------------------------------------------------
1 | //! An example that provides a blob or a collection over a Quinn connection.
2 | //!
3 | //! Since this example does not use [`iroh-net::Endpoint`], it does not do any holepunching, and so will only work locally or between two processes that have public IP addresses.
4 | //!
5 | //! Run this example with
6 | //! cargo run --example provide-bytes blob
7 | //! To provide a blob (single file)
8 | //!
9 | //! Run this example with
10 | //! cargo run --example provide-bytes collection
11 | //! To provide a collection (multiple blobs)
12 | use anyhow::Result;
13 | use iroh_blobs::{format::collection::Collection, util::local_pool::LocalPool, BlobFormat, Hash};
14 | use tracing::warn;
15 | use tracing_subscriber::{prelude::*, EnvFilter};
16 |
17 | const EXAMPLE_ALPN: &[u8] = b"n0/iroh/examples/bytes/0";
18 |
19 | // set the RUST_LOG env var to one of {debug,info,warn} to see logging info
20 | pub fn setup_logging() {
21 | tracing_subscriber::registry()
22 | .with(tracing_subscriber::fmt::layer().with_writer(std::io::stderr))
23 | .with(EnvFilter::from_default_env())
24 | .try_init()
25 | .ok();
26 | }
27 |
28 | #[tokio::main]
29 | async fn main() -> Result<()> {
30 | let args: Vec<_> = std::env::args().collect();
31 | if args.len() != 2 {
32 | anyhow::bail!(
33 | "usage: provide-bytes [FORMAT], where [FORMAT] is either 'blob' or 'collection'\n\nThe 'blob' example demonstrates sending a single blob of bytes. The 'collection' example demonstrates sending multiple blobs of bytes, grouped together in a 'collection'."
34 | );
35 | }
36 | let format = {
37 | if args[1] != "blob" && args[1] != "collection" {
38 | anyhow::bail!(
39 | "expected either 'blob' or 'collection' for FORMAT argument, got {}",
40 | args[1]
41 | );
42 | }
43 | args[1].clone()
44 | };
45 | println!("\nprovide bytes {format} example!");
46 |
47 | let (db, hash, format) = if format == "collection" {
48 | let (mut db, names) = iroh_blobs::store::readonly_mem::Store::new([
49 | ("blob1", b"the first blob of bytes".to_vec()),
50 | ("blob2", b"the second blob of bytes".to_vec()),
51 | ]); // create a collection
52 | let collection: Collection = names
53 | .into_iter()
54 | .map(|(name, hash)| (name, Hash::from(hash)))
55 | .collect();
56 | // add it to the db
57 | let hash = db.insert_many(collection.to_blobs()).unwrap();
58 | (db, hash, BlobFormat::HashSeq)
59 | } else {
60 | // create a new database and add a blob
61 | let (db, names) =
62 | iroh_blobs::store::readonly_mem::Store::new([("hello", b"Hello World!".to_vec())]);
63 |
64 | // get the hash of the content
65 | let hash = names.get("hello").unwrap();
66 | (db, Hash::from(hash.as_bytes()), BlobFormat::Raw)
67 | };
68 |
69 | // create an endpoint to listen for incoming connections
70 | let endpoint = iroh::Endpoint::builder()
71 | .relay_mode(iroh::RelayMode::Disabled)
72 | .alpns(vec![EXAMPLE_ALPN.into()])
73 | .bind()
74 | .await?;
75 | let addr = endpoint.node_addr().await?;
76 | println!("\nlistening on {:?}", addr.direct_addresses);
77 | println!("providing hash {hash}");
78 |
79 | let ticket = iroh_blobs::ticket::BlobTicket::new(addr, hash, format)?;
80 |
81 | println!("\nfetch the content using a finite state machine by running the following example:\n\ncargo run --example fetch-fsm {ticket}");
82 | println!("\nfetch the content using a stream by running the following example:\n\ncargo run --example fetch-stream {ticket}\n");
83 |
84 | // create a new local pool handle with 1 worker thread
85 | let lp = LocalPool::single();
86 |
87 | let accept_task = tokio::spawn(async move {
88 | while let Some(incoming) = endpoint.accept().await {
89 | println!("connection incoming");
90 |
91 | let conn = match incoming.accept() {
92 | Ok(conn) => conn,
93 | Err(err) => {
94 | warn!("incoming connection failed: {err:#}");
95 | // we can carry on in these cases:
96 | // this can be caused by retransmitted datagrams
97 | continue;
98 | }
99 | };
100 | let db = db.clone();
101 | let lp = lp.clone();
102 |
103 | // spawn a task to handle the connection
104 | tokio::spawn(async move {
105 | let conn = match conn.await {
106 | Ok(conn) => conn,
107 | Err(err) => {
108 | warn!("Error connecting: {err:#}");
109 | return;
110 | }
111 | };
112 | iroh_blobs::provider::handle_connection(conn, db, Default::default(), lp).await
113 | });
114 | }
115 | });
116 |
117 | match tokio::signal::ctrl_c().await {
118 | Ok(()) => {
119 | accept_task.abort();
120 | Ok(())
121 | }
122 | Err(e) => Err(anyhow::anyhow!("unable to listen for ctrl-c: {e}")),
123 | }
124 | }
125 |
--------------------------------------------------------------------------------
/examples/transfer.rs:
--------------------------------------------------------------------------------
1 | use std::path::PathBuf;
2 |
3 | use anyhow::Result;
4 | use iroh::{protocol::Router, Endpoint};
5 | use iroh_blobs::{
6 | net_protocol::Blobs,
7 | rpc::client::blobs::WrapOption,
8 | store::{ExportFormat, ExportMode},
9 | ticket::BlobTicket,
10 | util::SetTagOption,
11 | };
12 |
13 | #[tokio::main]
14 | async fn main() -> Result<()> {
15 | // Create an endpoint; it allows creating and accepting
16 | // connections in the iroh p2p world.
17 | let endpoint = Endpoint::builder().discovery_n0().bind().await?;
18 | // We initialize the Blobs protocol in-memory
19 | let blobs = Blobs::memory().build(&endpoint);
20 |
21 | // Now we build a router that accepts blobs connections & routes them
22 | // to the blobs protocol.
23 | let router = Router::builder(endpoint)
24 | .accept(iroh_blobs::ALPN, blobs.clone())
25 | .spawn();
26 |
27 | // We use a blobs client to interact with the blobs protocol we're running locally:
28 | let blobs_client = blobs.client();
29 |
30 | // Grab all passed in arguments, the first one is the binary itself, so we skip it.
31 | let args: Vec<String> = std::env::args().skip(1).collect();
32 | // Convert to &str, so we can pattern-match easily:
33 | let arg_refs: Vec<&str> = args.iter().map(String::as_str).collect();
34 |
35 | match arg_refs.as_slice() {
36 | ["send", filename] => {
37 | let filename: PathBuf = filename.parse()?;
38 | let abs_path = std::path::absolute(&filename)?;
39 |
40 | println!("Hashing file.");
41 |
42 | // keep the file in place and link it, instead of copying it into the in-memory blobs database
43 | let in_place = true;
44 | let blob = blobs_client
45 | .add_from_path(abs_path, in_place, SetTagOption::Auto, WrapOption::NoWrap)
46 | .await?
47 | .finish()
48 | .await?;
49 |
50 | let node_id = router.endpoint().node_id();
51 | let ticket = BlobTicket::new(node_id.into(), blob.hash, blob.format)?;
52 |
53 | println!("File hashed. Fetch this file by running:");
54 | println!(
55 | "cargo run --example transfer -- receive {ticket} {}",
56 | filename.display()
57 | );
58 |
59 | tokio::signal::ctrl_c().await?;
60 | }
61 | ["receive", ticket, filename] => {
62 | let filename: PathBuf = filename.parse()?;
63 | let abs_path = std::path::absolute(filename)?;
64 | let ticket: BlobTicket = ticket.parse()?;
65 |
66 | println!("Starting download.");
67 |
68 | blobs_client
69 | .download(ticket.hash(), ticket.node_addr().clone())
70 | .await?
71 | .finish()
72 | .await?;
73 |
74 | println!("Finished download.");
75 | println!("Copying to destination.");
76 |
77 | blobs_client
78 | .export(
79 | ticket.hash(),
80 | abs_path,
81 | ExportFormat::Blob,
82 | ExportMode::Copy,
83 | )
84 | .await?
85 | .finish()
86 | .await?;
87 |
88 | println!("Finished copying.");
89 | }
90 | _ => {
91 | println!("Couldn't parse command line arguments: {args:?}");
92 | println!("Usage:");
93 | println!(" # to send:");
94 | println!(" cargo run --example transfer -- send [FILE]");
95 | println!(" # this will print a ticket.");
96 | println!();
97 | println!(" # to receive:");
98 | println!(" cargo run --example transfer -- receive [TICKET] [FILE]");
99 | }
100 | }
101 |
102 | // Gracefully shut down the node
103 | println!("Shutting down.");
104 | router.shutdown().await?;
105 |
106 | Ok(())
107 | }
108 |
--------------------------------------------------------------------------------
/proptest-regressions/protocol/range_spec.txt:
--------------------------------------------------------------------------------
1 | # Seeds for failure cases proptest has generated in the past. It is
2 | # automatically read and these particular cases re-run before any
3 | # novel cases are generated.
4 | #
5 | # It is recommended to check this file in to source control so that
6 | # everyone who runs the test benefits from these saved cases.
7 | cc 7375b003a63bfe725eb4bcb2f266fae6afd9b3c921f9c2018f97daf6ef05a364 # shrinks to ranges = [RangeSet{ChunkNum(0)..ChunkNum(1)}, RangeSet{}]
8 | cc 23322efa46881646f1468137a688e66aee7ec2a3d01895ccad851d442a7828af # shrinks to ranges = [RangeSet{}, RangeSet{ChunkNum(0)..ChunkNum(1)}]
9 |
--------------------------------------------------------------------------------
/proptest-regressions/provider.txt:
--------------------------------------------------------------------------------
1 | # Seeds for failure cases proptest has generated in the past. It is
2 | # automatically read and these particular cases re-run before any
3 | # novel cases are generated.
4 | #
5 | # It is recommended to check this file in to source control so that
6 | # everyone who runs the test benefits from these saved cases.
7 | cc 25ec044e2b84054195984d7e04b93d9b39e2cc25eaee4037dc1be9398f9fd4b4 # shrinks to db = Database(RwLock { data: {}, poisoned: false, .. })
8 |
--------------------------------------------------------------------------------
/release.toml:
--------------------------------------------------------------------------------
1 | pre-release-hook = ["git", "cliff", "--prepend", "CHANGELOG.md", "--tag", "{{version}}", "--unreleased" ]
2 |
--------------------------------------------------------------------------------
/src/cli/tags.rs:
--------------------------------------------------------------------------------
1 | //! Define the tags subcommand.
2 |
3 | use anyhow::Result;
4 | use bytes::Bytes;
5 | use clap::Subcommand;
6 | use futures_lite::StreamExt;
7 |
8 | use crate::{rpc::client::tags, Tag};
9 |
10 | /// Commands to manage tags.
11 | #[derive(Subcommand, Debug, Clone)]
12 | #[allow(clippy::large_enum_variant)]
13 | pub enum TagCommands {
14 | /// List all tags
15 | List,
16 | /// Delete a tag
17 | Delete {
18 | tag: String,
19 | #[clap(long, default_value_t = false)]
20 | hex: bool,
21 | },
22 | }
23 |
24 | impl TagCommands {
25 | /// Runs the tag command given the iroh client.
26 | pub async fn run(self, tags: &tags::Client) -> Result<()> {
27 | match self {
28 | Self::List => {
29 | let mut response = tags.list().await?;
30 | while let Some(res) = response.next().await {
31 | let res = res?;
32 | println!("{}: {} ({:?})", res.name, res.hash, res.format);
33 | }
34 | }
35 | Self::Delete { tag, hex } => {
36 | let tag = if hex {
37 | Tag::from(Bytes::from(hex::decode(tag)?))
38 | } else {
39 | Tag::from(tag)
40 | };
41 | tags.delete(tag).await?;
42 | }
43 | }
44 | Ok(())
45 | }
46 | }
47 |
--------------------------------------------------------------------------------
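
The `--hex` flag exists because a `Tag` is arbitrary bytes, not necessarily printable UTF-8, so hex is the lossless way to name one on the command line. A small sketch of the round trip, using the `hex` and `bytes` crates as imported above (and assuming `Tag`'s derived equality):

```rust
use bytes::Bytes;
use iroh_blobs::Tag;

fn main() -> anyhow::Result<()> {
    // a tag made from raw, non-UTF-8 bytes
    let raw = vec![0xde, 0xad, 0xbe, 0xef];
    let tag = Tag::from(Bytes::from(raw.clone()));
    // on the CLI, such a tag is addressed by its hex form ...
    let name = hex::encode(&raw);
    assert_eq!(name, "deadbeef");
    // ... which `tags delete --hex` decodes back into the same bytes
    assert_eq!(tag, Tag::from(Bytes::from(hex::decode(&name)?)));
    Ok(())
}
```
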
/src/downloader/get.rs:
--------------------------------------------------------------------------------
1 | //! [`Getter`] implementation that performs requests over [`Connection`]s.
2 | //!
3 | //! [`Connection`]: iroh::endpoint::Connection
4 |
5 | use futures_lite::FutureExt;
6 | use iroh::endpoint;
7 |
8 | use super::{progress::BroadcastProgressSender, DownloadKind, FailureAction, GetStartFut, Getter};
9 | use crate::{
10 | get::{db::get_to_db_in_steps, error::GetError},
11 | store::Store,
12 | };
13 |
14 | impl From<GetError> for FailureAction {
15 | fn from(e: GetError) -> Self {
16 | match e {
17 | e @ GetError::NotFound(_) => FailureAction::AbortRequest(e),
18 | e @ GetError::RemoteReset(_) => FailureAction::RetryLater(e.into()),
19 | e @ GetError::NoncompliantNode(_) => FailureAction::DropPeer(e.into()),
20 | e @ GetError::Io(_) => FailureAction::RetryLater(e.into()),
21 | e @ GetError::BadRequest(_) => FailureAction::AbortRequest(e),
22 | // TODO: what do we want to do on local failures?
23 | e @ GetError::LocalFailure(_) => FailureAction::AbortRequest(e),
24 | }
25 | }
26 | }
27 |
28 | /// [`Getter`] implementation that performs requests over [`Connection`]s.
29 | ///
30 | /// [`Connection`]: iroh::endpoint::Connection
31 | pub(crate) struct IoGetter<S> {
32 | pub store: S,
33 | }
34 |
35 | impl<S: Store> Getter for IoGetter<S> {
36 | type Connection = endpoint::Connection;
37 | type NeedsConn = crate::get::db::GetStateNeedsConn;
38 |
39 | fn get(
40 | &mut self,
41 | kind: DownloadKind,
42 | progress_sender: BroadcastProgressSender,
43 | ) -> GetStartFut<Self::NeedsConn> {
44 | let store = self.store.clone();
45 | async move {
46 | match get_to_db_in_steps(store, kind.hash_and_format(), progress_sender).await {
47 | Err(err) => Err(err.into()),
48 | Ok(crate::get::db::GetState::Complete(stats)) => {
49 | Ok(super::GetOutput::Complete(stats))
50 | }
51 | Ok(crate::get::db::GetState::NeedsConn(needs_conn)) => {
52 | Ok(super::GetOutput::NeedsConn(needs_conn))
53 | }
54 | }
55 | }
56 | .boxed_local()
57 | }
58 | }
59 |
60 | impl super::NeedsConn<endpoint::Connection> for crate::get::db::GetStateNeedsConn {
61 | fn proceed(self, conn: endpoint::Connection) -> super::GetProceedFut {
62 | async move {
63 | let res = self.proceed(conn).await;
64 | match res {
65 | Ok(stats) => Ok(stats),
66 | Err(err) => Err(err.into()),
67 | }
68 | }
69 | .boxed_local()
70 | }
71 | }
72 |
73 | pub(super) fn track_metrics(
74 | res: &Result<crate::get::Stats, FailureAction>,
75 | metrics: &crate::metrics::Metrics,
76 | ) {
77 | match res {
78 | Ok(stats) => {
79 | let crate::get::Stats {
80 | bytes_written,
81 | bytes_read: _,
82 | elapsed,
83 | } = stats;
84 |
85 | metrics.downloads_success.inc();
86 | metrics.download_bytes_total.inc_by(*bytes_written);
87 | metrics
88 | .download_time_total
89 | .inc_by(elapsed.as_millis() as u64);
90 | }
91 | Err(e) => match &e {
92 | FailureAction::AbortRequest(GetError::NotFound(_)) => {
93 | metrics.downloads_notfound.inc();
94 | }
95 | _ => {
96 | metrics.downloads_error.inc();
97 | }
98 | },
99 | }
100 | }
101 |
--------------------------------------------------------------------------------
/src/downloader/invariants.rs:
--------------------------------------------------------------------------------
1 | //! Invariants for the service.
2 |
3 | #![cfg(any(test, debug_assertions))]
4 |
5 | use super::*;
6 |
7 | /// invariants for the service.
8 | impl<G: Getter<Connection = D::Connection>, D: DialerT> Service<G, D> {
9 | /// Checks the various invariants the service must maintain
10 | #[track_caller]
11 | pub(in crate::downloader) fn check_invariants(&self) {
12 | self.check_active_request_count();
13 | self.check_queued_requests_consistency();
14 | self.check_idle_peer_consistency();
15 | self.check_concurrency_limits();
16 | self.check_provider_map_prunning();
17 | }
18 |
19 | /// Checks concurrency limits are maintained.
20 | #[track_caller]
21 | fn check_concurrency_limits(&self) {
22 | let ConcurrencyLimits {
23 | max_concurrent_requests,
24 | max_concurrent_requests_per_node,
25 | max_open_connections,
26 | max_concurrent_dials_per_hash,
27 | } = &self.concurrency_limits;
28 |
29 | // check the total number of active requests to ensure it stays within the limit
30 | assert!(
31 | self.in_progress_downloads.len() <= *max_concurrent_requests,
32 | "max_concurrent_requests exceeded"
33 | );
34 |
35 | // check that the open and dialing peers don't exceed the connection capacity
36 | tracing::trace!(
37 | "limits: conns: {}/{} | reqs: {}/{}",
38 | self.connections_count(),
39 | max_open_connections,
40 | self.in_progress_downloads.len(),
41 | max_concurrent_requests
42 | );
43 | assert!(
44 | self.connections_count() <= *max_open_connections,
45 | "max_open_connections exceeded"
46 | );
47 |
48 | // check the active requests per peer don't exceed the limit
49 | for (node, info) in self.connected_nodes.iter() {
50 | assert!(
51 | info.active_requests() <= *max_concurrent_requests_per_node,
52 | "max_concurrent_requests_per_node exceeded for {node}"
53 | )
54 | }
55 |
56 | // check that we do not dial more nodes than allowed for the next pending hashes
57 | if let Some(kind) = self.queue.front() {
58 | let hash = kind.hash();
59 | let nodes = self.providers.get_candidates(&hash);
60 | let mut dialing = 0;
61 | for node in nodes {
62 | if self.dialer.is_pending(node) {
63 | dialing += 1;
64 | }
65 | }
66 | assert!(
67 | dialing <= *max_concurrent_dials_per_hash,
68 | "max_concurrent_dials_per_hash exceeded for {hash}"
69 | )
70 | }
71 | }
72 |
73 | /// Checks that the count of active requests per peer is consistent with the active requests,
74 | /// and that active requests are consistent with download futures
75 | #[track_caller]
76 | fn check_active_request_count(&self) {
77 | // check that the count of futures we are polling for downloads is consistent with the
78 | // number of requests
79 | assert_eq!(
80 | self.active_requests.len(),
81 | self.in_progress_downloads.len(),
82 | "active_requests and in_progress_downloads are out of sync"
83 | );
84 | // check that the count of requests per peer matches the number of requests that have that
85 | // peer as active
86 | let mut real_count: HashMap<NodeId, usize> =
87 | HashMap::with_capacity(self.connected_nodes.len());
88 | for req_info in self.active_requests.values() {
89 | // nothing like some classic word count
90 | *real_count.entry(req_info.node).or_default() += 1;
91 | }
92 | for (peer, info) in self.connected_nodes.iter() {
93 | assert_eq!(
94 | info.active_requests(),
95 | real_count.get(peer).copied().unwrap_or_default(),
96 | "mismatched count of active requests for {peer}"
97 | )
98 | }
99 | }
100 |
101 | /// Checks that the queued requests all appear in the provider map and request map.
102 | #[track_caller]
103 | fn check_queued_requests_consistency(&self) {
104 | // check that all hashes in the queue have candidates
105 | for entry in self.queue.iter() {
106 | assert!(
107 | self.providers
108 | .get_candidates(&entry.hash())
109 | .next()
110 | .is_some(),
111 | "all queued requests have providers"
112 | );
113 | assert!(
114 | self.requests.contains_key(entry),
115 | "all queued requests have request info"
116 | );
117 | }
118 |
119 | // check that all parked hashes are correctly parked
120 | for entry in self.queue.iter_parked() {
121 | assert!(
122 | matches!(self.next_step(entry), NextStep::Park),
123 | "all parked downloads evaluate to the correct next step"
124 | );
125 | assert!(
126 | self.providers
127 | .get_candidates(&entry.hash())
128 | .all(|node| matches!(self.node_state(node), NodeState::WaitForRetry)),
129 | "all parked downloads have only retrying nodes"
130 | );
131 | }
132 | }
133 |
134 | /// Check that peers queued to be disconnected are consistent with peers considered idle.
135 | #[track_caller]
136 | fn check_idle_peer_consistency(&self) {
137 | let idle_peers = self
138 | .connected_nodes
139 | .values()
140 | .filter(|info| info.active_requests() == 0)
141 | .count();
142 | assert_eq!(
143 | self.goodbye_nodes_queue.len(),
144 | idle_peers,
145 | "inconsistent count of idle peers"
146 | );
147 | }
148 |
149 | /// Check that every hash in the provider map is needed.
150 | #[track_caller]
151 | fn check_provider_map_prunning(&self) {
152 | for hash in self.providers.hash_node.keys() {
153 | let as_raw = DownloadKind(HashAndFormat::raw(*hash));
154 | let as_hash_seq = DownloadKind(HashAndFormat::hash_seq(*hash));
155 | assert!(
156 | self.queue.contains_hash(*hash)
157 | || self.active_requests.contains_key(&as_raw)
158 | || self.active_requests.contains_key(&as_hash_seq),
159 | "all hashes in the provider map are in the queue or active"
160 | )
161 | }
162 | }
163 | }
164 |
--------------------------------------------------------------------------------
/src/downloader/progress.rs:
--------------------------------------------------------------------------------
1 | use std::{
2 | collections::HashMap,
3 | sync::{
4 | atomic::{AtomicU64, Ordering},
5 | Arc,
6 | },
7 | };
8 |
9 | use anyhow::anyhow;
10 | use parking_lot::Mutex;
11 |
12 | use super::DownloadKind;
13 | use crate::{
14 | get::{db::DownloadProgress, progress::TransferState},
15 | util::progress::{AsyncChannelProgressSender, IdGenerator, ProgressSendError, ProgressSender},
16 | };
17 |
18 | /// The channel that can be used to subscribe to progress updates.
19 | pub type ProgressSubscriber = AsyncChannelProgressSender<DownloadProgress>;
20 |
21 | /// Track the progress of downloads.
22 | ///
23 | /// This struct allows creating [`ProgressSender`] structs to be passed to
24 | /// [`crate::get::db::get_to_db`]. Each progress sender can be subscribed to by any number of
25 | /// [`ProgressSubscriber`] channel senders, which will receive each progress update (if they have
26 | /// capacity). Additionally, the [`ProgressTracker`] maintains a [`TransferState`] for each
27 | /// transfer, applying each progress update to update this state. When subscribing to an already
28 | /// running transfer, the subscriber will receive a [`DownloadProgress::InitialState`] message
29 | /// containing the state at the time of the subscription, and then receive all further progress
30 | /// events directly.
31 | #[derive(Debug, Default)]
32 | pub struct ProgressTracker {
33 | /// Map of shared state for each tracked download.
34 | running: HashMap<DownloadKind, Shared>,
35 | /// Shared [`IdGenerator`] for all progress senders created by the tracker.
36 | id_gen: Arc<AtomicU64>,
37 | }
38 |
39 | impl ProgressTracker {
40 | pub fn new() -> Self {
41 | Self::default()
42 | }
43 |
44 | /// Track a new download with a list of initial subscribers.
45 | ///
46 | /// Note that this should only be called for *new* downloads. If a download for the `kind` is
47 | /// already tracked in this [`ProgressTracker`], calling `track` will replace all existing
48 | /// state and subscribers (equal to calling [`Self::remove`] first).
49 | pub fn track(
50 | &mut self,
51 | kind: DownloadKind,
52 | subscribers: impl IntoIterator<Item = ProgressSubscriber>,
53 | ) -> BroadcastProgressSender {
54 | let inner = Inner {
55 | subscribers: subscribers.into_iter().collect(),
56 | state: TransferState::new(kind.hash()),
57 | };
58 | let shared = Arc::new(Mutex::new(inner));
59 | self.running.insert(kind, Arc::clone(&shared));
60 | let id_gen = Arc::clone(&self.id_gen);
61 | BroadcastProgressSender { shared, id_gen }
62 | }
63 |
64 | /// Subscribe to a tracked download.
65 | ///
66 | /// Will return an error if `kind` is not yet tracked.
67 | pub async fn subscribe(
68 | &mut self,
69 | kind: DownloadKind,
70 | sender: ProgressSubscriber,
71 | ) -> anyhow::Result<()> {
72 | let initial_msg = self
73 | .running
74 | .get_mut(&kind)
75 | .ok_or_else(|| anyhow!("state for download {kind:?} not found"))?
76 | .lock()
77 | .subscribe(sender.clone());
78 | sender.send(initial_msg).await?;
79 | Ok(())
80 | }
81 |
82 | /// Unsubscribe `sender` from `kind`.
83 | pub fn unsubscribe(&mut self, kind: &DownloadKind, sender: &ProgressSubscriber) {
84 | if let Some(shared) = self.running.get_mut(kind) {
85 | shared.lock().unsubscribe(sender)
86 | }
87 | }
88 |
89 | /// Remove all state for a download.
90 | pub fn remove(&mut self, kind: &DownloadKind) {
91 | self.running.remove(kind);
92 | }
93 | }
94 |
95 | type Shared = Arc<Mutex<Inner>>;
96 |
97 | #[derive(Debug)]
98 | struct Inner {
99 | subscribers: Vec<ProgressSubscriber>,
100 | state: TransferState,
101 | }
102 |
103 | impl Inner {
104 | fn subscribe(&mut self, subscriber: ProgressSubscriber) -> DownloadProgress {
105 | let msg = DownloadProgress::InitialState(self.state.clone());
106 | self.subscribers.push(subscriber);
107 | msg
108 | }
109 |
110 | fn unsubscribe(&mut self, sender: &ProgressSubscriber) {
111 | self.subscribers.retain(|s| !s.same_channel(sender));
112 | }
113 |
114 | fn on_progress(&mut self, progress: DownloadProgress) {
115 | self.state.on_progress(progress);
116 | }
117 | }
118 |
119 | #[derive(Debug, Clone)]
120 | pub struct BroadcastProgressSender {
121 | shared: Shared,
122 | id_gen: Arc,
123 | }
124 |
125 | impl IdGenerator for BroadcastProgressSender {
126 | fn new_id(&self) -> u64 {
127 | self.id_gen.fetch_add(1, Ordering::SeqCst)
128 | }
129 | }
130 |
131 | impl ProgressSender for BroadcastProgressSender {
132 | type Msg = DownloadProgress;
133 |
134 | async fn send(&self, msg: Self::Msg) -> Result<(), ProgressSendError> {
135 | // making sure that the lock is not held across an await point.
136 | let futs = {
137 | let mut inner = self.shared.lock();
138 | inner.on_progress(msg.clone());
139 | let futs = inner
140 | .subscribers
141 | .iter_mut()
142 | .map(|sender| {
143 | let sender = sender.clone();
144 | let msg = msg.clone();
145 | async move {
146 | match sender.send(msg).await {
147 | Ok(()) => None,
148 | Err(ProgressSendError::ReceiverDropped) => Some(sender),
149 | }
150 | }
151 | })
152 | .collect::<Vec<_>>();
153 | drop(inner);
154 | futs
155 | };
156 |
157 | let failed_senders = futures_buffered::join_all(futs).await;
158 | // remove senders where the receiver is dropped
159 | if failed_senders.iter().any(|s| s.is_some()) {
160 | let mut inner = self.shared.lock();
161 | for sender in failed_senders.into_iter().flatten() {
162 | inner.unsubscribe(&sender);
163 | }
164 | drop(inner);
165 | }
166 | Ok(())
167 | }
168 |
169 | fn try_send(&self, msg: Self::Msg) -> Result<(), ProgressSendError> {
170 | let mut inner = self.shared.lock();
171 | inner.on_progress(msg.clone());
172 | // remove senders where the receiver is dropped
173 | inner
174 | .subscribers
175 | .retain_mut(|sender| match sender.try_send(msg.clone()) {
176 | Err(ProgressSendError::ReceiverDropped) => false,
177 | Ok(()) => true,
178 | });
179 | Ok(())
180 | }
181 |
182 | fn blocking_send(&self, msg: Self::Msg) -> Result<(), ProgressSendError> {
183 | let mut inner = self.shared.lock();
184 | inner.on_progress(msg.clone());
185 | // remove senders where the receiver is dropped
186 | inner
187 | .subscribers
188 | .retain_mut(|sender| match sender.blocking_send(msg.clone()) {
189 | Err(ProgressSendError::ReceiverDropped) => false,
190 | Ok(()) => true,
191 | });
192 | Ok(())
193 | }
194 | }
195 |
--------------------------------------------------------------------------------
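
A sketch of the flow the doc comment above describes, written as if inside this module since the types are crate-internal; `kind` and the subscriber channels are assumed to come from the surrounding downloader service:

```rust
// Illustrative only: how the downloader service might drive a ProgressTracker.
async fn track_one(
    tracker: &mut ProgressTracker,
    kind: DownloadKind,
    early: Vec<ProgressSubscriber>,
    late: ProgressSubscriber,
) -> anyhow::Result<()> {
    // register a *new* download; early subscribers receive every event
    let sender = tracker.track(kind, early);
    // the transfer task reports through the broadcast sender
    sender.send(DownloadProgress::Connected).await?;
    // a late subscriber first gets a DownloadProgress::InitialState snapshot,
    // then all further events
    tracker.subscribe(kind, late).await?;
    // drop all state once the transfer finishes or is cancelled
    tracker.remove(&kind);
    Ok(())
}
```
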
/src/downloader/test/dialer.rs:
--------------------------------------------------------------------------------
1 | //! Implementation of [`super::Dialer`] used for testing.
2 |
3 | use std::task::{Context, Poll};
4 |
5 | use parking_lot::RwLock;
6 |
7 | use super::*;
8 |
9 | /// Dialer for testing that keeps track of the dialing history.
10 | #[derive(Default, Clone)]
11 | pub(super) struct TestingDialer(Arc<RwLock<TestingDialerInner>>);
12 |
13 | struct TestingDialerInner {
14 | /// Peers that are being dialed.
15 | dialing: HashSet<NodeId>,
16 | /// Queue of dials.
17 | dial_futs: delay_queue::DelayQueue<NodeId>,
18 | /// History of attempted dials.
19 | dial_history: Vec<NodeId>,
20 | /// How long a dial lasts.
21 | dial_duration: Duration,
22 | /// Fn deciding if a dial is successful.
23 | dial_outcome: Box<dyn Fn(NodeId) -> bool + Send + Sync + 'static>,
24 | /// Our own node id
25 | node_id: NodeId,
26 | }
27 |
28 | impl Default for TestingDialerInner {
29 | fn default() -> Self {
30 | TestingDialerInner {
31 | dialing: HashSet::default(),
32 | dial_futs: delay_queue::DelayQueue::default(),
33 | dial_history: Vec::default(),
34 | dial_duration: Duration::from_millis(10),
35 | dial_outcome: Box::new(|_| true),
36 | node_id: NodeId::from_bytes(&[0u8; 32]).unwrap(),
37 | }
38 | }
39 | }
40 |
41 | impl DialerT for TestingDialer {
42 | type Connection = NodeId;
43 |
44 | fn queue_dial(&mut self, node_id: NodeId) {
45 | let mut inner = self.0.write();
46 | inner.dial_history.push(node_id);
47 | // for now assume every dial works
48 | let dial_duration = inner.dial_duration;
49 | if inner.dialing.insert(node_id) {
50 | inner.dial_futs.insert(node_id, dial_duration);
51 | }
52 | }
53 |
54 | fn pending_count(&self) -> usize {
55 | self.0.read().dialing.len()
56 | }
57 |
58 | fn is_pending(&self, node: NodeId) -> bool {
59 | self.0.read().dialing.contains(&node)
60 | }
61 |
62 | fn node_id(&self) -> NodeId {
63 | self.0.read().node_id
64 | }
65 | }
66 |
67 | impl Stream for TestingDialer {
68 | type Item = (NodeId, anyhow::Result<NodeId>);
69 |
70 | fn poll_next(self: std::pin::Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {