├── .config └── nextest.toml ├── .github ├── dependabot.yaml ├── pull_request_template.md └── workflows │ ├── beta.yaml │ ├── ci.yaml │ ├── cleanup.yaml │ ├── commit.yaml │ ├── docs.yaml │ ├── flaky.yaml │ └── tests.yaml ├── .gitignore ├── CHANGELOG.md ├── Cargo.lock ├── Cargo.toml ├── LICENSE-APACHE ├── LICENSE-MIT ├── Makefile.toml ├── README.md ├── cliff.toml ├── code_of_conduct.md ├── deny.toml ├── proptest-regressions └── ranger.txt ├── release.toml ├── src ├── actor.rs ├── cli.rs ├── cli │ └── authors.rs ├── engine.rs ├── engine │ ├── gossip.rs │ ├── live.rs │ └── state.rs ├── heads.rs ├── keys.rs ├── lib.rs ├── metrics.rs ├── net.rs ├── net │ └── codec.rs ├── protocol.rs ├── ranger.rs ├── rpc.rs ├── rpc │ ├── client.rs │ ├── client │ │ ├── authors.rs │ │ └── docs.rs │ ├── docs_handle_request.rs │ └── proto.rs ├── store.rs ├── store │ ├── fs.rs │ ├── fs │ │ ├── bounds.rs │ │ ├── migrate_v1_v2.rs │ │ ├── migrations.rs │ │ ├── query.rs │ │ ├── ranges.rs │ │ └── tables.rs │ ├── pubkeys.rs │ └── util.rs ├── sync.rs └── ticket.rs └── tests ├── client.rs ├── gc.rs ├── sync.rs └── util.rs /.config/nextest.toml: -------------------------------------------------------------------------------- 1 | [test-groups] 2 | run-in-isolation = { max-threads = 32 } 3 | # these are tests that must not run with other tests concurrently. All tests in 4 | # this group can take up at most 32 threads among them, but each one requiring 5 | # 16 threads also. The effect should be that tests run isolated. 6 | 7 | [[profile.ci.overrides]] 8 | filter = 'test(::run_in_isolation::)' 9 | test-group = 'run-in-isolation' 10 | threads-required = 32 11 | -------------------------------------------------------------------------------- /.github/dependabot.yaml: -------------------------------------------------------------------------------- 1 | # Keep GitHub Actions up to date with GitHub's Dependabot... 2 | # https://docs.github.com/en/code-security/dependabot/working-with-dependabot/keeping-your-actions-up-to-date-with-dependabot 3 | # https://docs.github.com/en/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file#package-ecosystem 4 | version: 2 5 | updates: 6 | - package-ecosystem: github-actions 7 | directory: / 8 | groups: 9 | github-actions: 10 | patterns: 11 | - "*" # Group all Actions updates into a single larger pull request 12 | schedule: 13 | interval: weekly 14 | -------------------------------------------------------------------------------- /.github/pull_request_template.md: -------------------------------------------------------------------------------- 1 | ## Description 2 | 3 | 4 | 5 | ## Breaking Changes 6 | 7 | 8 | 9 | ## Notes & open questions 10 | 11 | 12 | 13 | ## Change checklist 14 | 15 | - [ ] Self-review. 16 | - [ ] Documentation updates following the [style guide](https://rust-lang.github.io/rfcs/1574-more-api-documentation-conventions.html#appendix-a-full-conventions-text), if relevant. 17 | - [ ] Tests if relevant. 18 | - [ ] All breaking changes documented. 
19 | -------------------------------------------------------------------------------- /.github/workflows/beta.yaml: -------------------------------------------------------------------------------- 1 | # Run tests using the beta Rust compiler 2 | 3 | name: Beta Rust 4 | 5 | on: 6 | schedule: 7 | # 06:50 UTC every Monday 8 | - cron: '50 6 * * 1' 9 | workflow_dispatch: 10 | 11 | concurrency: 12 | group: beta-${{ github.workflow }}-${{ github.ref }} 13 | cancel-in-progress: true 14 | 15 | env: 16 | IROH_FORCE_STAGING_RELAYS: "1" 17 | 18 | jobs: 19 | tests: 20 | uses: './.github/workflows/tests.yaml' 21 | with: 22 | rust-version: beta 23 | notify: 24 | needs: tests 25 | if: ${{ always() }} 26 | runs-on: ubuntu-latest 27 | steps: 28 | - name: Extract test results 29 | run: | 30 | printf '${{ toJSON(needs) }}\n' 31 | result=$(echo '${{ toJSON(needs) }}' | jq -r .tests.result) 32 | echo TESTS_RESULT=$result 33 | echo "TESTS_RESULT=$result" >>"$GITHUB_ENV" 34 | - name: Notify discord on failure 35 | uses: n0-computer/discord-webhook-notify@v1 36 | if: ${{ env.TESTS_RESULT == 'failure' }} 37 | with: 38 | severity: error 39 | details: | 40 | Rustc beta tests failed in **${{ github.repository }}** 41 | See https://github.com/${{ github.repository }}/actions/workflows/beta.yaml 42 | webhookUrl: ${{ secrets.DISCORD_N0_GITHUB_CHANNEL_WEBHOOK_URL }} 43 | 44 | -------------------------------------------------------------------------------- /.github/workflows/ci.yaml: -------------------------------------------------------------------------------- 1 | name: CI 2 | 3 | on: 4 | pull_request: 5 | types: [ 'labeled', 'unlabeled', 'opened', 'synchronize', 'reopened' ] 6 | merge_group: 7 | push: 8 | branches: 9 | - main 10 | 11 | concurrency: 12 | group: ci-${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} 13 | cancel-in-progress: true 14 | 15 | env: 16 | RUST_BACKTRACE: 1 17 | RUSTFLAGS: -Dwarnings 18 | RUSTDOCFLAGS: -Dwarnings 19 | MSRV: "1.81" 20 | SCCACHE_CACHE_SIZE: "50G" 21 | IROH_FORCE_STAGING_RELAYS: "1" 22 | 23 | jobs: 24 | tests: 25 | name: CI Test Suite 26 | if: "github.event_name != 'pull_request' || ! contains(github.event.pull_request.labels.*.name, 'flaky-test')" 27 | uses: './.github/workflows/tests.yaml' 28 | 29 | cross_build: 30 | name: Cross Build Only 31 | if: "github.event_name != 'pull_request' || ! contains(github.event.pull_request.labels.*.name, 'flaky-test')" 32 | timeout-minutes: 30 33 | runs-on: [self-hosted, linux, X64] 34 | strategy: 35 | fail-fast: false 36 | matrix: 37 | target: 38 | # cross tests are currently broken vor armv7 and aarch64 39 | # see https://github.com/cross-rs/cross/issues/1311 40 | # - armv7-linux-androideabi 41 | # - aarch64-linux-android 42 | # Freebsd execution fails in cross 43 | # - i686-unknown-freebsd # Linking fails :/ 44 | - x86_64-unknown-freebsd 45 | # Netbsd execution fails to link in cross 46 | # - x86_64-unknown-netbsd 47 | steps: 48 | - name: Checkout 49 | uses: actions/checkout@v4 50 | with: 51 | submodules: recursive 52 | 53 | - name: Install rust stable 54 | uses: dtolnay/rust-toolchain@stable 55 | 56 | - name: Cleanup Docker 57 | continue-on-error: true 58 | run: | 59 | docker kill $(docker ps -q) 60 | 61 | # See https://github.com/cross-rs/cross/issues/1222 62 | - uses: taiki-e/install-action@cross 63 | 64 | - name: build 65 | # cross tests are currently broken vor armv7 and aarch64 66 | # see https://github.com/cross-rs/cross/issues/1311. So on 67 | # those platforms we only build but do not run tests. 
68 | run: cross build --all --target ${{ matrix.target }} 69 | env: 70 | RUST_LOG: ${{ runner.debug && 'TRACE' || 'DEBUG'}} 71 | 72 | android_build: 73 | name: Android Build Only 74 | if: "github.event_name != 'pull_request' || ! contains(github.event.pull_request.labels.*.name, 'flaky-test')" 75 | timeout-minutes: 30 76 | # runs-on: ubuntu-latest 77 | runs-on: [self-hosted, linux, X64] 78 | strategy: 79 | fail-fast: false 80 | matrix: 81 | target: 82 | - aarch64-linux-android 83 | - armv7-linux-androideabi 84 | steps: 85 | - name: Checkout 86 | uses: actions/checkout@v4 87 | 88 | - name: Set up Rust 89 | uses: dtolnay/rust-toolchain@stable 90 | with: 91 | target: ${{ matrix.target }} 92 | - name: Install rustup target 93 | run: rustup target add ${{ matrix.target }} 94 | 95 | - name: Setup Java 96 | uses: actions/setup-java@v4 97 | with: 98 | distribution: 'temurin' 99 | java-version: '17' 100 | 101 | - name: Setup Android SDK 102 | uses: android-actions/setup-android@v3 103 | 104 | - name: Setup Android NDK 105 | uses: arqu/setup-ndk@main 106 | id: setup-ndk 107 | with: 108 | ndk-version: r23 109 | add-to-path: true 110 | 111 | - name: Build 112 | env: 113 | ANDROID_NDK_HOME: ${{ steps.setup-ndk.outputs.ndk-path }} 114 | run: | 115 | cargo install --version 3.5.4 cargo-ndk 116 | cargo ndk --target ${{ matrix.target }} build 117 | 118 | cross_test: 119 | name: Cross Test 120 | if: "github.event_name != 'pull_request' || ! contains(github.event.pull_request.labels.*.name, 'flaky-test')" 121 | timeout-minutes: 30 122 | runs-on: [self-hosted, linux, X64] 123 | strategy: 124 | fail-fast: false 125 | matrix: 126 | target: 127 | - i686-unknown-linux-gnu 128 | steps: 129 | - name: Checkout 130 | uses: actions/checkout@v4 131 | with: 132 | submodules: recursive 133 | 134 | - name: Install rust stable 135 | uses: dtolnay/rust-toolchain@stable 136 | 137 | - name: Cleanup Docker 138 | continue-on-error: true 139 | run: | 140 | docker kill $(docker ps -q) 141 | 142 | # See https://github.com/cross-rs/cross/issues/1222 143 | - uses: taiki-e/install-action@cross 144 | 145 | - name: test 146 | run: cross test --all --target ${{ matrix.target }} -- --test-threads=12 147 | env: 148 | RUST_LOG: ${{ runner.debug && 'TRACE' || 'DEBUG' }} 149 | 150 | check_semver: 151 | runs-on: ubuntu-latest 152 | env: 153 | RUSTC_WRAPPER: "sccache" 154 | SCCACHE_GHA_ENABLED: "on" 155 | steps: 156 | - uses: actions/checkout@v4 157 | with: 158 | fetch-depth: 0 159 | - name: Install sccache 160 | uses: mozilla-actions/sccache-action@v0.0.9 161 | 162 | - name: Setup Environment (PR) 163 | if: ${{ github.event_name == 'pull_request' }} 164 | shell: bash 165 | run: | 166 | echo "HEAD_COMMIT_SHA=$(git rev-parse origin/${{ github.base_ref }})" >> ${GITHUB_ENV} 167 | - name: Setup Environment (Push) 168 | if: ${{ github.event_name == 'push' || github.event_name == 'merge_group' }} 169 | shell: bash 170 | run: | 171 | echo "HEAD_COMMIT_SHA=$(git rev-parse origin/main)" >> ${GITHUB_ENV} 172 | - name: Check semver 173 | continue-on-error: true 174 | # uses: obi1kenobi/cargo-semver-checks-action@v2 175 | uses: n0-computer/cargo-semver-checks-action@feat-baseline 176 | with: 177 | package: iroh-docs 178 | baseline-rev: ${{ env.HEAD_COMMIT_SHA }} 179 | use-cache: false 180 | 181 | check_fmt: 182 | timeout-minutes: 30 183 | name: Checking fmt 184 | runs-on: ubuntu-latest 185 | env: 186 | RUSTC_WRAPPER: "sccache" 187 | SCCACHE_GHA_ENABLED: "on" 188 | steps: 189 | - uses: actions/checkout@v4 190 | - uses: dtolnay/rust-toolchain@stable 191 | 
with: 192 | components: rustfmt 193 | - uses: mozilla-actions/sccache-action@v0.0.9 194 | - uses: taiki-e/install-action@cargo-make 195 | - run: cargo make format-check 196 | 197 | check_docs: 198 | timeout-minutes: 30 199 | name: Checking docs 200 | runs-on: ubuntu-latest 201 | env: 202 | RUSTC_WRAPPER: "sccache" 203 | SCCACHE_GHA_ENABLED: "on" 204 | steps: 205 | - uses: actions/checkout@v4 206 | - uses: dtolnay/rust-toolchain@master 207 | with: 208 | toolchain: nightly-2024-11-30 209 | - name: Install sccache 210 | uses: mozilla-actions/sccache-action@v0.0.9 211 | 212 | - name: Docs 213 | run: cargo doc --workspace --all-features --no-deps --document-private-items 214 | env: 215 | RUSTDOCFLAGS: --cfg docsrs 216 | 217 | clippy_check: 218 | timeout-minutes: 30 219 | runs-on: ubuntu-latest 220 | env: 221 | RUSTC_WRAPPER: "sccache" 222 | SCCACHE_GHA_ENABLED: "on" 223 | steps: 224 | - uses: actions/checkout@v4 225 | - uses: dtolnay/rust-toolchain@stable 226 | with: 227 | components: clippy 228 | - name: Install sccache 229 | uses: mozilla-actions/sccache-action@v0.0.9 230 | 231 | # TODO: We have a bunch of platform-dependent code so should 232 | # probably run this job on the full platform matrix 233 | - name: clippy check (all features) 234 | run: cargo clippy --workspace --all-features --all-targets --bins --tests --benches 235 | 236 | - name: clippy check (no features) 237 | run: cargo clippy --workspace --no-default-features --lib --bins --tests 238 | 239 | - name: clippy check (default features) 240 | run: cargo clippy --workspace --all-targets 241 | 242 | msrv: 243 | if: "github.event_name != 'pull_request' || ! contains(github.event.pull_request.labels.*.name, 'flaky-test')" 244 | timeout-minutes: 30 245 | name: Minimal Supported Rust Version 246 | runs-on: ubuntu-latest 247 | env: 248 | RUSTC_WRAPPER: "sccache" 249 | SCCACHE_GHA_ENABLED: "on" 250 | steps: 251 | - uses: actions/checkout@v4 252 | - uses: dtolnay/rust-toolchain@master 253 | with: 254 | toolchain: ${{ env.MSRV }} 255 | - name: Install sccache 256 | uses: mozilla-actions/sccache-action@v0.0.9 257 | 258 | - name: Check MSRV all features 259 | continue-on-error: true 260 | run: | 261 | cargo +$MSRV check --workspace --all-targets 262 | 263 | cargo_deny: 264 | timeout-minutes: 30 265 | name: cargo deny 266 | runs-on: ubuntu-latest 267 | steps: 268 | - uses: actions/checkout@v4 269 | - uses: EmbarkStudios/cargo-deny-action@v2 270 | with: 271 | arguments: --workspace --all-features 272 | command: check 273 | command-arguments: "-Dwarnings" 274 | 275 | codespell: 276 | timeout-minutes: 30 277 | runs-on: ubuntu-latest 278 | steps: 279 | - uses: actions/checkout@v4 280 | - run: pip install --user codespell[toml] 281 | - run: codespell --ignore-words-list=ans,atmost,crate,inout,ratatui,ser,stayin,swarmin,worl --skip=CHANGELOG.md 282 | -------------------------------------------------------------------------------- /.github/workflows/cleanup.yaml: -------------------------------------------------------------------------------- 1 | # Run tests using the beta Rust compiler 2 | 3 | name: Cleanup 4 | 5 | on: 6 | schedule: 7 | # 06:50 UTC every Monday 8 | - cron: '50 6 * * 1' 9 | workflow_dispatch: 10 | 11 | concurrency: 12 | group: beta-${{ github.workflow }}-${{ github.ref }} 13 | cancel-in-progress: true 14 | 15 | env: 16 | IROH_FORCE_STAGING_RELAYS: "1" 17 | 18 | jobs: 19 | clean_docs_branch: 20 | permissions: 21 | issues: write 22 | contents: write 23 | runs-on: ubuntu-latest 24 | steps: 25 | - name: Checkout 26 | uses: 
actions/checkout@v4 27 | with: 28 | ref: generated-docs-preview 29 | - name: Clean docs branch 30 | run: | 31 | cd pr/ 32 | # keep the last 25 prs 33 | dirs=$(ls -1d [0-9]* | sort -n) 34 | total_dirs=$(echo "$dirs" | wc -l) 35 | dirs_to_remove=$(echo "$dirs" | head -n $(($total_dirs - 25))) 36 | if [ -n "$dirs_to_remove" ]; then 37 | echo "$dirs_to_remove" | xargs rm -rf 38 | fi 39 | git add . 40 | git commit -m "Cleanup old docs" 41 | git push 42 | 43 | 44 | 45 | 46 | -------------------------------------------------------------------------------- /.github/workflows/commit.yaml: -------------------------------------------------------------------------------- 1 | name: Commits 2 | 3 | on: 4 | pull_request: 5 | branches: [main] 6 | types: [opened, edited, synchronize] 7 | 8 | env: 9 | IROH_FORCE_STAGING_RELAYS: "1" 10 | 11 | jobs: 12 | check-for-cc: 13 | runs-on: ubuntu-latest 14 | steps: 15 | - name: check-for-cc 16 | id: check-for-cc 17 | uses: agenthunt/conventional-commit-checker-action@v2.0.0 18 | with: 19 | pr-title-regex: "^(.+)(?:(([^)s]+)))?!?: (.+)" 20 | -------------------------------------------------------------------------------- /.github/workflows/docs.yaml: -------------------------------------------------------------------------------- 1 | name: Docs Preview 2 | 3 | on: 4 | pull_request: 5 | workflow_dispatch: 6 | inputs: 7 | pr_number: 8 | required: true 9 | type: string 10 | 11 | # ensure job runs sequentially so pushing to the preview branch doesn't conflict 12 | concurrency: 13 | group: ci-docs-preview 14 | 15 | env: 16 | IROH_FORCE_STAGING_RELAYS: "1" 17 | 18 | jobs: 19 | preview_docs: 20 | permissions: write-all 21 | timeout-minutes: 30 22 | name: Docs preview 23 | if: ${{ (github.event_name == 'pull_request' || github.event_name == 'workflow_dispatch' ) && !github.event.pull_request.head.repo.fork }} 24 | runs-on: ubuntu-latest 25 | env: 26 | RUSTC_WRAPPER: "sccache" 27 | SCCACHE_GHA_ENABLED: "on" 28 | SCCACHE_CACHE_SIZE: "50G" 29 | PREVIEW_PATH: pr/${{ github.event.pull_request.number || inputs.pr_number }}/docs 30 | 31 | steps: 32 | - uses: actions/checkout@v4 33 | - uses: dtolnay/rust-toolchain@master 34 | with: 35 | toolchain: nightly-2024-11-30 36 | - name: Install sccache 37 | uses: mozilla-actions/sccache-action@v0.0.9 38 | 39 | - name: Generate Docs 40 | run: cargo doc --workspace --all-features --no-deps 41 | env: 42 | RUSTDOCFLAGS: --cfg iroh_docsrs 43 | 44 | - name: Deploy Docs to Preview Branch 45 | uses: peaceiris/actions-gh-pages@v4 46 | with: 47 | github_token: ${{ secrets.GITHUB_TOKEN }} 48 | publish_dir: ./target/doc/ 49 | destination_dir: ${{ env.PREVIEW_PATH }} 50 | publish_branch: generated-docs-preview 51 | 52 | - name: Find Docs Comment 53 | uses: peter-evans/find-comment@v3 54 | id: fc 55 | with: 56 | issue-number: ${{ github.event.pull_request.number || inputs.pr_number }} 57 | comment-author: 'github-actions[bot]' 58 | body-includes: Documentation for this PR has been generated 59 | 60 | - name: Get current timestamp 61 | id: get_timestamp 62 | run: echo "TIMESTAMP=$(date -u +'%Y-%m-%dT%H:%M:%SZ')" >> $GITHUB_ENV 63 | 64 | - name: Create or Update Docs Comment 65 | uses: peter-evans/create-or-update-comment@v4 66 | with: 67 | issue-number: ${{ github.event.pull_request.number || inputs.pr_number }} 68 | comment-id: ${{ steps.fc.outputs.comment-id }} 69 | body: | 70 | Documentation for this PR has been generated and is available at: https://${{ github.repository_owner }}.github.io/${{ github.event.repository.name }}/${{ env.PREVIEW_PATH 
}}/iroh_docs/ 71 | 72 | Last updated: ${{ env.TIMESTAMP }} 73 | edit-mode: replace 74 | -------------------------------------------------------------------------------- /.github/workflows/flaky.yaml: -------------------------------------------------------------------------------- 1 | # Run all tests, including flaky test. 2 | # 3 | # The default CI workflow ignores flaky tests. This workflow will run 4 | # all tests, including ignored ones. 5 | # 6 | # To use this workflow you can either: 7 | # 8 | # - Label a PR with "flaky-test", the normal CI workflow will not run 9 | # any jobs but the jobs here will be run. Note that to merge the PR 10 | # you'll need to remove the label eventually because the normal CI 11 | # jobs are required by branch protection. 12 | # 13 | # - Manually trigger the workflow, you may choose a branch for this to 14 | # run on. 15 | # 16 | # Additionally this jobs runs once a day on a schedule. 17 | # 18 | # Currently doctests are not run by this workflow. 19 | 20 | name: Flaky CI 21 | 22 | on: 23 | pull_request: 24 | types: [ 'labeled', 'unlabeled', 'opened', 'synchronize', 'reopened' ] 25 | schedule: 26 | # 06:30 UTC every day 27 | - cron: '30 6 * * *' 28 | workflow_dispatch: 29 | inputs: 30 | branch: 31 | description: 'Branch to run on, defaults to main' 32 | required: true 33 | default: 'main' 34 | type: string 35 | 36 | concurrency: 37 | group: flaky-${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} 38 | cancel-in-progress: true 39 | 40 | env: 41 | IROH_FORCE_STAGING_RELAYS: "1" 42 | 43 | jobs: 44 | tests: 45 | if: "contains(github.event.pull_request.labels.*.name, 'flaky-test') || github.event_name == 'workflow_dispatch' || github.event_name == 'schedule'" 46 | uses: './.github/workflows/tests.yaml' 47 | with: 48 | flaky: true 49 | git-ref: ${{ inputs.branch }} 50 | notify: 51 | needs: tests 52 | if: ${{ always() }} 53 | runs-on: ubuntu-latest 54 | steps: 55 | - name: Extract test results 56 | run: | 57 | printf '${{ toJSON(needs) }}\n' 58 | result=$(echo '${{ toJSON(needs) }}' | jq -r .tests.result) 59 | echo TESTS_RESULT=$result 60 | echo "TESTS_RESULT=$result" >>"$GITHUB_ENV" 61 | - name: download nextest reports 62 | uses: actions/download-artifact@v4 63 | with: 64 | pattern: libtest_run_${{ github.run_number }}-${{ github.run_attempt }}-* 65 | merge-multiple: true 66 | path: nextest-results 67 | - name: create summary report 68 | id: make_summary 69 | run: | 70 | # prevent the glob expression in the loop to match on itself when the dir is empty 71 | shopt -s nullglob 72 | # to deal with multiline outputs it's recommended to use a random EOF, the syntax is based on 73 | # https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#multiline-strings 74 | EOF=aP51VriWCxNJ1JjvmO9i 75 | echo "summary<<$EOF" >> $GITHUB_OUTPUT 76 | echo "Flaky tests failure:" >> $GITHUB_OUTPUT 77 | echo " " >> $GITHUB_OUTPUT 78 | for report in nextest-results/*.json; do 79 | # remove the name prefix and extension, and split the parts 80 | name=$(echo ${report:16:-5} | tr _ ' ') 81 | echo $name 82 | echo "- **$name**" >> $GITHUB_OUTPUT 83 | # select the failed tests 84 | # the tests have this format "crate::module$test_name", the sed expressions remove the quotes and replace $ for :: 85 | failure=$(jq --slurp '.[] | select(.["type"] == "test" and .["event"] == "failed" ) | .["name"]' $report | sed -e 's/^"//g' -e 's/\$/::/' -e 's/"//') 86 | echo "$failure" 87 | echo "$failure" >> $GITHUB_OUTPUT 88 | done 89 | echo "" >> 
$GITHUB_OUTPUT 90 | echo "See https://github.com/${{ github.repository }}/actions/workflows/flaky.yaml" >> $GITHUB_OUTPUT 91 | echo "$EOF" >> $GITHUB_OUTPUT 92 | - name: Notify discord on failure 93 | uses: n0-computer/discord-webhook-notify@v1 94 | if: ${{ env.TESTS_RESULT == 'failure' || env.TESTS_RESULT == 'success' }} 95 | with: 96 | text: "Flaky tests in **${{ github.repository }}**:" 97 | severity: ${{ env.TESTS_RESULT == 'failure' && 'warn' || 'info' }} 98 | details: ${{ env.TESTS_RESULT == 'failure' && steps.make_summary.outputs.summary || 'No flaky failures!' }} 99 | webhookUrl: ${{ secrets.DISCORD_N0_GITHUB_CHANNEL_WEBHOOK_URL }} 100 | -------------------------------------------------------------------------------- /.github/workflows/tests.yaml: -------------------------------------------------------------------------------- 1 | # Run all tests, with or without flaky tests. 2 | 3 | name: Tests 4 | 5 | on: 6 | workflow_call: 7 | inputs: 8 | rust-version: 9 | description: 'The version of the rust compiler to run' 10 | type: string 11 | default: 'stable' 12 | flaky: 13 | description: 'Whether to also run flaky tests' 14 | type: boolean 15 | default: false 16 | git-ref: 17 | description: 'Which git ref to checkout' 18 | type: string 19 | default: ${{ github.ref }} 20 | 21 | env: 22 | RUST_BACKTRACE: 1 23 | RUSTFLAGS: -Dwarnings 24 | RUSTDOCFLAGS: -Dwarnings 25 | SCCACHE_CACHE_SIZE: "50G" 26 | CRATES_LIST: "iroh-docs" 27 | IROH_FORCE_STAGING_RELAYS: "1" 28 | 29 | jobs: 30 | build_and_test_nix: 31 | timeout-minutes: 30 32 | name: "Tests" 33 | runs-on: ${{ matrix.runner }} 34 | strategy: 35 | fail-fast: false 36 | matrix: 37 | name: [ubuntu-latest, macOS-arm-latest] 38 | rust: [ '${{ inputs.rust-version }}' ] 39 | features: [all, none, default] 40 | include: 41 | - name: ubuntu-latest 42 | os: ubuntu-latest 43 | release-os: linux 44 | release-arch: amd64 45 | runner: [self-hosted, linux, X64] 46 | - name: macOS-arm-latest 47 | os: macOS-latest 48 | release-os: darwin 49 | release-arch: aarch64 50 | runner: [self-hosted, macOS, ARM64] 51 | env: 52 | # Using self-hosted runners so use local cache for sccache and 53 | # not SCCACHE_GHA_ENABLED. 54 | RUSTC_WRAPPER: "sccache" 55 | steps: 56 | - name: Checkout 57 | uses: actions/checkout@v4 58 | with: 59 | ref: ${{ inputs.git-ref }} 60 | 61 | - name: Install ${{ matrix.rust }} rust 62 | uses: dtolnay/rust-toolchain@master 63 | with: 64 | toolchain: ${{ matrix.rust }} 65 | 66 | - name: Install cargo-nextest 67 | uses: taiki-e/install-action@v2 68 | with: 69 | tool: nextest@0.9.80 70 | 71 | - name: Install sccache 72 | uses: mozilla-actions/sccache-action@v0.0.9 73 | 74 | - name: Select features 75 | run: | 76 | case "${{ matrix.features }}" in 77 | all) 78 | echo "FEATURES=--all-features" >> "$GITHUB_ENV" 79 | ;; 80 | none) 81 | echo "FEATURES=--no-default-features" >> "$GITHUB_ENV" 82 | ;; 83 | default) 84 | echo "FEATURES=" >> "$GITHUB_ENV" 85 | ;; 86 | *) 87 | exit 1 88 | esac 89 | 90 | - name: check features 91 | if: ${{ ! 
inputs.flaky }} 92 | run: | 93 | for i in ${CRATES_LIST//,/ } 94 | do 95 | echo "Checking $i $FEATURES" 96 | if [ $i = "iroh-cli" ]; then 97 | targets="--bins" 98 | else 99 | targets="--lib --bins" 100 | fi 101 | echo cargo check -p $i $FEATURES $targets 102 | cargo check -p $i $FEATURES $targets 103 | done 104 | env: 105 | RUST_LOG: ${{ runner.debug && 'TRACE' || 'DEBUG'}} 106 | 107 | - name: build tests 108 | run: | 109 | cargo nextest run --workspace ${{ env.FEATURES }} --lib --bins --tests --no-run 110 | 111 | - name: list ignored tests 112 | run: | 113 | cargo nextest list --workspace ${{ env.FEATURES }} --lib --bins --tests --run-ignored ignored-only 114 | 115 | - name: run tests 116 | run: | 117 | mkdir -p output 118 | cargo nextest run --workspace ${{ env.FEATURES }} --lib --bins --tests --profile ci --run-ignored ${{ inputs.flaky && 'all' || 'default' }} --no-fail-fast --message-format ${{ inputs.flaky && 'libtest-json' || 'human' }} > output/${{ matrix.name }}_${{ matrix.features }}_${{ matrix.rust }}.json 119 | env: 120 | RUST_LOG: ${{ runner.debug && 'TRACE' || 'DEBUG'}} 121 | NEXTEST_EXPERIMENTAL_LIBTEST_JSON: 1 122 | 123 | - name: upload results 124 | if: ${{ failure() && inputs.flaky }} 125 | uses: actions/upload-artifact@v4 126 | with: 127 | name: libtest_run_${{ github.run_number }}-${{ github.run_attempt }}-${{ matrix.name }}_${{ matrix.features }}_${{ matrix.rust }}.json 128 | path: output 129 | retention-days: 45 130 | compression-level: 0 131 | 132 | - name: doctests 133 | if: ${{ (! inputs.flaky) && matrix.features == 'all' }} 134 | run: | 135 | if [ -n "${{ runner.debug }}" ]; then 136 | export RUST_LOG=TRACE 137 | else 138 | export RUST_LOG=DEBUG 139 | fi 140 | cargo test --workspace --all-features --doc 141 | 142 | build_and_test_windows: 143 | timeout-minutes: 30 144 | name: "Tests" 145 | runs-on: ${{ matrix.runner }} 146 | strategy: 147 | fail-fast: false 148 | matrix: 149 | name: [windows-latest] 150 | rust: [ '${{ inputs.rust-version}}' ] 151 | features: [all, none, default] 152 | target: 153 | - x86_64-pc-windows-msvc 154 | include: 155 | - name: windows-latest 156 | os: windows 157 | runner: [self-hosted, windows, x64] 158 | env: 159 | # Using self-hosted runners so use local cache for sccache and 160 | # not SCCACHE_GHA_ENABLED. 
161 | RUSTC_WRAPPER: "sccache" 162 | steps: 163 | - name: Checkout 164 | uses: actions/checkout@v4 165 | with: 166 | ref: ${{ inputs.git-ref }} 167 | 168 | - name: Install ${{ matrix.rust }} 169 | run: | 170 | rustup toolchain install ${{ matrix.rust }} 171 | rustup toolchain default ${{ matrix.rust }} 172 | rustup target add ${{ matrix.target }} 173 | rustup set default-host ${{ matrix.target }} 174 | 175 | - name: Install cargo-nextest 176 | shell: powershell 177 | run: | 178 | $tmp = New-TemporaryFile | Rename-Item -NewName { $_ -replace 'tmp$', 'zip' } -PassThru 179 | Invoke-WebRequest -OutFile $tmp https://get.nexte.st/latest/windows 180 | $outputDir = if ($Env:CARGO_HOME) { Join-Path $Env:CARGO_HOME "bin" } else { "~/.cargo/bin" } 181 | $tmp | Expand-Archive -DestinationPath $outputDir -Force 182 | $tmp | Remove-Item 183 | 184 | - name: Select features 185 | run: | 186 | switch ("${{ matrix.features }}") { 187 | "all" { 188 | echo "FEATURES=--all-features" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf8 -Append 189 | } 190 | "none" { 191 | echo "FEATURES=--no-default-features" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf8 -Append 192 | } 193 | "default" { 194 | echo "FEATURES=" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf8 -Append 195 | } 196 | default { 197 | Exit 1 198 | } 199 | } 200 | 201 | - name: Install sccache 202 | uses: mozilla-actions/sccache-action@v0.0.9 203 | 204 | - uses: msys2/setup-msys2@v2 205 | 206 | - name: build tests 207 | run: | 208 | cargo nextest run --workspace ${{ env.FEATURES }} --lib --bins --tests --target ${{ matrix.target }} --no-run 209 | 210 | - name: list ignored tests 211 | run: | 212 | cargo nextest list --workspace ${{ env.FEATURES }} --lib --bins --tests --target ${{ matrix.target }} --run-ignored ignored-only 213 | 214 | - name: tests 215 | run: | 216 | mkdir -p output 217 | cargo nextest run --workspace ${{ env.FEATURES }} --lib --bins --tests --profile ci --target ${{ matrix.target }} --run-ignored ${{ inputs.flaky && 'all' || 'default' }} --no-fail-fast --message-format ${{ inputs.flaky && 'libtest-json' || 'human' }} > output/${{ matrix.name }}_${{ matrix.features }}_${{ matrix.rust }}.json 218 | env: 219 | RUST_LOG: ${{ runner.debug && 'TRACE' || 'DEBUG'}} 220 | NEXTEST_EXPERIMENTAL_LIBTEST_JSON: 1 221 | 222 | - name: upload results 223 | if: ${{ failure() && inputs.flaky }} 224 | uses: actions/upload-artifact@v4 225 | with: 226 | name: libtest_run_${{ github.run_number }}-${{ github.run_attempt }}-${{ matrix.name }}_${{ matrix.features }}_${{ matrix.rust }}.json 227 | path: output 228 | retention-days: 1 229 | compression-level: 0 230 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /target -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | 3 | All notable changes to iroh-docs will be documented in this file. 
4 | 5 | ## [0.35.0](https://github.com/n0-computer/iroh-docs/compare/v0.34.0..0.35.0) - 2025-05-12 6 | 7 | ### 🐛 Bug Fixes 8 | 9 | - Flaky tests ([#40](https://github.com/n0-computer/iroh-docs/issues/40)) - ([faa929c](https://github.com/n0-computer/iroh-docs/commit/faa929ccfea052d2c3f14bea2b35aef7aa893f9a)) 10 | 11 | ### 🚜 Refactor 12 | 13 | - [**breaking**] Update to iroh 0.35 and non-global metrics tracking ([#41](https://github.com/n0-computer/iroh-docs/issues/41)) - ([8721c39](https://github.com/n0-computer/iroh-docs/commit/8721c39a2b33a8b9d3858778f1e983494919cc56)) 14 | 15 | ### ⚙️ Miscellaneous Tasks 16 | 17 | - Update sccache github action ([#42](https://github.com/n0-computer/iroh-docs/issues/42)) - ([e8b89a8](https://github.com/n0-computer/iroh-docs/commit/e8b89a82ab05c1174c2e20180d226f0fcfc18277)) 18 | 19 | ## [0.34.0](https://github.com/n0-computer/iroh-docs/compare/v0.33.0..0.34.0) - 2025-03-18 20 | 21 | ### ⚙️ Miscellaneous Tasks 22 | 23 | - Patch to use main branch of iroh dependencies ([#37](https://github.com/n0-computer/iroh-docs/issues/37)) - ([2f79a3f](https://github.com/n0-computer/iroh-docs/commit/2f79a3fa6d1394b5d3eeca72d477a79bb41dce07)) 24 | - Update to latest iroh ([#39](https://github.com/n0-computer/iroh-docs/issues/39)) - ([e6432d1](https://github.com/n0-computer/iroh-docs/commit/e6432d1d5b3fa818c08997e74fbfdfe347a82872)) 25 | 26 | ## [0.33.0](https://github.com/n0-computer/iroh-docs/compare/v0.32.0..0.33.0) - 2025-02-25 27 | 28 | ### ⚙️ Miscellaneous Tasks 29 | 30 | - Patch to use main branch of iroh dependencies ([#35](https://github.com/n0-computer/iroh-docs/issues/35)) - ([8c6951a](https://github.com/n0-computer/iroh-docs/commit/8c6951abef4f62d633e592bd25a18827243d4e85)) 31 | - Upgrade to latest `iroh`, `iroh-gossip` and `iroh-blobs` ([#36](https://github.com/n0-computer/iroh-docs/issues/36)) - ([911a8cc](https://github.com/n0-computer/iroh-docs/commit/911a8cc4553cb7805aa3f92db81b29759c73b25e)) 32 | 33 | ## [0.32.0](https://github.com/n0-computer/iroh-docs/compare/v0.31.0..0.32.0) - 2025-02-05 34 | 35 | ### ⚙️ Miscellaneous Tasks 36 | 37 | - Remove individual repo project tracking ([#29](https://github.com/n0-computer/iroh-docs/issues/29)) - ([7c4c871](https://github.com/n0-computer/iroh-docs/commit/7c4c8715cc2dd76649bd89d8f667d516a15a7a9f)) 38 | - Upgrade to `iroh`, `iroh-gossip`, and `iroh-blobs` v0.32.0 ([#33](https://github.com/n0-computer/iroh-docs/issues/33)) - ([85ea65d](https://github.com/n0-computer/iroh-docs/commit/85ea65d033ed604a9332f4c3a3ac5b003692c461)) 39 | 40 | ## [0.31.0](https://github.com/n0-computer/iroh-docs/compare/v0.30.0..0.31.0) - 2025-01-14 41 | 42 | ### ⚙️ Miscellaneous Tasks 43 | 44 | - Add project tracking ([#24](https://github.com/n0-computer/iroh-docs/issues/24)) - ([a5219d1](https://github.com/n0-computer/iroh-docs/commit/a5219d186eeeed0c05f9c8082adf02719fdf8e1f)) 45 | - Pin nextest version ([#25](https://github.com/n0-computer/iroh-docs/issues/25)) - ([9340e32](https://github.com/n0-computer/iroh-docs/commit/9340e32db71a634e06026c4861e8981774736c16)) 46 | - Fix URL to actions by using a variable ([#27](https://github.com/n0-computer/iroh-docs/issues/27)) - ([7667e46](https://github.com/n0-computer/iroh-docs/commit/7667e46a03d9ac34e23b4e2e25c8f9b9a0b414a3)) 47 | - Upgrade to `iroh@v0.31.0` ([#28](https://github.com/n0-computer/iroh-docs/issues/28)) - ([7911992](https://github.com/n0-computer/iroh-docs/commit/7911992383b8bfe633a6f56f749beaaeb5a96f64)) 48 | 49 | ## 
[0.30.0](https://github.com/n0-computer/iroh-docs/compare/v0.29.0..0.30.0) - 2024-12-17 50 | 51 | ### ⛰️ Features 52 | 53 | - [**breaking**] Remove rpc from the default feature flag set - ([5897668](https://github.com/n0-computer/iroh-docs/commit/5897668996cf509fcbcb0f9801925a3d3ad89148)) 54 | - [**breaking**] Add Docs which wraps the Engine ([#18](https://github.com/n0-computer/iroh-docs/issues/18)) - ([857f0dc](https://github.com/n0-computer/iroh-docs/commit/857f0dc7e9b14d27b2a21e0f12ff00609b077a52)) 55 | - [**breaking**] Update to iroh@0.30.0 & MSRV 1.81 - ([31b6698](https://github.com/n0-computer/iroh-docs/commit/31b66980a8216a7f52183a6f46bb9455c58726ae)) 56 | 57 | ### 🚜 Refactor 58 | 59 | - Adapt to new ProtocolHandler ([#15](https://github.com/n0-computer/iroh-docs/issues/15)) - ([b6c7616](https://github.com/n0-computer/iroh-docs/commit/b6c7616c401ac7c465beafebdc49f866263cf026)) 60 | 61 | ### 📚 Documentation 62 | 63 | - Add "Getting Started" section to the README and add the readme to the docs ([#19](https://github.com/n0-computer/iroh-docs/issues/19)) - ([98e2e17](https://github.com/n0-computer/iroh-docs/commit/98e2e170bcf4c1f23aa1479ae6c2b21b92c1146d)) 64 | 65 | ## [0.29.0](https://github.com/n0-computer/iroh-docs/compare/v0.28.0..0.29.0) - 2024-12-04 66 | 67 | ### ⛰️ Features 68 | 69 | - Update to latest iroh - ([c146f7a](https://github.com/n0-computer/iroh-docs/commit/c146f7a969926eb2624fbe3ba4f93f157fc04864)) 70 | - Update to iroh@0.29.0 - ([a625e88](https://github.com/n0-computer/iroh-docs/commit/a625e88889853dd98837982b8344af20b54ac606)) 71 | 72 | ### 🚜 Refactor 73 | 74 | - Extract RPC definitions into here - ([176715a](https://github.com/n0-computer/iroh-docs/commit/176715a567c7f5c660a980dcc334b5d5892d2cb1)) 75 | - Remove all blobs gc related tests - ([c63bae3](https://github.com/n0-computer/iroh-docs/commit/c63bae32e5a6fc78db6b4b2549bc56f82fbf94db)) 76 | - Switch to hex - ([e8fa3da](https://github.com/n0-computer/iroh-docs/commit/e8fa3dae967540dc4d6ed7361357d23500e5932a)) 77 | 78 | ### ⚙️ Miscellaneous Tasks 79 | 80 | - Prune some deps - ([b6fc71d](https://github.com/n0-computer/iroh-docs/commit/b6fc71df5dbdb745afe1852315c1176109360079)) 81 | - Init changelog - ([dd2d6d2](https://github.com/n0-computer/iroh-docs/commit/dd2d6d286f91c78262c66133a0304d270123d402)) 82 | - Fix release config - ([0ac3fbe](https://github.com/n0-computer/iroh-docs/commit/0ac3fbe60403edd3a302a5c53500299360e31797)) 83 | 84 | ## [0.28.0] - 2024-11-04 85 | 86 | ### ⛰️ Features 87 | 88 | - *(iroh-gossip)* Configure the max message size ([#2340](https://github.com/n0-computer/iroh-docs/issues/2340)) - ([8815bed](https://github.com/n0-computer/iroh-docs/commit/8815bedd3009a7013ea6eae3447622696e0bee0f)) 89 | - *(iroh-net)* [**breaking**] Improve initial connection latency ([#2234](https://github.com/n0-computer/iroh-docs/issues/2234)) - ([62afe07](https://github.com/n0-computer/iroh-docs/commit/62afe077adc67bd13db38cf52d875ea5c96e8f07)) 90 | - *(iroh-net)* Own the public QUIC API ([#2279](https://github.com/n0-computer/iroh-docs/issues/2279)) - ([6f4c35e](https://github.com/n0-computer/iroh-docs/commit/6f4c35ef9c51c671c4594ea828a3dd29bd952ce5)) 91 | - *(iroh-net)* [**breaking**] Upgrade to Quinn 0.11 and Rustls 0.23 ([#2595](https://github.com/n0-computer/iroh-docs/issues/2595)) - ([add0bc3](https://github.com/n0-computer/iroh-docs/commit/add0bc3a9a002d685e755f8fce03241dc9fa500a)) 92 | - [**breaking**] New quic-rpc, simlified generics, bump MSRV to 1.76 
([#2268](https://github.com/n0-computer/iroh-docs/issues/2268)) - ([9c49883](https://github.com/n0-computer/iroh-docs/commit/9c498836f8de4c9c42f15d422f282fa3efdbae10)) 93 | - Set derive_more to 1.0.0 (no beta!) ([#2736](https://github.com/n0-computer/iroh-docs/issues/2736)) - ([9e517d0](https://github.com/n0-computer/iroh-docs/commit/9e517d09e361978f3a95983f6c906068d1cf6512)) 94 | - Ci ([#1](https://github.com/n0-computer/iroh-docs/issues/1)) - ([6b2c973](https://github.com/n0-computer/iroh-docs/commit/6b2c973f8ba9acefce1b36df93a8e6a5f8d7392a)) 95 | 96 | ### 🐛 Bug Fixes 97 | 98 | - *(docs)* Prevent deadlocks with streams returned from docs actor ([#2346](https://github.com/n0-computer/iroh-docs/issues/2346)) - ([6251369](https://github.com/n0-computer/iroh-docs/commit/625136937a2fa63479c760ce9a13d24f8c97771c)) 99 | - *(iroh-docs)* Ensure docs db write txn gets closed regularly under all circumstances ([#2474](https://github.com/n0-computer/iroh-docs/issues/2474)) - ([ea226dd](https://github.com/n0-computer/iroh-docs/commit/ea226dd48c0dcc0397f319aeb767563cbec298c6)) 100 | - *(iroh-docs)* [**breaking**] Add `flush_store` and use it to make sure the default author is persisted ([#2471](https://github.com/n0-computer/iroh-docs/issues/2471)) - ([f67a8b1](https://github.com/n0-computer/iroh-docs/commit/f67a8b143a36a18903c1338e8412cdd7f1729582)) 101 | - *(iroh-docs)* Do not dial invalid peers ([#2470](https://github.com/n0-computer/iroh-docs/issues/2470)) - ([dc7645e](https://github.com/n0-computer/iroh-docs/commit/dc7645e45ce875b9c19d32d65544abefd0417e80)) 102 | - *(iroh-net)* Prevent adding addressing info that points back to us ([#2333](https://github.com/n0-computer/iroh-docs/issues/2333)) - ([54d5991](https://github.com/n0-computer/iroh-docs/commit/54d5991f5499ed52749e420eb647d726a3674eb4)) 103 | - *(iroh-net)* Fix a compiler error with newer `derive_more` versions ([#2578](https://github.com/n0-computer/iroh-docs/issues/2578)) - ([3f93765](https://github.com/n0-computer/iroh-docs/commit/3f93765ea36f47554123bfb48c3ac06068b970b6)) 104 | - *(iroh-net)* [**breaking**] DiscoveredDirectAddrs need to update the timestamp ([#2808](https://github.com/n0-computer/iroh-docs/issues/2808)) - ([916aedd](https://github.com/n0-computer/iroh-docs/commit/916aedd4a7a0f458be3d19d1d3f3d46d0bb074ad)) 105 | - Properly wait for docs engine shutdown ([#2389](https://github.com/n0-computer/iroh-docs/issues/2389)) - ([5b48493](https://github.com/n0-computer/iroh-docs/commit/5b48493917a5a87b21955fefc2f341a2f9fa0a1e)) 106 | - Pin derive_more to avoid sudden breakages ([#2584](https://github.com/n0-computer/iroh-docs/issues/2584)) - ([cfda981](https://github.com/n0-computer/iroh-docs/commit/cfda981a0ad075111e0c64e3e9a145d4490d1cc5)) 107 | 108 | ### 🚜 Refactor 109 | 110 | - *(iroh)* [**breaking**] Remove tags from downloader ([#2348](https://github.com/n0-computer/iroh-docs/issues/2348)) - ([38f75de](https://github.com/n0-computer/iroh-docs/commit/38f75de7cbde0a39582afcac12c73a571705ea58)) 111 | - *(iroh-docs)* Replace flume with async_channel in docs ([#2540](https://github.com/n0-computer/iroh-docs/issues/2540)) - ([335b6b1](https://github.com/n0-computer/iroh-docs/commit/335b6b15bf07471c672320127c205a34788462bb)) 112 | - *(iroh-net)* [**breaking**] Rename MagicEndpoint -> Endpoint ([#2287](https://github.com/n0-computer/iroh-docs/issues/2287)) - ([86e7c07](https://github.com/n0-computer/iroh-docs/commit/86e7c07a41953f07994dd5d31b9ed3d529a519c3)) 113 | - Renames iroh-sync & iroh-bytes 
([#2271](https://github.com/n0-computer/iroh-docs/issues/2271)) - ([f1c3cf9](https://github.com/n0-computer/iroh-docs/commit/f1c3cf9b96fab2340df3dc0f5355137234772d95)) 114 | - Move docs engine into iroh-docs ([#2343](https://github.com/n0-computer/iroh-docs/issues/2343)) - ([28cf40b](https://github.com/n0-computer/iroh-docs/commit/28cf40b295c3adca75f9328a23be3ff0e6e8a57f)) 115 | - [**breaking**] Metrics ([#2464](https://github.com/n0-computer/iroh-docs/issues/2464)) - ([4938055](https://github.com/n0-computer/iroh-docs/commit/4938055577fb6a975e547983ebdfb1c3b6b03df9)) 116 | - [**breaking**] Migrate to tokio's AbortOnDropHandle ([#2701](https://github.com/n0-computer/iroh-docs/issues/2701)) - ([cf05227](https://github.com/n0-computer/iroh-docs/commit/cf05227141cacace21fe914c0479786000989869)) 117 | - Extract `ProtocolHandler` impl into here - ([16bc7fe](https://github.com/n0-computer/iroh-docs/commit/16bc7fe4c7dee1b1b88f54390856c3fafa3d7656)) 118 | 119 | ### 📚 Documentation 120 | 121 | - *(*)* Document cargo features in docs ([#2761](https://github.com/n0-computer/iroh-docs/issues/2761)) - ([8346d50](https://github.com/n0-computer/iroh-docs/commit/8346d506fd2553bc813548a7d41f04adc984f7d2)) 122 | - Fix typos discovered by codespell ([#2534](https://github.com/n0-computer/iroh-docs/issues/2534)) - ([b5b7072](https://github.com/n0-computer/iroh-docs/commit/b5b70726f10cd33d354e07b8985e171cdd3cfc0f)) 123 | 124 | ### ⚙️ Miscellaneous Tasks 125 | 126 | - Release - ([b8dd924](https://github.com/n0-computer/iroh-docs/commit/b8dd9243361b708a41b15cad4809ad5b34d73c87)) 127 | - Release - ([045e3ce](https://github.com/n0-computer/iroh-docs/commit/045e3ce6dd56bf57b2127960fd1ebad4b9858026)) 128 | - Release - ([a9a8700](https://github.com/n0-computer/iroh-docs/commit/a9a87007b597f4f41c68dcbebe8b30bdb2cd3119)) 129 | - Release - ([08fe6f3](https://github.com/n0-computer/iroh-docs/commit/08fe6f3ad6fc4ded76991b059a8ffefaf06c1129)) 130 | - Introduce crate-ci/typos ([#2430](https://github.com/n0-computer/iroh-docs/issues/2430)) - ([b97e407](https://github.com/n0-computer/iroh-docs/commit/b97e4071508191894581e48882a53cf6b095f86f)) 131 | - Release - ([73c22f7](https://github.com/n0-computer/iroh-docs/commit/73c22f7f13ce0276677fc599e80c11f470127966)) 132 | - Release - ([9b53b1b](https://github.com/n0-computer/iroh-docs/commit/9b53b1bee105952d883a81bb2238b1e569f78332)) 133 | - Fix clippy warnings ([#2550](https://github.com/n0-computer/iroh-docs/issues/2550)) - ([956c51a](https://github.com/n0-computer/iroh-docs/commit/956c51a21b49e15e136434205dbc21040aa8b62b)) 134 | - Release - ([5fd07fa](https://github.com/n0-computer/iroh-docs/commit/5fd07fabf313828d5484b7ea315199f9bd7178b9)) 135 | - Release - ([92d8493](https://github.com/n0-computer/iroh-docs/commit/92d8493071ac9c5642358daea93b47b1c620e312)) 136 | - Release - ([f929d15](https://github.com/n0-computer/iroh-docs/commit/f929d15e76bfabb6d713402daff3b399f122e016)) 137 | - Release - ([7e3f028](https://github.com/n0-computer/iroh-docs/commit/7e3f028c65028c1b16c41a01fcc25ff6e2ad2e63)) 138 | - Release - ([5f98043](https://github.com/n0-computer/iroh-docs/commit/5f980439a5305bf781ea8c04f3d20976d26f88a8)) 139 | - Format imports using rustfmt ([#2812](https://github.com/n0-computer/iroh-docs/issues/2812)) - ([e6aadbd](https://github.com/n0-computer/iroh-docs/commit/e6aadbd337dda7697a34526a5a17fb38d15978c6)) 140 | - Increase version numbers and update ([#2821](https://github.com/n0-computer/iroh-docs/issues/2821)) - 
([5ac936f](https://github.com/n0-computer/iroh-docs/commit/5ac936f0d5a7a8a6ffe9931638dbb009dc339494)) 141 | - Release - ([d52df5f](https://github.com/n0-computer/iroh-docs/commit/d52df5f46d1c9fea2b2012a32f8fe9a3b86a14e3)) 142 | - Copy missing files - ([61d8e6a](https://github.com/n0-computer/iroh-docs/commit/61d8e6ada870d736182333d3dc4f2c2387e1dbb4)) 143 | - Update Cargo.toml - ([6be9cb0](https://github.com/n0-computer/iroh-docs/commit/6be9cb01a3dc8ba0d9cbd74572231b1a18df36ae)) 144 | - Upgrade 0.28 iroh-net - ([7e48c6a](https://github.com/n0-computer/iroh-docs/commit/7e48c6a20e411c8e5928b5f09fe0ba46d5880ce5)) 145 | - Upgrade 0.28 iroh-router - ([933af7a](https://github.com/n0-computer/iroh-docs/commit/933af7af0e38281853042764aaa99ad95327d320)) 146 | - Release iroh-docs version 0.28.0 - ([c3017de](https://github.com/n0-computer/iroh-docs/commit/c3017de4573930fd03a05ec4aa6c7ab7a84f1890)) 147 | 148 | 149 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "iroh-docs" 3 | version = "0.35.0" 4 | edition = "2021" 5 | readme = "README.md" 6 | description = "Iroh sync" 7 | license = "MIT/Apache-2.0" 8 | authors = ["n0 team"] 9 | repository = "https://github.com/n0-computer/iroh-docs" 10 | 11 | # Sadly this also needs to be updated in .github/workflows/ci.yml 12 | rust-version = "1.81" 13 | 14 | [lints.rust] 15 | missing_debug_implementations = "warn" 16 | 17 | # We use this --cfg for documenting the cargo features on which an API 18 | # is available. To preview this locally use: RUSTFLAGS="--cfg 19 | # iroh_docsrs cargo +nightly doc --all-features". We use our own 20 | # iroh_docsrs instead of the common docsrs to avoid also enabling this 21 | # feature in any dependencies, because some indirect dependencies 22 | # require a feature enabled when using `--cfg docsrs` which we can not 23 | # do. To enable for a crate set `#![cfg_attr(iroh_docsrs, 24 | # feature(doc_cfg))]` in the crate. 
25 | unexpected_cfgs = { level = "warn", check-cfg = ["cfg(iroh_docsrs)"] } 26 | 27 | [dependencies] 28 | anyhow = "1" 29 | async-channel = "2.3.1" 30 | blake3 = { package = "iroh-blake3", version = "1.4.5"} 31 | bytes = { version = "1.7", features = ["serde"] } 32 | derive_more = { version = "1.0.0", features = ["debug", "deref", "display", "from", "try_into", "into", "as_ref"] } 33 | ed25519-dalek = { version = "2.0.0", features = ["serde", "rand_core"] } 34 | futures-buffered = "0.2.4" 35 | futures-lite = "2.3.0" 36 | futures-util = { version = "0.3.25" } 37 | hex = "0.4" 38 | iroh-base = { version = "0.35", features = ["ticket"] } 39 | iroh-blobs = { version = "0.35" } 40 | iroh-gossip = { version = "0.35", optional = true, features = ["net"] } 41 | iroh-metrics = { version = "0.34", default-features = false } 42 | iroh = { version = "0.35", optional = true } 43 | num_enum = "0.7" 44 | postcard = { version = "1", default-features = false, features = ["alloc", "use-std", "experimental-derive"] } 45 | rand = "0.8.5" 46 | rand_core = "0.6.4" 47 | redb = { version = "2.0.0" } 48 | redb_v1 = { package = "redb", version = "1.5.1" } 49 | self_cell = "1.0.3" 50 | serde = { version = "1.0.164", features = ["derive"] } 51 | strum = { version = "0.26", features = ["derive"] } 52 | tempfile = { version = "3.4" } 53 | thiserror = "2" 54 | tokio = { version = "1", features = ["sync", "rt", "time", "macros"] } 55 | tokio-stream = { version = "0.1", optional = true, features = ["sync"]} 56 | tokio-util = { version = "0.7.12", optional = true, features = ["codec", "io-util", "io", "rt"] } 57 | tracing = "0.1" 58 | 59 | # rpc 60 | nested_enum_utils = { version = "0.1.0", optional = true } 61 | quic-rpc = { version = "0.20", optional = true } 62 | quic-rpc-derive = { version = "0.20", optional = true } 63 | serde-error = { version = "0.1.3", optional = true } 64 | portable-atomic = { version = "1.9.0", optional = true } 65 | 66 | # cli 67 | clap = { version = "4", features = ["derive"], optional = true } 68 | console = { version = "0.15", optional = true } 69 | data-encoding = { version = "2.3.3", optional = true } 70 | indicatif = { version = "0.17", features = ["tokio"], optional = true } 71 | dialoguer = { version = "0.11", optional = true } 72 | colored = { version = "2.1", optional = true } 73 | shellexpand = { version = "3.1", optional = true } 74 | 75 | [dev-dependencies] 76 | rand_chacha = "0.3.1" 77 | tokio = { version = "1", features = ["sync", "macros"] } 78 | proptest = "1.2.0" 79 | tempfile = "3.4" 80 | tracing-test = "0.2.5" 81 | test-strategy = "0.4" 82 | tracing-subscriber = { version = "0.3.18", features = ["env-filter"] } 83 | parking_lot = "0.12.3" 84 | testresult = "0.4.1" 85 | nested_enum_utils = "0.1.0" 86 | iroh-io = "0.6.1" 87 | testdir = "0.7" 88 | data-encoding = "2.6.0" 89 | 90 | [features] 91 | default = ["net", "metrics", "engine", "test-utils"] 92 | net = ["dep:iroh", "tokio/io-util", "dep:tokio-stream", "dep:tokio-util"] 93 | metrics = ["iroh-metrics/metrics", "iroh/metrics"] 94 | engine = ["net", "dep:iroh-gossip", "iroh-blobs/downloader"] 95 | test-utils = ["iroh/test-utils"] 96 | cli = [ 97 | "rpc", 98 | "dep:clap", 99 | "dep:indicatif", 100 | "dep:console", 101 | "dep:colored", 102 | "dep:dialoguer", 103 | "dep:shellexpand", 104 | "dep:data-encoding", 105 | "iroh-blobs/rpc", 106 | ] 107 | rpc = [ 108 | "engine", 109 | "dep:nested_enum_utils", 110 | "dep:quic-rpc", 111 | "dep:quic-rpc-derive", 112 | "dep:serde-error", 113 | "dep:portable-atomic", 114 | "iroh-blobs/rpc", 
115 | ] 116 | 117 | [package.metadata.docs.rs] 118 | all-features = true 119 | rustdoc-args = ["--cfg", "iroh_docsrs"] 120 | -------------------------------------------------------------------------------- /LICENSE-APACHE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 
61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 
179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [2023] [N0, INC] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /LICENSE-MIT: -------------------------------------------------------------------------------- 1 | Copyright 2023 N0, INC. 2 | 3 | Permission is hereby granted, free of charge, to any 4 | person obtaining a copy of this software and associated 5 | documentation files (the "Software"), to deal in the 6 | Software without restriction, including without 7 | limitation the rights to use, copy, modify, merge, 8 | publish, distribute, sublicense, and/or sell copies of 9 | the Software, and to permit persons to whom the Software 10 | is furnished to do so, subject to the following 11 | conditions: 12 | 13 | The above copyright notice and this permission notice 14 | shall be included in all copies or substantial portions 15 | of the Software. 16 | 17 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF 18 | ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED 19 | TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A 20 | PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT 21 | SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY 22 | CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 23 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR 24 | IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 25 | DEALINGS IN THE SOFTWARE. 
26 | -------------------------------------------------------------------------------- /Makefile.toml: -------------------------------------------------------------------------------- 1 | # Use cargo-make to run tasks here: https://crates.io/crates/cargo-make 2 | 3 | [tasks.format] 4 | workspace = false 5 | command = "cargo" 6 | args = [ 7 | "fmt", 8 | "--all", 9 | "--", 10 | "--config", 11 | "unstable_features=true", 12 | "--config", 13 | "imports_granularity=Crate,group_imports=StdExternalCrate,reorder_imports=true", 14 | ] 15 | 16 | [tasks.format-check] 17 | workspace = false 18 | command = "cargo" 19 | args = [ 20 | "fmt", 21 | "--all", 22 | "--check", 23 | "--", 24 | "--config", 25 | "unstable_features=true", 26 | "--config", 27 | "imports_granularity=Crate,group_imports=StdExternalCrate,reorder_imports=true", 28 | ] 29 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # iroh-docs 2 | 3 | Multi-dimensional key-value documents with an efficient synchronization protocol. 4 | 5 | The crate operates on *Replicas*. A replica contains an unlimited number of 6 | *Entries*. Each entry is identified by a key, its author, and the replica's 7 | namespace. Its value is the 32-byte BLAKE3 hash of the entry's content data, 8 | the size of this content data, and a timestamp. 9 | The content data itself is not stored or transferred through a replica. 10 | 11 | All entries in a replica are signed with two keypairs: 12 | 13 | * The *Namespace* key, as a token of write capability. The public key is the *NamespaceId*, which 14 | also serves as the unique identifier for a replica. 15 | * The *Author* key, as a proof of authorship. Any number of authors may be created, and 16 | their semantic meaning is application-specific. The public key of an author is the [AuthorId]. 17 | 18 | Replicas can be synchronized between peers by exchanging messages. The synchronization algorithm 19 | is based on a technique called *range-based set reconciliation*, based on [this paper][paper] by 20 | Aljoscha Meyer: 21 | 22 | > Range-based set reconciliation is a simple approach to efficiently compute the union of two 23 | sets over a network, based on recursively partitioning the sets and comparing fingerprints of 24 | the partitions to probabilistically detect whether a partition requires further work. 25 | 26 | The crate exposes a generic storage interface with in-memory and persistent, file-based 27 | implementations. The latter makes use of [`redb`], an embedded key-value store, and persists 28 | the whole store with all replicas to a single file. 29 | 30 | [paper]: https://arxiv.org/abs/2212.13567 31 | 32 | # Getting Started 33 | 34 | The entry into the `iroh-docs` protocol is the `Docs` struct, which uses an [`Engine`](https://docs.rs/iroh-docs/latest/iroh_docs/engine/struct.Engine.html) to power the protocol. 35 | 36 | `Docs` was designed to be used in conjunction with `iroh`. [Iroh](https://docs.rs/iroh) is a networking library for making direct connections; these connections are how peers send sync messages and transfer data. 37 | 38 | Iroh provides a [`Router`](https://docs.rs/iroh/latest/iroh/protocol/struct.Router.html) that takes an [`Endpoint`](https://docs.rs/iroh/latest/iroh/endpoint/struct.Endpoint.html) and any protocols needed for the application.
Similar to a router in a web server library, it runs a loop accepting incoming connections and routes them to the specific protocol handler based on `ALPN`. 39 | 40 | `Docs` is a "meta protocol" that relies on the [`iroh-blobs`](https://docs.rs/iroh-blobs) and [`iroh-gossip`](https://docs.rs/iroh-gossip) protocols. Setting up `Docs` will require setting up `Blobs` and `Gossip` as well. 41 | 42 | Here is a basic example of how to set up `iroh-docs` with `iroh` (a persistent-storage variant is sketched at the end of this README): 43 | 44 | ```rust 45 | use iroh::{protocol::Router, Endpoint}; 46 | use iroh_blobs::{net_protocol::Blobs, util::local_pool::LocalPool, ALPN as BLOBS_ALPN}; 47 | use iroh_docs::{protocol::Docs, ALPN as DOCS_ALPN}; 48 | use iroh_gossip::{net::Gossip, ALPN as GOSSIP_ALPN}; 49 | 50 | #[tokio::main] 51 | async fn main() -> anyhow::Result<()> { 52 | // create an iroh endpoint that includes the standard discovery mechanisms 53 | // we've built at number0 54 | let endpoint = Endpoint::builder().discovery_n0().bind().await?; 55 | 56 | // create a router builder, we will add the 57 | // protocols to this builder and then spawn 58 | // the router 59 | let builder = Router::builder(endpoint); 60 | 61 | // build the blobs protocol 62 | let blobs = Blobs::memory().build(builder.endpoint()); 63 | 64 | // build the gossip protocol 65 | let gossip = Gossip::builder().spawn(builder.endpoint().clone()).await?; 66 | 67 | // build the docs protocol 68 | let docs = Docs::memory().spawn(&blobs, &gossip).await?; 69 | 70 | // setup router 71 | let router = builder 72 | .accept(BLOBS_ALPN, blobs) 73 | .accept(GOSSIP_ALPN, gossip) 74 | .accept(DOCS_ALPN, docs) 75 | .spawn(); 76 | 77 | // do fun stuff with docs! 78 | Ok(()) 79 | } 80 | ``` 81 | 82 | # License 83 | 84 | This project is licensed under either of 85 | 86 | * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or 87 | <http://www.apache.org/licenses/LICENSE-2.0>) 88 | * MIT license ([LICENSE-MIT](LICENSE-MIT) or 89 | <http://opensource.org/licenses/MIT>) 90 | 91 | at your option. 92 | 93 | ### Contribution 94 | 95 | Unless you explicitly state otherwise, any contribution intentionally submitted 96 | for inclusion in this project by you, as defined in the Apache-2.0 license, 97 | shall be dual licensed as above, without any additional terms or conditions.
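# Persistent storage

The example above keeps all document data in memory. As `src/protocol.rs` in this repository shows, the builder can instead be created with `Docs::persistent(path)`, which stores replicas in a `docs.redb` file and the default author in a `default-author` file inside the given directory. The snippet below is a minimal, illustrative sketch of that variant: it reuses the `blobs` and `gossip` handles from the example above, and the `./iroh-docs-data` directory is an arbitrary placeholder rather than a path defined by this crate.

```rust
use std::path::PathBuf;

// Sketch: swap the `Docs::memory()` line from the example above for the
// persistent builder. `Docs::persistent(path)` and `Builder::spawn(&blobs, &gossip)`
// are defined in src/protocol.rs; the directory below is only an example value.
let data_dir = PathBuf::from("./iroh-docs-data");
let docs = Docs::persistent(data_dir).spawn(&blobs, &gossip).await?;

// With the crate's `rpc` feature enabled, an in-process client is available
// via `docs.client()` (see `Docs::client` in src/protocol.rs).
let _client = docs.client();
```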
98 | -------------------------------------------------------------------------------- /cliff.toml: -------------------------------------------------------------------------------- 1 | [changelog] 2 | # changelog header 3 | header = """ 4 | # Changelog\n 5 | All notable changes to iroh-docs will be documented in this file.\n 6 | """ 7 | 8 | body = """ 9 | {% if version %}\ 10 | {% if previous.version %}\ 11 | ## [{{ version | trim_start_matches(pat="v") }}](/compare/{{ previous.version }}..{{ version }}) - {{ timestamp | date(format="%Y-%m-%d") }} 12 | {% else %}\ 13 | ## [{{ version | trim_start_matches(pat="v") }}] - {{ timestamp | date(format="%Y-%m-%d") }} 14 | {% endif %}\ 15 | {% else %}\ 16 | ## [unreleased] 17 | {% endif %}\ 18 | 19 | {% macro commit(commit) -%} 20 | - {% if commit.scope %}*({{ commit.scope }})* {% endif %}{% if commit.breaking %}[**breaking**] {% endif %}\ 21 | {{ commit.message | upper_first }} - ([{{ commit.id | truncate(length=7, end="") }}](/commit/{{ commit.id }}))\ 22 | {% endmacro -%} 23 | 24 | {% for group, commits in commits | group_by(attribute="group") %} 25 | ### {{ group | striptags | trim | upper_first }} 26 | {% for commit in commits 27 | | filter(attribute="scope") 28 | | sort(attribute="scope") %} 29 | {{ self::commit(commit=commit) }} 30 | {%- endfor -%} 31 | {% raw %}\n{% endraw %}\ 32 | {%- for commit in commits %} 33 | {%- if not commit.scope -%} 34 | {{ self::commit(commit=commit) }} 35 | {% endif -%} 36 | {% endfor -%} 37 | {% endfor %}\n 38 | """ 39 | 40 | footer = "" 41 | postprocessors = [ 42 | { pattern = '', replace = "https://github.com/n0-computer/iroh-docs" }, 43 | { pattern = "\\(#([0-9]+)\\)", replace = "([#${1}](https://github.com/n0-computer/iroh-docs/issues/${1}))"} 44 | ] 45 | 46 | 47 | [git] 48 | # regex for parsing and grouping commits 49 | commit_parsers = [ 50 | { message = "^feat", group = "⛰️ Features" }, 51 | { message = "^fix", group = "🐛 Bug Fixes" }, 52 | { message = "^doc", group = "📚 Documentation" }, 53 | { message = "^perf", group = "⚡ Performance" }, 54 | { message = "^refactor", group = "🚜 Refactor" }, 55 | { message = "^style", group = "🎨 Styling" }, 56 | { message = "^test", group = "🧪 Testing" }, 57 | { message = "^chore\\(release\\)", skip = true }, 58 | { message = "^chore\\(deps\\)", skip = true }, 59 | { message = "^chore\\(pr\\)", skip = true }, 60 | { message = "^chore\\(pull\\)", skip = true }, 61 | { message = "^chore|ci", group = "⚙️ Miscellaneous Tasks" }, 62 | { body = ".*security", group = "🛡️ Security" }, 63 | { message = "^revert", group = "◀️ Revert" }, 64 | ] 65 | -------------------------------------------------------------------------------- /code_of_conduct.md: -------------------------------------------------------------------------------- 1 | # Code of Conduct 2 | 3 | Online or off, Number Zero is a harassment-free environment for everyone, regardless of gender, gender identity and expression, sexual orientation, disability, physical appearance, body size, race, age or religion or technical skill level. We do not tolerate harassment of participants in any form. 
4 | 5 | Harassment includes verbal comments that reinforce social structures of domination related to gender, gender identity and expression, sexual orientation, disability, physical appearance, body size, race, age, religion, sexual images in public spaces, deliberate intimidation, stalking, following, harassing photography or recording, sustained disruption of talks or other events, inappropriate physical contact, and unwelcome sexual attention. Participants asked to stop any harassing behavior are expected to comply immediately. 6 | 7 | If a participant engages in harassing behaviour, the organizers may take any action they deem appropriate, including warning the offender or expulsion from events and online forums. 8 | 9 | If you are being harassed, notice that someone else is being harassed, or have any other concerns, please contact a member of the organizing team immediately. 10 | 11 | At offline events, organizers will identify themselves, and will help participants contact venue security or local law enforcement, provide escorts, or otherwise assist those experiencing harassment to feel safe for the duration of the event. We value your participation! 12 | 13 | This document is based on a similar code from [EDGI](https://envirodatagov.org/) and [Civic Tech Toronto](http://civictech.ca/about-us/), itself derived from the [Recurse Center’s Social Rules](https://www.recurse.com/manual#sec-environment), and the [anti-harassment policy from the Geek Feminism Wiki](http://geekfeminism.wikia.com/wiki/Conference_anti-harassment/Policy). 14 | -------------------------------------------------------------------------------- /deny.toml: -------------------------------------------------------------------------------- 1 | [bans] 2 | multiple-versions = "allow" 3 | deny = [ 4 | "aws-lc", 5 | "aws-lc-rs", 6 | "aws-lc-sys", 7 | "native-tls", 8 | "openssl", 9 | ] 10 | 11 | [licenses] 12 | allow = [ 13 | "Apache-2.0", 14 | "Apache-2.0 WITH LLVM-exception", 15 | "BSD-2-Clause", 16 | "BSD-3-Clause", 17 | "BSL-1.0", # BOSL license 18 | "ISC", 19 | "MIT", 20 | "Zlib", 21 | "MPL-2.0", # https://fossa.com/blog/open-source-software-licenses-101-mozilla-public-license-2-0/ 22 | "Unicode-3.0", 23 | "Unlicense" # https://unlicense.org/ 24 | ] 25 | 26 | [[licenses.clarify]] 27 | name = "ring" 28 | expression = "MIT AND ISC AND OpenSSL" 29 | license-files = [ 30 | { path = "LICENSE", hash = 0xbd0eed23 }, 31 | ] 32 | 33 | [advisories] 34 | ignore = [ 35 | "RUSTSEC-2024-0370", # unmaintained, no upgrade available 36 | "RUSTSEC-2024-0384", # unmaintained, no upgrade available 37 | "RUSTSEC-2024-0436", # paste 38 | ] 39 | 40 | [sources] 41 | allow-git = [] 42 | -------------------------------------------------------------------------------- /proptest-regressions/ranger.txt: -------------------------------------------------------------------------------- 1 | # Seeds for failure cases proptest has generated in the past. It is 2 | # automatically read and these particular cases re-run before any 3 | # novel cases are generated. 4 | # 5 | # It is recommended to check this file in to source control so that 6 | # everyone who runs the test benefits from these saved cases. 
7 | cc 797e83179f8684388880e25a6fac7b4047eb15b03c55c1fb725b82bdbd0a4369 # shrinks to a = {TestKey("3"): ()}, b = {TestKey(""): (), TestKey("3"): (), TestKey("4"): (), TestKey("5"): (), TestKey("a"): (), TestKey("b"): (), TestKey("c"): ()} 8 | cc f5b7604319ead6181c2ff42e53f05e2c6f0298adf0b38ea4ae4710c43abb7663 # shrinks to input = _SimpleStoreSyncArgs { alice: [(3, ()), (a, ())], bob: [(, ()), (0, ()), (b, ())] } 9 | cc 41d9d33f002235dfe4bed83621fe79348725bbe00931451782025d98c1b81522 # shrinks to input = _SimpleStoreSyncU8Args { alice: [("", 58)], bob: [("", 0)] } 10 | -------------------------------------------------------------------------------- /release.toml: -------------------------------------------------------------------------------- 1 | pre-release-hook = ["git", "cliff", "--prepend", "CHANGELOG.md", "--tag", "{{version}}", "--unreleased" ] 2 | -------------------------------------------------------------------------------- /src/cli/authors.rs: -------------------------------------------------------------------------------- 1 | //! Define the commands to manage authors. 2 | 3 | use anyhow::{bail, Result}; 4 | use clap::Parser; 5 | use derive_more::FromStr; 6 | use futures_lite::StreamExt; 7 | 8 | use super::{AuthorsClient, ConsoleEnv}; 9 | use crate::{cli::fmt_short, Author, AuthorId}; 10 | 11 | #[allow(missing_docs)] 12 | /// Commands to manage authors. 13 | #[derive(Debug, Clone, Parser)] 14 | pub enum AuthorCommands { 15 | /// Set the active author (Note: only works within the Iroh console). 16 | Switch { author: AuthorId }, 17 | /// Create a new author. 18 | Create { 19 | /// Switch to the created author (Note: only works in the Iroh console). 20 | #[clap(long)] 21 | switch: bool, 22 | }, 23 | /// Delete an author. 24 | Delete { author: AuthorId }, 25 | /// Export an author. 26 | Export { author: AuthorId }, 27 | /// Import an author. 28 | Import { author: String }, 29 | /// Print the default author for this node. 30 | Default { 31 | /// Switch to the default author (Note: only works in the Iroh console). 32 | #[clap(long)] 33 | switch: bool, 34 | }, 35 | /// List authors. 36 | #[clap(alias = "ls")] 37 | List, 38 | } 39 | 40 | impl AuthorCommands { 41 | /// Runs the author command given an iroh client and console environment. 42 | pub async fn run(self, authors: &AuthorsClient, env: &ConsoleEnv) -> Result<()> { 43 | match self { 44 | Self::Switch { author } => { 45 | env.set_author(author)?; 46 | println!("Active author is now {}", fmt_short(author.as_bytes())); 47 | } 48 | Self::List => { 49 | let mut stream = authors.list().await?; 50 | while let Some(author_id) = stream.try_next().await? 
{ 51 | println!("{}", author_id); 52 | } 53 | } 54 | Self::Default { switch } => { 55 | if switch && !env.is_console() { 56 | bail!("The --switch flag is only supported within the Iroh console."); 57 | } 58 | let author_id = authors.default().await?; 59 | println!("{}", author_id); 60 | if switch { 61 | env.set_author(author_id)?; 62 | println!("Active author is now {}", fmt_short(author_id.as_bytes())); 63 | } 64 | } 65 | Self::Create { switch } => { 66 | if switch && !env.is_console() { 67 | bail!("The --switch flag is only supported within the Iroh console."); 68 | } 69 | 70 | let author_id = authors.create().await?; 71 | println!("{}", author_id); 72 | 73 | if switch { 74 | env.set_author(author_id)?; 75 | println!("Active author is now {}", fmt_short(author_id.as_bytes())); 76 | } 77 | } 78 | Self::Delete { author } => { 79 | authors.delete(author).await?; 80 | println!("Deleted author {}", fmt_short(author.as_bytes())); 81 | } 82 | Self::Export { author } => match authors.export(author).await? { 83 | Some(author) => { 84 | println!("{}", author); 85 | } 86 | None => { 87 | println!("No author found {}", fmt_short(author.as_bytes())); 88 | } 89 | }, 90 | Self::Import { author } => match Author::from_str(&author) { 91 | Ok(author) => { 92 | let id = author.id(); 93 | authors.import(author).await?; 94 | println!("Imported {}", fmt_short(id.as_bytes())); 95 | } 96 | Err(err) => { 97 | eprintln!("Invalid author key: {}", err); 98 | } 99 | }, 100 | } 101 | Ok(()) 102 | } 103 | } 104 | -------------------------------------------------------------------------------- /src/engine/gossip.rs: -------------------------------------------------------------------------------- 1 | use std::collections::{hash_map, HashMap}; 2 | 3 | use anyhow::{Context, Result}; 4 | use bytes::Bytes; 5 | use futures_lite::StreamExt; 6 | use futures_util::FutureExt; 7 | use iroh::NodeId; 8 | use iroh_gossip::net::{Event, Gossip, GossipEvent, GossipReceiver, GossipSender, JoinOptions}; 9 | use tokio::{ 10 | sync::mpsc, 11 | task::{AbortHandle, JoinSet}, 12 | }; 13 | use tracing::{debug, instrument, warn}; 14 | 15 | use super::live::{Op, ToLiveActor}; 16 | use crate::{actor::SyncHandle, ContentStatus, NamespaceId}; 17 | 18 | #[derive(Debug)] 19 | struct ActiveState { 20 | sender: GossipSender, 21 | abort_handle: AbortHandle, 22 | } 23 | 24 | #[derive(Debug)] 25 | pub struct GossipState { 26 | gossip: Gossip, 27 | sync: SyncHandle, 28 | to_live_actor: mpsc::Sender, 29 | active: HashMap, 30 | active_tasks: JoinSet<(NamespaceId, Result<()>)>, 31 | } 32 | 33 | impl GossipState { 34 | pub fn new(gossip: Gossip, sync: SyncHandle, to_live_actor: mpsc::Sender) -> Self { 35 | Self { 36 | gossip, 37 | sync, 38 | to_live_actor, 39 | active: Default::default(), 40 | active_tasks: Default::default(), 41 | } 42 | } 43 | 44 | pub async fn join(&mut self, namespace: NamespaceId, bootstrap: Vec) -> Result<()> { 45 | match self.active.entry(namespace) { 46 | hash_map::Entry::Occupied(entry) => { 47 | if !bootstrap.is_empty() { 48 | entry.get().sender.join_peers(bootstrap).await?; 49 | } 50 | } 51 | hash_map::Entry::Vacant(entry) => { 52 | let sub = self 53 | .gossip 54 | .subscribe_with_opts(namespace.into(), JoinOptions::with_bootstrap(bootstrap)); 55 | 56 | let (sender, stream) = sub.split(); 57 | let abort_handle = self.active_tasks.spawn( 58 | receive_loop( 59 | namespace, 60 | stream, 61 | self.to_live_actor.clone(), 62 | self.sync.clone(), 63 | ) 64 | .map(move |res| (namespace, res)), 65 | ); 66 | entry.insert(ActiveState { 67 | 
sender, 68 | abort_handle, 69 | }); 70 | } 71 | } 72 | Ok(()) 73 | } 74 | 75 | pub fn quit(&mut self, topic: &NamespaceId) { 76 | if let Some(state) = self.active.remove(topic) { 77 | state.abort_handle.abort(); 78 | } 79 | } 80 | 81 | pub async fn shutdown(&mut self) -> Result<()> { 82 | for (_, state) in self.active.drain() { 83 | state.abort_handle.abort(); 84 | } 85 | self.progress().await 86 | } 87 | 88 | pub async fn broadcast(&self, namespace: &NamespaceId, message: Bytes) { 89 | if let Some(state) = self.active.get(namespace) { 90 | state.sender.broadcast(message).await.ok(); 91 | } 92 | } 93 | 94 | pub async fn broadcast_neighbors(&self, namespace: &NamespaceId, message: Bytes) { 95 | if let Some(state) = self.active.get(namespace) { 96 | state.sender.broadcast_neighbors(message).await.ok(); 97 | } 98 | } 99 | 100 | pub fn max_message_size(&self) -> usize { 101 | self.gossip.max_message_size() 102 | } 103 | 104 | pub fn is_empty(&self) -> bool { 105 | self.active.is_empty() 106 | } 107 | 108 | /// Progress the internal task queues. 109 | /// 110 | /// Returns an error if any of the active tasks panic. 111 | /// 112 | /// ## Cancel safety 113 | /// 114 | /// This function is fully cancel-safe. 115 | pub async fn progress(&mut self) -> Result<()> { 116 | while let Some(res) = self.active_tasks.join_next().await { 117 | match res { 118 | Err(err) if err.is_cancelled() => continue, 119 | Err(err) => return Err(err).context("gossip receive loop panicked"), 120 | Ok((namespace, res)) => { 121 | self.active.remove(&namespace); 122 | if let Err(err) = res { 123 | warn!(?err, ?namespace, "gossip receive loop failed") 124 | } 125 | } 126 | } 127 | } 128 | Ok(()) 129 | } 130 | } 131 | 132 | #[instrument("gossip-recv", skip_all, fields(namespace=%namespace.fmt_short()))] 133 | async fn receive_loop( 134 | namespace: NamespaceId, 135 | mut recv: GossipReceiver, 136 | to_sync_actor: mpsc::Sender, 137 | sync: SyncHandle, 138 | ) -> Result<()> { 139 | for peer in recv.neighbors() { 140 | to_sync_actor 141 | .send(ToLiveActor::NeighborUp { namespace, peer }) 142 | .await?; 143 | } 144 | while let Some(event) = recv.try_next().await? { 145 | let event = match event { 146 | Event::Gossip(event) => event, 147 | Event::Lagged => { 148 | debug!("gossip loop lagged - dropping gossip event"); 149 | continue; 150 | } 151 | }; 152 | match event { 153 | GossipEvent::Received(msg) => { 154 | let op: Op = postcard::from_bytes(&msg.content)?; 155 | match op { 156 | Op::Put(entry) => { 157 | debug!(peer = %msg.delivered_from.fmt_short(), namespace = %namespace.fmt_short(), "received entry via gossip"); 158 | // Insert the entry into our replica. 159 | // If the message was broadcast with neighbor scope, or is received 160 | // directly from the author, we assume that the content is available at 161 | // that peer. Otherwise we don't. 162 | // The download is not triggered here, but in the `on_replica_event` 163 | // handler for the `InsertRemote` event. 
164 | let content_status = match msg.scope.is_direct() { 165 | true => ContentStatus::Complete, 166 | false => ContentStatus::Missing, 167 | }; 168 | let from = *msg.delivered_from.as_bytes(); 169 | if let Err(err) = sync 170 | .insert_remote(namespace, entry, from, content_status) 171 | .await 172 | { 173 | debug!("ignoring entry received via gossip: {err}"); 174 | } 175 | } 176 | Op::ContentReady(hash) => { 177 | to_sync_actor 178 | .send(ToLiveActor::NeighborContentReady { 179 | namespace, 180 | node: msg.delivered_from, 181 | hash, 182 | }) 183 | .await?; 184 | } 185 | Op::SyncReport(report) => { 186 | to_sync_actor 187 | .send(ToLiveActor::IncomingSyncReport { 188 | from: msg.delivered_from, 189 | report, 190 | }) 191 | .await?; 192 | } 193 | } 194 | } 195 | GossipEvent::NeighborUp(peer) => { 196 | to_sync_actor 197 | .send(ToLiveActor::NeighborUp { namespace, peer }) 198 | .await?; 199 | } 200 | GossipEvent::NeighborDown(peer) => { 201 | to_sync_actor 202 | .send(ToLiveActor::NeighborDown { namespace, peer }) 203 | .await?; 204 | } 205 | GossipEvent::Joined(peers) => { 206 | for peer in peers { 207 | to_sync_actor 208 | .send(ToLiveActor::NeighborUp { namespace, peer }) 209 | .await?; 210 | } 211 | } 212 | } 213 | } 214 | Ok(()) 215 | } 216 | -------------------------------------------------------------------------------- /src/engine/state.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | collections::BTreeMap, 3 | time::{Instant, SystemTime}, 4 | }; 5 | 6 | use anyhow::Result; 7 | use iroh::NodeId; 8 | use serde::{Deserialize, Serialize}; 9 | use tracing::{debug, warn}; 10 | 11 | use crate::{ 12 | net::{AbortReason, AcceptOutcome, SyncFinished}, 13 | NamespaceId, 14 | }; 15 | 16 | /// Why we started a sync request 17 | #[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq, Copy)] 18 | pub enum SyncReason { 19 | /// Direct join request via API 20 | DirectJoin, 21 | /// Peer showed up as new neighbor in the gossip swarm 22 | NewNeighbor, 23 | /// We synced after receiving a sync report that indicated news for us 24 | SyncReport, 25 | /// We received a sync report while a sync was running, so run again afterwars 26 | Resync, 27 | } 28 | 29 | /// Why we performed a sync exchange 30 | #[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq)] 31 | pub enum Origin { 32 | /// We initiated the exchange 33 | Connect(SyncReason), 34 | /// A node connected to us and we accepted the exchange 35 | Accept, 36 | } 37 | 38 | /// The state we're in for a node and a namespace 39 | #[derive(Debug, Clone)] 40 | pub enum SyncState { 41 | Idle, 42 | Running { start: SystemTime, origin: Origin }, 43 | } 44 | 45 | impl Default for SyncState { 46 | fn default() -> Self { 47 | Self::Idle 48 | } 49 | } 50 | 51 | /// Contains an entry for each active (syncing) namespace, and in there an entry for each node we 52 | /// synced with. 53 | #[derive(Default)] 54 | pub struct NamespaceStates(BTreeMap); 55 | 56 | #[derive(Default)] 57 | struct NamespaceState { 58 | nodes: BTreeMap, 59 | may_emit_ready: bool, 60 | } 61 | 62 | impl NamespaceStates { 63 | /// Are we syncing this namespace? 64 | pub fn is_syncing(&self, namespace: &NamespaceId) -> bool { 65 | self.0.contains_key(namespace) 66 | } 67 | 68 | /// Insert a namespace into the set of syncing namespaces. 69 | pub fn insert(&mut self, namespace: NamespaceId) { 70 | self.0.entry(namespace).or_default(); 71 | } 72 | 73 | /// Start a sync request. 
74 | /// 75 | /// Returns true if the request should be performed, and false if it should be aborted. 76 | pub fn start_connect( 77 | &mut self, 78 | namespace: &NamespaceId, 79 | node: NodeId, 80 | reason: SyncReason, 81 | ) -> bool { 82 | match self.entry(namespace, node) { 83 | None => { 84 | debug!("abort connect: namespace is not in sync set"); 85 | false 86 | } 87 | Some(state) => state.start_connect(reason), 88 | } 89 | } 90 | 91 | /// Accept a sync request. 92 | /// 93 | /// Returns the [`AcceptOutcome`] to be performed. 94 | pub fn accept_request( 95 | &mut self, 96 | me: &NodeId, 97 | namespace: &NamespaceId, 98 | node: NodeId, 99 | ) -> AcceptOutcome { 100 | let Some(state) = self.entry(namespace, node) else { 101 | return AcceptOutcome::Reject(AbortReason::NotFound); 102 | }; 103 | state.accept_request(me, &node) 104 | } 105 | 106 | /// Insert a finished sync operation into the state. 107 | /// 108 | /// Returns the time when the operation was started, and a `bool` that is true if another sync 109 | /// request should be triggered right afterwards. 110 | /// 111 | /// Returns `None` if the namespace is not syncing or the sync state doesn't expect a finish 112 | /// event. 113 | pub fn finish( 114 | &mut self, 115 | namespace: &NamespaceId, 116 | node: NodeId, 117 | origin: &Origin, 118 | result: Result, 119 | ) -> Option<(SystemTime, bool)> { 120 | let state = self.entry(namespace, node)?; 121 | state.finish(origin, result) 122 | } 123 | 124 | /// Set whether a [`super::live::Event::PendingContentReady`] may be emitted once the pending queue 125 | /// becomes empty. 126 | /// 127 | /// This should be set to `true` if there are pending content hashes after a sync finished, and 128 | /// to `false` whenever a `PendingContentReady` was emitted. 129 | pub fn set_may_emit_ready(&mut self, namespace: &NamespaceId, value: bool) -> Option<()> { 130 | let state = self.0.get_mut(namespace)?; 131 | state.may_emit_ready = value; 132 | Some(()) 133 | } 134 | /// Returns whether a [`super::live::Event::PendingContentReady`] event may be emitted once the 135 | /// pending queue becomes empty. 136 | /// 137 | /// If this returns `false`, an event should not be emitted even if the queue becomes empty, 138 | /// because a currently running sync did not yet terminate. Once it terminates, the event will 139 | /// be emitted from the handler for finished syncs. 140 | pub fn may_emit_ready(&mut self, namespace: &NamespaceId) -> Option { 141 | let state = self.0.get_mut(namespace)?; 142 | if state.may_emit_ready { 143 | state.may_emit_ready = false; 144 | Some(true) 145 | } else { 146 | Some(false) 147 | } 148 | } 149 | 150 | /// Remove a namespace from the set of syncing namespaces. 151 | pub fn remove(&mut self, namespace: &NamespaceId) -> bool { 152 | self.0.remove(namespace).is_some() 153 | } 154 | 155 | /// Get the [`PeerState`] for a namespace and node. 156 | /// If the namespace is syncing and the node so far unknown, initialize and return a default [`PeerState`]. 157 | /// If the namespace is not syncing return None. 158 | fn entry(&mut self, namespace: &NamespaceId, node: NodeId) -> Option<&mut PeerState> { 159 | self.0 160 | .get_mut(namespace) 161 | .map(|n| n.nodes.entry(node).or_default()) 162 | } 163 | } 164 | 165 | /// State of a node with regard to a namespace. 
166 | #[derive(Default)] 167 | struct PeerState { 168 | state: SyncState, 169 | resync_requested: bool, 170 | last_sync: Option<(Instant, Result)>, 171 | } 172 | 173 | impl PeerState { 174 | fn finish( 175 | &mut self, 176 | origin: &Origin, 177 | result: Result, 178 | ) -> Option<(SystemTime, bool)> { 179 | let start = match &self.state { 180 | SyncState::Running { 181 | start, 182 | origin: origin2, 183 | } => { 184 | if origin2 != origin { 185 | warn!(actual = ?origin, expected = ?origin2, "finished sync origin does not match state") 186 | } 187 | Some(*start) 188 | } 189 | SyncState::Idle => { 190 | warn!("sync state finish called but not in running state"); 191 | None 192 | } 193 | }; 194 | 195 | self.last_sync = Some((Instant::now(), result)); 196 | self.state = SyncState::Idle; 197 | start.map(|s| (s, self.resync_requested)) 198 | } 199 | 200 | fn start_connect(&mut self, reason: SyncReason) -> bool { 201 | debug!(?reason, "start connect"); 202 | match self.state { 203 | // never run two syncs at the same time 204 | SyncState::Running { .. } => { 205 | debug!("abort connect: sync already running"); 206 | if matches!(reason, SyncReason::SyncReport) { 207 | debug!("resync queued"); 208 | self.resync_requested = true; 209 | } 210 | false 211 | } 212 | SyncState::Idle => { 213 | self.set_sync_running(Origin::Connect(reason)); 214 | true 215 | } 216 | } 217 | } 218 | 219 | fn accept_request(&mut self, me: &NodeId, node: &NodeId) -> AcceptOutcome { 220 | let outcome = match &self.state { 221 | SyncState::Idle => AcceptOutcome::Allow, 222 | SyncState::Running { origin, .. } => match origin { 223 | Origin::Accept => AcceptOutcome::Reject(AbortReason::AlreadySyncing), 224 | // Incoming sync request while we are dialing ourselves. 225 | // In this case, compare the binary representations of our and the other node's id 226 | // to deterministically decide which of the two concurrent connections will succeed. 227 | Origin::Connect(_reason) => match expected_sync_direction(me, node) { 228 | SyncDirection::Accept => AcceptOutcome::Allow, 229 | SyncDirection::Connect => AcceptOutcome::Reject(AbortReason::AlreadySyncing), 230 | }, 231 | }, 232 | }; 233 | if let AcceptOutcome::Allow = outcome { 234 | self.set_sync_running(Origin::Accept); 235 | } 236 | outcome 237 | } 238 | 239 | fn set_sync_running(&mut self, origin: Origin) { 240 | self.state = SyncState::Running { 241 | origin, 242 | start: SystemTime::now(), 243 | }; 244 | self.resync_requested = false; 245 | } 246 | } 247 | 248 | #[derive(Debug)] 249 | enum SyncDirection { 250 | Accept, 251 | Connect, 252 | } 253 | 254 | fn expected_sync_direction(self_node_id: &NodeId, other_node_id: &NodeId) -> SyncDirection { 255 | if self_node_id.as_bytes() > other_node_id.as_bytes() { 256 | SyncDirection::Accept 257 | } else { 258 | SyncDirection::Connect 259 | } 260 | } 261 | -------------------------------------------------------------------------------- /src/heads.rs: -------------------------------------------------------------------------------- 1 | //! Author heads 2 | 3 | use std::{collections::BTreeMap, num::NonZeroU64}; 4 | 5 | use anyhow::Result; 6 | 7 | use crate::AuthorId; 8 | 9 | type Timestamp = u64; 10 | 11 | /// Timestamps of the latest entry for each author. 12 | #[derive(Debug, Clone, Eq, PartialEq, Default)] 13 | pub struct AuthorHeads { 14 | heads: BTreeMap, 15 | } 16 | 17 | impl AuthorHeads { 18 | /// Insert a new timestamp. 
19 | pub fn insert(&mut self, author: AuthorId, timestamp: Timestamp) { 20 | self.heads 21 | .entry(author) 22 | .and_modify(|t| *t = (*t).max(timestamp)) 23 | .or_insert(timestamp); 24 | } 25 | 26 | /// Number of author-timestamp pairs. 27 | pub fn len(&self) -> usize { 28 | self.heads.len() 29 | } 30 | 31 | /// Whether this [`AuthorHeads`] is empty. 32 | pub fn is_empty(&self) -> bool { 33 | self.heads.is_empty() 34 | } 35 | 36 | /// Get the timestamp for an author. 37 | pub fn get(&self, author: &AuthorId) -> Option { 38 | self.heads.get(author).copied() 39 | } 40 | 41 | /// Can this state offer newer stuff to `other`? 42 | pub fn has_news_for(&self, other: &Self) -> Option { 43 | let mut updates = 0; 44 | for (author, ts_ours) in self.iter() { 45 | if other 46 | .get(author) 47 | .map(|ts_theirs| *ts_ours > ts_theirs) 48 | .unwrap_or(true) 49 | { 50 | updates += 1; 51 | } 52 | } 53 | NonZeroU64::new(updates) 54 | } 55 | 56 | /// Merge another author head state into this one. 57 | pub fn merge(&mut self, other: &Self) { 58 | for (a, t) in other.iter() { 59 | self.insert(*a, *t); 60 | } 61 | } 62 | 63 | /// Create an iterator over the entries in this state. 64 | pub fn iter(&self) -> std::collections::btree_map::Iter { 65 | self.heads.iter() 66 | } 67 | 68 | /// Encode into a byte array with a limited size. 69 | /// 70 | /// Will skip oldest entries if the size limit is reached. 71 | /// Returns a byte array with a maximum length of `size_limit`. 72 | pub fn encode(&self, size_limit: Option) -> Result> { 73 | let mut by_timestamp = BTreeMap::new(); 74 | for (author, ts) in self.iter() { 75 | by_timestamp.insert(*ts, *author); 76 | } 77 | let mut items = Vec::new(); 78 | for (ts, author) in by_timestamp.into_iter().rev() { 79 | items.push((ts, author)); 80 | if let Some(size_limit) = size_limit { 81 | if postcard::experimental::serialized_size(&items)? > size_limit { 82 | items.pop(); 83 | break; 84 | } 85 | } 86 | } 87 | let encoded = postcard::to_stdvec(&items)?; 88 | debug_assert!(size_limit.map(|s| encoded.len() <= s).unwrap_or(true)); 89 | Ok(encoded) 90 | } 91 | 92 | /// Decode from byte slice created with [`Self::encode`]. 
93 | pub fn decode(bytes: &[u8]) -> Result { 94 | let items: Vec<(Timestamp, AuthorId)> = postcard::from_bytes(bytes)?; 95 | let mut heads = AuthorHeads::default(); 96 | for (ts, author) in items { 97 | heads.insert(author, ts); 98 | } 99 | Ok(heads) 100 | } 101 | } 102 | 103 | impl FromIterator<(AuthorId, Timestamp)> for AuthorHeads { 104 | fn from_iter>(iter: T) -> Self { 105 | Self { 106 | heads: iter.into_iter().collect(), 107 | } 108 | } 109 | } 110 | 111 | impl FromIterator<(Timestamp, AuthorId)> for AuthorHeads { 112 | fn from_iter>(iter: T) -> Self { 113 | Self { 114 | heads: iter.into_iter().map(|(ts, author)| (author, ts)).collect(), 115 | } 116 | } 117 | } 118 | 119 | #[cfg(test)] 120 | mod tests { 121 | use super::*; 122 | use crate::Record; 123 | #[test] 124 | fn author_heads_encode_decode() -> Result<()> { 125 | let mut heads = AuthorHeads::default(); 126 | let start = Record::empty_current().timestamp(); 127 | for i in 0..10u64 { 128 | heads.insert(AuthorId::from(&[i as u8; 32]), start + i); 129 | } 130 | let encoded = heads.encode(Some(256))?; 131 | let decoded = AuthorHeads::decode(&encoded)?; 132 | assert_eq!(decoded.len(), 6); 133 | let expected: AuthorHeads = (0u64..6) 134 | .map(|n| (AuthorId::from(&[9 - n as u8; 32]), start + (9 - n))) 135 | .collect(); 136 | assert_eq!(expected, decoded); 137 | Ok(()) 138 | } 139 | 140 | #[test] 141 | fn author_heads_compare() -> Result<()> { 142 | let a = [ 143 | (AuthorId::from(&[0u8; 32]), 5), 144 | (AuthorId::from(&[1u8; 32]), 7), 145 | ]; 146 | let b = [ 147 | (AuthorId::from(&[0u8; 32]), 4), 148 | (AuthorId::from(&[1u8; 32]), 6), 149 | (AuthorId::from(&[2u8; 32]), 7), 150 | ]; 151 | let a: AuthorHeads = a.into_iter().collect(); 152 | let b: AuthorHeads = b.into_iter().collect(); 153 | assert_eq!(a.has_news_for(&b), NonZeroU64::new(2)); 154 | assert_eq!(b.has_news_for(&a), NonZeroU64::new(1)); 155 | Ok(()) 156 | } 157 | } 158 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | #![doc = include_str!("../README.md")] 2 | //! Multi-dimensional key-value documents with an efficient synchronization protocol 3 | //! 4 | //! The crate operates on [Replicas](Replica). A replica contains an unlimited number of 5 | //! [Entries][Entry]. Each entry is identified by a key, its author, and the replica's 6 | //! namespace. Its value is the [32-byte BLAKE3 hash](iroh_blobs::Hash) 7 | //! of the entry's content data, the size of this content data, and a timestamp. 8 | //! The content data itself is not stored or transferred through a replica. 9 | //! 10 | //! All entries in a replica are signed with two keypairs: 11 | //! 12 | //! * The [`NamespaceSecret`] key, as a token of write capability. The public key is the 13 | //! [`NamespaceId`], which also serves as the unique identifier for a replica. 14 | //! * The [Author] key, as a proof of authorship. Any number of authors may be created, and 15 | //! their semantic meaning is application-specific. The public key of an author is the [AuthorId]. 16 | //! 17 | //! Replicas can be synchronized between peers by exchanging messages. The synchronization algorithm 18 | //! is based on a technique called *range-based set reconciliation*, based on [this paper][paper] by 19 | //! Aljoscha Meyer: 20 | //! 21 | //! > Range-based set reconciliation is a simple approach to efficiently compute the union of two 22 | //! 
> sets over a network, based on recursively partitioning the sets and comparing fingerprints of 23 | //! > the partitions to probabilistically detect whether a partition requires further work. 24 | //! 25 | //! The crate exposes a [generic storage interface](store::Store). There is an implementation 26 | //! of this interface, [store::fs::Store], that can be used either 27 | //! [in-memory](store::fs::Store::memory) or in 28 | //! [persistent, file-based](store::fs::Store::persistent) mode. 29 | //! 30 | //! Both modes make use of [`redb`], an embedded key-value store. When used 31 | //! in-memory, the store is backed by a `Vec`. When used in persistent mode, 32 | //! the store is backed by a single file on disk. 33 | //! 34 | //! [paper]: https://arxiv.org/abs/2212.13567 35 | #![deny(missing_docs, rustdoc::broken_intra_doc_links)] 36 | #![cfg_attr(iroh_docsrs, feature(doc_auto_cfg))] 37 | 38 | pub mod metrics; 39 | #[cfg(feature = "net")] 40 | pub mod net; 41 | #[cfg(feature = "engine")] 42 | pub mod protocol; 43 | #[cfg(feature = "net")] 44 | mod ticket; 45 | 46 | #[cfg(feature = "engine")] 47 | pub mod engine; 48 | #[cfg(feature = "rpc")] 49 | pub mod rpc; 50 | 51 | #[cfg(feature = "cli")] 52 | pub mod cli; 53 | 54 | pub mod actor; 55 | pub mod store; 56 | pub mod sync; 57 | 58 | mod heads; 59 | mod keys; 60 | mod ranger; 61 | 62 | #[cfg(feature = "net")] 63 | #[doc(inline)] 64 | pub use net::ALPN; 65 | 66 | #[cfg(feature = "net")] 67 | pub use self::ticket::DocTicket; 68 | pub use self::{heads::*, keys::*, sync::*}; 69 | -------------------------------------------------------------------------------- /src/metrics.rs: -------------------------------------------------------------------------------- 1 | //! Metrics for iroh-docs 2 | 3 | use iroh_metrics::{Counter, MetricsGroup}; 4 | 5 | /// Metrics for iroh-docs 6 | #[derive(Debug, Default, MetricsGroup)] 7 | pub struct Metrics { 8 | /// Number of document entries added locally 9 | pub new_entries_local: Counter, 10 | /// Number of document entries added by peers 11 | pub new_entries_remote: Counter, 12 | /// Total size of entry contents added locally 13 | pub new_entries_local_size: Counter, 14 | /// Total size of entry contents added by peers 15 | pub new_entries_remote_size: Counter, 16 | /// Number of successful syncs (via accept) 17 | pub sync_via_accept_success: Counter, 18 | /// Number of failed syncs (via accept) 19 | pub sync_via_accept_failure: Counter, 20 | /// Number of successful syncs (via connect) 21 | pub sync_via_connect_success: Counter, 22 | /// Number of failed syncs (via connect) 23 | pub sync_via_connect_failure: Counter, 24 | 25 | /// Number of times the main actor loop ticked 26 | pub actor_tick_main: Counter, 27 | 28 | /// Number of times the gossip actor loop ticked 29 | pub doc_gossip_tick_main: Counter, 30 | /// Number of times the gossip actor processed an event 31 | pub doc_gossip_tick_event: Counter, 32 | /// Number of times the gossip actor processed an actor event 33 | pub doc_gossip_tick_actor: Counter, 34 | /// Number of times the gossip actor processed a pending join 35 | pub doc_gossip_tick_pending_join: Counter, 36 | 37 | /// Number of times the live actor loop ticked 38 | pub doc_live_tick_main: Counter, 39 | /// Number of times the live actor processed an actor event 40 | pub doc_live_tick_actor: Counter, 41 | /// Number of times the live actor processed a replica event 42 | pub doc_live_tick_replica_event: Counter, 43 | /// Number of times the live actor processed a running sync connect 44 | pub 
doc_live_tick_running_sync_connect: Counter, 45 | /// Number of times the live actor processed a running sync accept 46 | pub doc_live_tick_running_sync_accept: Counter, 47 | /// Number of times the live actor processed a pending download 48 | pub doc_live_tick_pending_downloads: Counter, 49 | } 50 | -------------------------------------------------------------------------------- /src/net.rs: -------------------------------------------------------------------------------- 1 | //! Network implementation of the iroh-docs protocol 2 | 3 | use std::{ 4 | future::Future, 5 | time::{Duration, Instant}, 6 | }; 7 | 8 | use iroh::{Endpoint, NodeAddr, PublicKey}; 9 | use serde::{Deserialize, Serialize}; 10 | use tracing::{debug, error_span, trace, Instrument}; 11 | 12 | use crate::{ 13 | actor::SyncHandle, 14 | metrics::Metrics, 15 | net::codec::{run_alice, BobState}, 16 | NamespaceId, SyncOutcome, 17 | }; 18 | 19 | /// The ALPN identifier for the iroh-docs protocol 20 | pub const ALPN: &[u8] = b"/iroh-sync/1"; 21 | 22 | mod codec; 23 | 24 | /// Connect to a peer and sync a replica 25 | pub async fn connect_and_sync( 26 | endpoint: &Endpoint, 27 | sync: &SyncHandle, 28 | namespace: NamespaceId, 29 | peer: NodeAddr, 30 | metrics: Option<&Metrics>, 31 | ) -> Result { 32 | let t_start = Instant::now(); 33 | let peer_id = peer.node_id; 34 | trace!("connect"); 35 | let connection = endpoint 36 | .connect(peer, crate::ALPN) 37 | .await 38 | .map_err(ConnectError::connect)?; 39 | 40 | let (mut send_stream, mut recv_stream) = 41 | connection.open_bi().await.map_err(ConnectError::connect)?; 42 | 43 | let t_connect = t_start.elapsed(); 44 | debug!(?t_connect, "connected"); 45 | 46 | let res = run_alice(&mut send_stream, &mut recv_stream, sync, namespace, peer_id).await; 47 | 48 | send_stream.finish().map_err(ConnectError::close)?; 49 | send_stream.stopped().await.map_err(ConnectError::close)?; 50 | recv_stream 51 | .read_to_end(0) 52 | .await 53 | .map_err(ConnectError::close)?; 54 | 55 | if let Some(metrics) = metrics { 56 | if res.is_ok() { 57 | metrics.sync_via_connect_success.inc(); 58 | } else { 59 | metrics.sync_via_connect_failure.inc(); 60 | } 61 | } 62 | 63 | let t_process = t_start.elapsed() - t_connect; 64 | match &res { 65 | Ok(res) => { 66 | debug!( 67 | ?t_connect, 68 | ?t_process, 69 | sent = %res.num_sent, 70 | recv = %res.num_recv, 71 | "done, ok" 72 | ); 73 | } 74 | Err(err) => { 75 | debug!(?t_connect, ?t_process, ?err, "done, failed"); 76 | } 77 | } 78 | 79 | let outcome = res?; 80 | 81 | let timings = Timings { 82 | connect: t_connect, 83 | process: t_process, 84 | }; 85 | 86 | let res = SyncFinished { 87 | namespace, 88 | peer: peer_id, 89 | outcome, 90 | timings, 91 | }; 92 | 93 | Ok(res) 94 | } 95 | 96 | /// Whether we want to accept or reject an incoming sync request. 97 | #[derive(Debug, Clone)] 98 | pub enum AcceptOutcome { 99 | /// Accept the sync request. 100 | Allow, 101 | /// Decline the sync request 102 | Reject(AbortReason), 103 | } 104 | 105 | /// Handle an iroh-docs connection and sync all shared documents in the replica store. 
106 | pub async fn handle_connection( 107 | sync: SyncHandle, 108 | connection: iroh::endpoint::Connection, 109 | accept_cb: F, 110 | metrics: Option<&Metrics>, 111 | ) -> Result 112 | where 113 | F: Fn(NamespaceId, PublicKey) -> Fut, 114 | Fut: Future, 115 | { 116 | let t_start = Instant::now(); 117 | let peer = connection.remote_node_id().map_err(AcceptError::connect)?; 118 | let (mut send_stream, mut recv_stream) = connection 119 | .accept_bi() 120 | .await 121 | .map_err(|e| AcceptError::open(peer, e))?; 122 | 123 | let t_connect = t_start.elapsed(); 124 | let span = error_span!("accept", peer = %peer.fmt_short(), namespace = tracing::field::Empty); 125 | span.in_scope(|| { 126 | debug!(?t_connect, "connection established"); 127 | }); 128 | 129 | let mut state = BobState::new(peer); 130 | let res = state 131 | .run(&mut send_stream, &mut recv_stream, sync, accept_cb) 132 | .instrument(span.clone()) 133 | .await; 134 | 135 | if let Some(metrics) = metrics { 136 | if res.is_ok() { 137 | metrics.sync_via_accept_success.inc(); 138 | } else { 139 | metrics.sync_via_accept_failure.inc(); 140 | } 141 | } 142 | 143 | let namespace = state.namespace(); 144 | let outcome = state.into_outcome(); 145 | 146 | send_stream 147 | .finish() 148 | .map_err(|error| AcceptError::close(peer, namespace, error))?; 149 | send_stream 150 | .stopped() 151 | .await 152 | .map_err(|error| AcceptError::close(peer, namespace, error))?; 153 | recv_stream 154 | .read_to_end(0) 155 | .await 156 | .map_err(|error| AcceptError::close(peer, namespace, error))?; 157 | 158 | let t_process = t_start.elapsed() - t_connect; 159 | span.in_scope(|| match &res { 160 | Ok(_res) => { 161 | debug!( 162 | ?t_connect, 163 | ?t_process, 164 | sent = %outcome.num_sent, 165 | recv = %outcome.num_recv, 166 | "done, ok" 167 | ); 168 | } 169 | Err(err) => { 170 | debug!(?t_connect, ?t_process, ?err, "done, failed"); 171 | } 172 | }); 173 | 174 | let namespace = res?; 175 | 176 | let timings = Timings { 177 | connect: t_connect, 178 | process: t_process, 179 | }; 180 | let res = SyncFinished { 181 | namespace, 182 | outcome, 183 | peer, 184 | timings, 185 | }; 186 | 187 | Ok(res) 188 | } 189 | 190 | /// Details of a finished sync operation. 191 | #[derive(Debug, Clone)] 192 | pub struct SyncFinished { 193 | /// The namespace that was synced. 194 | pub namespace: NamespaceId, 195 | /// The peer we syned with. 196 | pub peer: PublicKey, 197 | /// The outcome of the sync operation 198 | pub outcome: SyncOutcome, 199 | /// The time this operation took 200 | pub timings: Timings, 201 | } 202 | 203 | /// Time a sync operation took 204 | #[derive(Debug, Default, Clone)] 205 | pub struct Timings { 206 | /// Time to establish connection 207 | pub connect: Duration, 208 | /// Time to run sync exchange 209 | pub process: Duration, 210 | } 211 | 212 | /// Errors that may occur on handling incoming sync connections. 213 | #[derive(thiserror::Error, Debug)] 214 | #[allow(missing_docs)] 215 | pub enum AcceptError { 216 | /// Failed to establish connection 217 | #[error("Failed to establish connection")] 218 | Connect { 219 | #[source] 220 | error: anyhow::Error, 221 | }, 222 | /// Failed to open replica 223 | #[error("Failed to open replica with {peer:?}")] 224 | Open { 225 | peer: PublicKey, 226 | #[source] 227 | error: anyhow::Error, 228 | }, 229 | /// We aborted the sync request. 
230 | #[error("Aborted sync of {namespace:?} with {peer:?}: {reason:?}")] 231 | Abort { 232 | peer: PublicKey, 233 | namespace: NamespaceId, 234 | reason: AbortReason, 235 | }, 236 | /// Failed to run sync 237 | #[error("Failed to sync {namespace:?} with {peer:?}")] 238 | Sync { 239 | peer: PublicKey, 240 | namespace: Option, 241 | #[source] 242 | error: anyhow::Error, 243 | }, 244 | /// Failed to close 245 | #[error("Failed to close {namespace:?} with {peer:?}")] 246 | Close { 247 | peer: PublicKey, 248 | namespace: Option, 249 | #[source] 250 | error: anyhow::Error, 251 | }, 252 | } 253 | 254 | /// Errors that may occur on outgoing sync requests. 255 | #[derive(thiserror::Error, Debug)] 256 | #[allow(missing_docs)] 257 | pub enum ConnectError { 258 | /// Failed to establish connection 259 | #[error("Failed to establish connection")] 260 | Connect { 261 | #[source] 262 | error: anyhow::Error, 263 | }, 264 | /// The remote peer aborted the sync request. 265 | #[error("Remote peer aborted sync: {0:?}")] 266 | RemoteAbort(AbortReason), 267 | /// Failed to run sync 268 | #[error("Failed to sync")] 269 | Sync { 270 | #[source] 271 | error: anyhow::Error, 272 | }, 273 | /// Failed to close 274 | #[error("Failed to close connection1")] 275 | Close { 276 | #[source] 277 | error: anyhow::Error, 278 | }, 279 | } 280 | 281 | /// Reason why we aborted an incoming sync request. 282 | #[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq)] 283 | pub enum AbortReason { 284 | /// Namespace is not available. 285 | NotFound, 286 | /// We are already syncing this namespace. 287 | AlreadySyncing, 288 | /// We experienced an error while trying to provide the requested resource 289 | InternalServerError, 290 | } 291 | 292 | impl AcceptError { 293 | fn connect(error: impl Into) -> Self { 294 | Self::Connect { 295 | error: error.into(), 296 | } 297 | } 298 | fn open(peer: PublicKey, error: impl Into) -> Self { 299 | Self::Open { 300 | peer, 301 | error: error.into(), 302 | } 303 | } 304 | pub(crate) fn sync( 305 | peer: PublicKey, 306 | namespace: Option, 307 | error: impl Into, 308 | ) -> Self { 309 | Self::Sync { 310 | peer, 311 | namespace, 312 | error: error.into(), 313 | } 314 | } 315 | fn close( 316 | peer: PublicKey, 317 | namespace: Option, 318 | error: impl Into, 319 | ) -> Self { 320 | Self::Close { 321 | peer, 322 | namespace, 323 | error: error.into(), 324 | } 325 | } 326 | /// Get the peer's node ID (if available) 327 | pub fn peer(&self) -> Option { 328 | match self { 329 | AcceptError::Connect { .. } => None, 330 | AcceptError::Open { peer, .. } => Some(*peer), 331 | AcceptError::Sync { peer, .. } => Some(*peer), 332 | AcceptError::Close { peer, .. } => Some(*peer), 333 | AcceptError::Abort { peer, .. } => Some(*peer), 334 | } 335 | } 336 | 337 | /// Get the namespace (if available) 338 | pub fn namespace(&self) -> Option { 339 | match self { 340 | AcceptError::Connect { .. } => None, 341 | AcceptError::Open { .. } => None, 342 | AcceptError::Sync { namespace, .. } => namespace.to_owned(), 343 | AcceptError::Close { namespace, .. } => namespace.to_owned(), 344 | AcceptError::Abort { namespace, .. 
} => Some(*namespace), 345 | } 346 | } 347 | } 348 | 349 | impl ConnectError { 350 | fn connect(error: impl Into) -> Self { 351 | Self::Connect { 352 | error: error.into(), 353 | } 354 | } 355 | fn close(error: impl Into) -> Self { 356 | Self::Close { 357 | error: error.into(), 358 | } 359 | } 360 | pub(crate) fn sync(error: impl Into) -> Self { 361 | Self::Sync { 362 | error: error.into(), 363 | } 364 | } 365 | pub(crate) fn remote_abort(reason: AbortReason) -> Self { 366 | Self::RemoteAbort(reason) 367 | } 368 | } 369 | -------------------------------------------------------------------------------- /src/protocol.rs: -------------------------------------------------------------------------------- 1 | //! [`ProtocolHandler`] implementation for the docs [`Engine`]. 2 | 3 | use std::{path::PathBuf, sync::Arc}; 4 | 5 | use anyhow::Result; 6 | use futures_lite::future::Boxed as BoxedFuture; 7 | use iroh::{endpoint::Connection, protocol::ProtocolHandler}; 8 | use iroh_blobs::net_protocol::{Blobs, ProtectCb}; 9 | use iroh_gossip::net::Gossip; 10 | 11 | use crate::{ 12 | engine::{DefaultAuthorStorage, Engine}, 13 | store::Store, 14 | }; 15 | 16 | impl ProtocolHandler for Docs { 17 | fn accept(&self, conn: Connection) -> BoxedFuture> { 18 | let this = self.engine.clone(); 19 | Box::pin(async move { this.handle_connection(conn).await }) 20 | } 21 | 22 | fn shutdown(&self) -> BoxedFuture<()> { 23 | let this = self.engine.clone(); 24 | Box::pin(async move { 25 | if let Err(err) = this.shutdown().await { 26 | tracing::warn!("shutdown error: {:?}", err); 27 | } 28 | }) 29 | } 30 | } 31 | 32 | /// Docs protocol. 33 | #[derive(Debug, Clone)] 34 | pub struct Docs { 35 | engine: Arc>, 36 | #[cfg(feature = "rpc")] 37 | pub(crate) rpc_handler: Arc>, 38 | } 39 | 40 | impl Docs<()> { 41 | /// Create a new [`Builder`] for the docs protocol, using in memory replica and author storage. 42 | pub fn memory() -> Builder { 43 | Builder::default() 44 | } 45 | 46 | /// Create a new [`Builder`] for the docs protocol, using a persistent replica and author storage 47 | /// in the given directory. 48 | pub fn persistent(path: PathBuf) -> Builder { 49 | Builder { path: Some(path) } 50 | } 51 | } 52 | 53 | impl Docs { 54 | /// Get an in memory client to interact with the docs engine. 55 | #[cfg(feature = "rpc")] 56 | pub fn client(&self) -> &crate::rpc::client::docs::MemClient { 57 | &self 58 | .rpc_handler 59 | .get_or_init(|| crate::rpc::RpcHandler::new(self.engine.clone())) 60 | .client 61 | } 62 | 63 | /// Create a new docs protocol with the given engine. 64 | /// 65 | /// Note that usually you would use the [`Builder`] to create a new docs protocol. 66 | pub fn new(engine: Engine) -> Self { 67 | Self { 68 | engine: Arc::new(engine), 69 | #[cfg(feature = "rpc")] 70 | rpc_handler: Default::default(), 71 | } 72 | } 73 | 74 | /// Handle a docs request from the RPC server. 75 | #[cfg(feature = "rpc")] 76 | pub async fn handle_rpc_request< 77 | C: quic_rpc::server::ChannelTypes, 78 | >( 79 | self, 80 | msg: crate::rpc::proto::Request, 81 | chan: quic_rpc::server::RpcChannel, 82 | ) -> Result<(), quic_rpc::server::RpcServerError> { 83 | crate::rpc::Handler(self.engine.clone()) 84 | .handle_rpc_request(msg, chan) 85 | .await 86 | } 87 | 88 | /// Get the protect callback for the docs engine. 89 | pub fn protect_cb(&self) -> ProtectCb { 90 | self.engine.protect_cb() 91 | } 92 | } 93 | 94 | /// Builder for the docs protocol. 
95 | #[derive(Debug, Default)] 96 | pub struct Builder { 97 | path: Option, 98 | } 99 | 100 | impl Builder { 101 | /// Build a [`Docs`] protocol given a [`Blobs`] and [`Gossip`] protocol. 102 | pub async fn spawn( 103 | self, 104 | blobs: &Blobs, 105 | gossip: &Gossip, 106 | ) -> anyhow::Result> { 107 | let replica_store = match self.path { 108 | Some(ref path) => Store::persistent(path.join("docs.redb"))?, 109 | None => Store::memory(), 110 | }; 111 | let author_store = match self.path { 112 | Some(ref path) => DefaultAuthorStorage::Persistent(path.join("default-author")), 113 | None => DefaultAuthorStorage::Mem, 114 | }; 115 | let engine = Engine::spawn( 116 | blobs.endpoint().clone(), 117 | gossip.clone(), 118 | replica_store, 119 | blobs.store().clone(), 120 | blobs.downloader().clone(), 121 | author_store, 122 | blobs.rt().clone(), 123 | ) 124 | .await?; 125 | Ok(Docs::new(engine)) 126 | } 127 | } 128 | -------------------------------------------------------------------------------- /src/rpc.rs: -------------------------------------------------------------------------------- 1 | //! Quic RPC implementation for docs. 2 | 3 | use std::{ops::Deref, sync::Arc}; 4 | 5 | use iroh::NodeAddr; 6 | use proto::{Request, RpcService}; 7 | use quic_rpc::{ 8 | server::{ChannelTypes, RpcChannel}, 9 | RpcClient, RpcServer, 10 | }; 11 | use serde::{Deserialize, Serialize}; 12 | use tokio_util::task::AbortOnDropHandle; 13 | 14 | use crate::engine::Engine; 15 | 16 | pub mod client; 17 | pub mod proto; 18 | 19 | mod docs_handle_request; 20 | 21 | type RpcError = serde_error::Error; 22 | type RpcResult = std::result::Result; 23 | 24 | #[derive(Debug, Clone)] 25 | pub(crate) struct Handler(pub(crate) Arc>); 26 | 27 | impl Deref for Handler { 28 | type Target = Engine; 29 | 30 | fn deref(&self) -> &Self::Target { 31 | &self.0 32 | } 33 | } 34 | 35 | impl Handler { 36 | /// Handle a docs request from the RPC server. 
37 | pub async fn handle_rpc_request>( 38 | self, 39 | msg: Request, 40 | chan: RpcChannel, 41 | ) -> Result<(), quic_rpc::server::RpcServerError> { 42 | use Request::*; 43 | let this = self; 44 | match msg { 45 | Open(msg) => chan.rpc(msg, this, Self::doc_open).await, 46 | Close(msg) => chan.rpc(msg, this, Self::doc_close).await, 47 | Status(msg) => chan.rpc(msg, this, Self::doc_status).await, 48 | List(msg) => chan.server_streaming(msg, this, Self::doc_list).await, 49 | Create(msg) => chan.rpc(msg, this, Self::doc_create).await, 50 | Drop(msg) => chan.rpc(msg, this, Self::doc_drop).await, 51 | Import(msg) => chan.rpc(msg, this, Self::doc_import).await, 52 | Set(msg) => chan.rpc(msg, this, Self::doc_set).await, 53 | ImportFile(msg) => { 54 | chan.server_streaming(msg, this, Self::doc_import_file) 55 | .await 56 | } 57 | ExportFile(msg) => { 58 | chan.server_streaming(msg, this, Self::doc_export_file) 59 | .await 60 | } 61 | Del(msg) => chan.rpc(msg, this, Self::doc_del).await, 62 | SetHash(msg) => chan.rpc(msg, this, Self::doc_set_hash).await, 63 | Get(msg) => chan.server_streaming(msg, this, Self::doc_get_many).await, 64 | GetExact(msg) => chan.rpc(msg, this, Self::doc_get_exact).await, 65 | StartSync(msg) => chan.rpc(msg, this, Self::doc_start_sync).await, 66 | Leave(msg) => chan.rpc(msg, this, Self::doc_leave).await, 67 | Share(msg) => chan.rpc(msg, this, Self::doc_share).await, 68 | Subscribe(msg) => { 69 | chan.try_server_streaming(msg, this, Self::doc_subscribe) 70 | .await 71 | } 72 | SetDownloadPolicy(msg) => chan.rpc(msg, this, Self::doc_set_download_policy).await, 73 | GetDownloadPolicy(msg) => chan.rpc(msg, this, Self::doc_get_download_policy).await, 74 | GetSyncPeers(msg) => chan.rpc(msg, this, Self::doc_get_sync_peers).await, 75 | 76 | AuthorList(msg) => chan.server_streaming(msg, this, Self::author_list).await, 77 | AuthorCreate(msg) => chan.rpc(msg, this, Self::author_create).await, 78 | AuthorImport(msg) => chan.rpc(msg, this, Self::author_import).await, 79 | AuthorExport(msg) => chan.rpc(msg, this, Self::author_export).await, 80 | AuthorDelete(msg) => chan.rpc(msg, this, Self::author_delete).await, 81 | AuthorGetDefault(msg) => chan.rpc(msg, this, Self::author_default).await, 82 | AuthorSetDefault(msg) => chan.rpc(msg, this, Self::author_set_default).await, 83 | } 84 | } 85 | } 86 | 87 | #[derive(Debug)] 88 | pub(crate) struct RpcHandler { 89 | /// Client to hand out 90 | pub(crate) client: client::docs::MemClient, 91 | /// Handler task 92 | _handler: AbortOnDropHandle<()>, 93 | } 94 | 95 | impl RpcHandler { 96 | pub fn new(engine: Arc>) -> Self { 97 | let engine = Handler(engine); 98 | let (listener, connector) = quic_rpc::transport::flume::channel(1); 99 | let listener = RpcServer::new(listener); 100 | let client = client::docs::MemClient::new(RpcClient::new(connector)); 101 | let _handler = listener 102 | .spawn_accept_loop(move |req, chan| engine.clone().handle_rpc_request(req, chan)); 103 | Self { client, _handler } 104 | } 105 | } 106 | 107 | /// Options to configure what is included in a [`NodeAddr`]. 108 | #[derive( 109 | Copy, 110 | Clone, 111 | PartialEq, 112 | Eq, 113 | Default, 114 | Debug, 115 | derive_more::Display, 116 | derive_more::FromStr, 117 | Serialize, 118 | Deserialize, 119 | )] 120 | pub enum AddrInfoOptions { 121 | /// Only the Node ID is added. 122 | /// 123 | /// This usually means that iroh-dns discovery is used to find address information. 
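A brief sketch of how the RPC pieces above fit together when the `rpc` feature is enabled: `Docs::client` lazily builds the `RpcHandler`, which wires a flume channel pair into an in-memory `RpcServer`/`RpcClient` and serves every request through the dispatch in `handle_rpc_request`. Here `docs` is assumed to be a spawned `Docs` instance; the client's method surface is defined in `rpc::client::docs`.

    // The first call spawns the in-memory accept loop; later calls reuse it.
    let client = docs.client();
    // All document and author operations issued through `client` end up in the
    // `handle_rpc_request` match above.
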
124 | #[default] 125 | Id, 126 | /// Includes the Node ID and both the relay URL, and the direct addresses. 127 | RelayAndAddresses, 128 | /// Includes the Node ID and the relay URL. 129 | Relay, 130 | /// Includes the Node ID and the direct addresses. 131 | Addresses, 132 | } 133 | 134 | impl AddrInfoOptions { 135 | /// Apply the options to the given address. 136 | pub fn apply( 137 | &self, 138 | NodeAddr { 139 | node_id, 140 | relay_url, 141 | direct_addresses, 142 | }: &NodeAddr, 143 | ) -> NodeAddr { 144 | match self { 145 | Self::Id => NodeAddr { 146 | node_id: *node_id, 147 | relay_url: None, 148 | direct_addresses: Default::default(), 149 | }, 150 | Self::Relay => NodeAddr { 151 | node_id: *node_id, 152 | relay_url: relay_url.clone(), 153 | direct_addresses: Default::default(), 154 | }, 155 | Self::Addresses => NodeAddr { 156 | node_id: *node_id, 157 | relay_url: None, 158 | direct_addresses: direct_addresses.clone(), 159 | }, 160 | Self::RelayAndAddresses => NodeAddr { 161 | node_id: *node_id, 162 | relay_url: relay_url.clone(), 163 | direct_addresses: direct_addresses.clone(), 164 | }, 165 | } 166 | } 167 | } 168 | -------------------------------------------------------------------------------- /src/rpc/client.rs: -------------------------------------------------------------------------------- 1 | //! RPC Client for docs and authors 2 | use anyhow::Result; 3 | use futures_util::{Stream, StreamExt}; 4 | 5 | pub mod authors; 6 | pub mod docs; 7 | 8 | fn flatten( 9 | s: impl Stream, E2>>, 10 | ) -> impl Stream> 11 | where 12 | E1: std::error::Error + Send + Sync + 'static, 13 | E2: std::error::Error + Send + Sync + 'static, 14 | { 15 | s.map(|res| match res { 16 | Ok(Ok(res)) => Ok(res), 17 | Ok(Err(err)) => Err(err.into()), 18 | Err(err) => Err(err.into()), 19 | }) 20 | } 21 | -------------------------------------------------------------------------------- /src/rpc/client/authors.rs: -------------------------------------------------------------------------------- 1 | //! API for document management. 2 | //! 3 | //! The main entry point is the [`Client`]. 4 | 5 | use anyhow::Result; 6 | use futures_lite::{Stream, StreamExt}; 7 | use quic_rpc::{client::BoxedConnector, Connector}; 8 | 9 | use super::flatten; 10 | #[doc(inline)] 11 | pub use crate::engine::{Origin, SyncEvent, SyncReason}; 12 | use crate::{ 13 | rpc::proto::{ 14 | AuthorCreateRequest, AuthorDeleteRequest, AuthorExportRequest, AuthorGetDefaultRequest, 15 | AuthorImportRequest, AuthorListRequest, AuthorSetDefaultRequest, RpcService, 16 | }, 17 | Author, AuthorId, 18 | }; 19 | 20 | /// Iroh docs client. 21 | #[derive(Debug, Clone)] 22 | #[repr(transparent)] 23 | pub struct Client> { 24 | pub(super) rpc: quic_rpc::RpcClient, 25 | } 26 | 27 | impl> Client { 28 | /// Creates a new docs client. 29 | pub fn new(rpc: quic_rpc::RpcClient) -> Self { 30 | Self { rpc } 31 | } 32 | 33 | /// Creates a new document author. 34 | /// 35 | /// You likely want to save the returned [`AuthorId`] somewhere so that you can use this author 36 | /// again. 37 | /// 38 | /// If you need only a single author, use [`Self::default`]. 39 | pub async fn create(&self) -> Result { 40 | let res = self.rpc.rpc(AuthorCreateRequest).await??; 41 | Ok(res.author_id) 42 | } 43 | 44 | /// Returns the default document author of this node. 45 | /// 46 | /// On persistent nodes, the author is created on first start and its public key is saved 47 | /// in the data directory. 48 | /// 49 | /// The default author can be set with [`Self::set_default`]. 
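For illustration, a sketch of `AddrInfoOptions::apply` deciding how much addressing information to keep before an address is shared; `addr` is assumed to be a fully populated `NodeAddr` obtained from the local endpoint.

    // Keep only the node id and relay URL; direct addresses are dropped.
    let trimmed = AddrInfoOptions::Relay.apply(&addr);
    assert_eq!(trimmed.node_id, addr.node_id);
    assert!(trimmed.direct_addresses.is_empty());
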
50 | pub async fn default(&self) -> Result { 51 | let res = self.rpc.rpc(AuthorGetDefaultRequest).await??; 52 | Ok(res.author_id) 53 | } 54 | 55 | /// Sets the node-wide default author. 56 | /// 57 | /// If the author does not exist, an error is returned. 58 | /// 59 | /// On a persistent node, the author id will be saved to a file in the data directory and 60 | /// reloaded after a restart. 61 | pub async fn set_default(&self, author_id: AuthorId) -> Result<()> { 62 | self.rpc 63 | .rpc(AuthorSetDefaultRequest { author_id }) 64 | .await??; 65 | Ok(()) 66 | } 67 | 68 | /// Lists document authors for which we have a secret key. 69 | /// 70 | /// It's only possible to create writes from authors that we have the secret key of. 71 | pub async fn list(&self) -> Result>> { 72 | let stream = self.rpc.server_streaming(AuthorListRequest {}).await?; 73 | Ok(flatten(stream).map(|res| res.map(|res| res.author_id))) 74 | } 75 | 76 | /// Exports the given author. 77 | /// 78 | /// Warning: The [`Author`] struct contains sensitive data. 79 | pub async fn export(&self, author: AuthorId) -> Result> { 80 | let res = self.rpc.rpc(AuthorExportRequest { author }).await??; 81 | Ok(res.author) 82 | } 83 | 84 | /// Imports the given author. 85 | /// 86 | /// Warning: The [`Author`] struct contains sensitive data. 87 | pub async fn import(&self, author: Author) -> Result<()> { 88 | self.rpc.rpc(AuthorImportRequest { author }).await??; 89 | Ok(()) 90 | } 91 | 92 | /// Deletes the given author by id. 93 | /// 94 | /// Warning: This permanently removes this author. 95 | /// 96 | /// Returns an error if attempting to delete the default author. 97 | pub async fn delete(&self, author: AuthorId) -> Result<()> { 98 | self.rpc.rpc(AuthorDeleteRequest { author }).await??; 99 | Ok(()) 100 | } 101 | } 102 | -------------------------------------------------------------------------------- /src/store.rs: -------------------------------------------------------------------------------- 1 | //! Storage trait and implementation for iroh-docs documents 2 | use std::num::NonZeroUsize; 3 | 4 | use anyhow::Result; 5 | use bytes::Bytes; 6 | use serde::{Deserialize, Serialize}; 7 | 8 | use crate::{AuthorId, Entry, NamespaceId}; 9 | 10 | pub mod fs; 11 | mod pubkeys; 12 | mod util; 13 | pub use fs::Store; 14 | pub use pubkeys::*; 15 | 16 | /// Number of peers to cache per document. 17 | pub(crate) const PEERS_PER_DOC_CACHE_SIZE: NonZeroUsize = match NonZeroUsize::new(5) { 18 | Some(val) => val, 19 | None => panic!("this is clearly non zero"), 20 | }; 21 | 22 | /// Error return from [`Store::open_replica`] 23 | #[derive(Debug, thiserror::Error)] 24 | pub enum OpenError { 25 | /// The replica does not exist. 26 | #[error("Replica not found")] 27 | NotFound, 28 | /// Other error while opening the replica. 29 | #[error("{0}")] 30 | Other(#[from] anyhow::Error), 31 | } 32 | 33 | /// Store that gives read access to download policies for a document. 34 | pub trait DownloadPolicyStore { 35 | /// Get the download policy for a document. 
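A short usage sketch for the authors client above; `client` is assumed to be a connected `authors::Client`, and the stream combinator comes from `futures_lite::StreamExt` as in the module's own imports.

    let author = client.create().await?;
    client.set_default(author).await?;
    let mut authors = client.list().await?;
    while let Some(next) = authors.next().await {
        println!("author: {:?}", next?);
    }
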
36 | fn get_download_policy(&mut self, namespace: &NamespaceId) -> Result; 37 | } 38 | 39 | impl DownloadPolicyStore for &mut T { 40 | fn get_download_policy(&mut self, namespace: &NamespaceId) -> Result { 41 | DownloadPolicyStore::get_download_policy(*self, namespace) 42 | } 43 | } 44 | 45 | impl DownloadPolicyStore for crate::store::Store { 46 | fn get_download_policy(&mut self, namespace: &NamespaceId) -> Result { 47 | self.get_download_policy(namespace) 48 | } 49 | } 50 | 51 | /// Outcome of [`Store::import_namespace`] 52 | #[derive(Debug, Clone, Copy)] 53 | pub enum ImportNamespaceOutcome { 54 | /// The namespace did not exist before and is now inserted. 55 | Inserted, 56 | /// The namespace existed and now has an upgraded capability. 57 | Upgraded, 58 | /// The namespace existed and its capability remains unchanged. 59 | NoChange, 60 | } 61 | 62 | /// Download policy to decide which content blobs shall be downloaded. 63 | #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] 64 | pub enum DownloadPolicy { 65 | /// Do not download any key unless it matches one of the filters. 66 | NothingExcept(Vec), 67 | /// Download every key unless it matches one of the filters. 68 | EverythingExcept(Vec), 69 | } 70 | 71 | impl Default for DownloadPolicy { 72 | fn default() -> Self { 73 | DownloadPolicy::EverythingExcept(Vec::default()) 74 | } 75 | } 76 | 77 | /// Filter strategy used in download policies. 78 | #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] 79 | pub enum FilterKind { 80 | /// Matches if the contained bytes are a prefix of the key. 81 | Prefix(Bytes), 82 | /// Matches if the contained bytes and the key are the same. 83 | Exact(Bytes), 84 | } 85 | 86 | impl std::fmt::Display for FilterKind { 87 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 88 | // hardly usable but good enough as a poc 89 | let (kind, bytes) = match self { 90 | FilterKind::Prefix(bytes) => ("prefix", bytes), 91 | FilterKind::Exact(bytes) => ("exact", bytes), 92 | }; 93 | let (encoding, repr) = match String::from_utf8(bytes.to_vec()) { 94 | Ok(repr) => ("utf8", repr), 95 | Err(_) => ("hex", hex::encode(bytes)), 96 | }; 97 | write!(f, "{kind}:{encoding}:{repr}") 98 | } 99 | } 100 | 101 | impl std::str::FromStr for FilterKind { 102 | type Err = anyhow::Error; 103 | 104 | fn from_str(s: &str) -> std::result::Result { 105 | let Some((kind, rest)) = s.split_once(':') else { 106 | anyhow::bail!("missing filter kind, either \"prefix:\" or \"exact:\"") 107 | }; 108 | let Some((encoding, rest)) = rest.split_once(':') else { 109 | anyhow::bail!("missing encoding: either \"hex:\" or \"utf8:\"") 110 | }; 111 | 112 | let is_exact = match kind { 113 | "exact" => true, 114 | "prefix" => false, 115 | other => { 116 | anyhow::bail!("expected filter kind \"prefix:\" or \"exact:\", found {other}") 117 | } 118 | }; 119 | 120 | let decoded = match encoding { 121 | "utf8" => Bytes::from(rest.to_owned()), 122 | "hex" => match hex::decode(rest) { 123 | Ok(bytes) => Bytes::from(bytes), 124 | Err(_) => anyhow::bail!("failed to decode hex"), 125 | }, 126 | other => { 127 | anyhow::bail!("expected encoding: either \"hex:\" or \"utf8:\", found {other}") 128 | } 129 | }; 130 | 131 | if is_exact { 132 | Ok(FilterKind::Exact(decoded)) 133 | } else { 134 | Ok(FilterKind::Prefix(decoded)) 135 | } 136 | } 137 | } 138 | 139 | impl FilterKind { 140 | /// Verifies whether this filter matches a given key 141 | pub fn matches(&self, key: impl AsRef<[u8]>) -> bool { 142 | match self { 143 | 
FilterKind::Prefix(prefix) => key.as_ref().starts_with(prefix), 144 | FilterKind::Exact(expected) => expected == key.as_ref(), 145 | } 146 | } 147 | } 148 | 149 | impl DownloadPolicy { 150 | /// Check if an entry should be downloaded according to this policy. 151 | pub fn matches(&self, entry: &Entry) -> bool { 152 | let key = entry.key(); 153 | match self { 154 | DownloadPolicy::NothingExcept(patterns) => { 155 | patterns.iter().any(|pattern| pattern.matches(key)) 156 | } 157 | DownloadPolicy::EverythingExcept(patterns) => { 158 | patterns.iter().all(|pattern| !pattern.matches(key)) 159 | } 160 | } 161 | } 162 | } 163 | 164 | /// A query builder for document queries. 165 | #[derive(Debug, Default)] 166 | pub struct QueryBuilder { 167 | kind: K, 168 | filter_author: AuthorFilter, 169 | filter_key: KeyFilter, 170 | limit: Option, 171 | offset: u64, 172 | include_empty: bool, 173 | sort_direction: SortDirection, 174 | } 175 | 176 | impl QueryBuilder { 177 | /// Call to include empty entries (deletion markers). 178 | pub fn include_empty(mut self) -> Self { 179 | self.include_empty = true; 180 | self 181 | } 182 | /// Filter by exact key match. 183 | pub fn key_exact(mut self, key: impl AsRef<[u8]>) -> Self { 184 | self.filter_key = KeyFilter::Exact(key.as_ref().to_vec().into()); 185 | self 186 | } 187 | /// Filter by key prefix. 188 | pub fn key_prefix(mut self, key: impl AsRef<[u8]>) -> Self { 189 | self.filter_key = KeyFilter::Prefix(key.as_ref().to_vec().into()); 190 | self 191 | } 192 | /// Filter by author. 193 | pub fn author(mut self, author: AuthorId) -> Self { 194 | self.filter_author = AuthorFilter::Exact(author); 195 | self 196 | } 197 | /// Set the maximum number of entries to be returned. 198 | pub fn limit(mut self, limit: u64) -> Self { 199 | self.limit = Some(limit); 200 | self 201 | } 202 | /// Set the offset within the result set from where to start returning results. 203 | pub fn offset(mut self, offset: u64) -> Self { 204 | self.offset = offset; 205 | self 206 | } 207 | } 208 | 209 | /// Query on all entries without aggregation. 210 | #[derive(Debug, Clone, Default, Serialize, Deserialize)] 211 | pub struct FlatQuery { 212 | sort_by: SortBy, 213 | } 214 | 215 | /// Query that only returns the latest entry for a key which has entries from multiple authors. 216 | #[derive(Debug, Clone, Default, Serialize, Deserialize)] 217 | pub struct SingleLatestPerKeyQuery {} 218 | 219 | impl QueryBuilder { 220 | /// Set the sort for the query. 221 | /// 222 | /// The default is to sort by author, then by key, in ascending order. 223 | pub fn sort_by(mut self, sort_by: SortBy, direction: SortDirection) -> Self { 224 | self.kind.sort_by = sort_by; 225 | self.sort_direction = direction; 226 | self 227 | } 228 | 229 | /// Build the query. 230 | pub fn build(self) -> Query { 231 | Query::from(self) 232 | } 233 | } 234 | 235 | impl QueryBuilder { 236 | /// Set the order direction for the query. 237 | /// 238 | /// Ordering is always by key for this query type. 239 | /// Default direction is ascending. 240 | pub fn sort_direction(mut self, direction: SortDirection) -> Self { 241 | self.sort_direction = direction; 242 | self 243 | } 244 | 245 | /// Build the query. 
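As a sketch of the filter and policy types above working together: a download policy can be assembled from parsed filters and then admits only matching keys. The key names are illustrative; applying the policy to a document goes through the RPC/engine layer, not shown here.

    let policy = DownloadPolicy::NothingExcept(vec![
        "prefix:utf8:memes/".parse::<FilterKind>()?,
        FilterKind::Exact(Bytes::from_static(b"welcome.txt")),
    ]);
    // `policy.matches(&entry)` is now true only for entries whose key starts with
    // "memes/" or is exactly "welcome.txt".
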
246 | pub fn build(self) -> Query { 247 | Query::from(self) 248 | } 249 | } 250 | 251 | impl From> for Query { 252 | fn from(builder: QueryBuilder) -> Query { 253 | Query { 254 | kind: QueryKind::SingleLatestPerKey(builder.kind), 255 | filter_author: builder.filter_author, 256 | filter_key: builder.filter_key, 257 | limit: builder.limit, 258 | offset: builder.offset, 259 | include_empty: builder.include_empty, 260 | sort_direction: builder.sort_direction, 261 | } 262 | } 263 | } 264 | 265 | impl From> for Query { 266 | fn from(builder: QueryBuilder) -> Query { 267 | Query { 268 | kind: QueryKind::Flat(builder.kind), 269 | filter_author: builder.filter_author, 270 | filter_key: builder.filter_key, 271 | limit: builder.limit, 272 | offset: builder.offset, 273 | include_empty: builder.include_empty, 274 | sort_direction: builder.sort_direction, 275 | } 276 | } 277 | } 278 | 279 | /// Note: When using the `SingleLatestPerKey` query kind, the key filter is applied *before* the 280 | /// grouping, the author filter is applied *after* the grouping. 281 | #[derive(Debug, Clone, Serialize, Deserialize)] 282 | pub struct Query { 283 | kind: QueryKind, 284 | filter_author: AuthorFilter, 285 | filter_key: KeyFilter, 286 | limit: Option, 287 | offset: u64, 288 | include_empty: bool, 289 | sort_direction: SortDirection, 290 | } 291 | 292 | impl Query { 293 | /// Query all records. 294 | pub fn all() -> QueryBuilder { 295 | Default::default() 296 | } 297 | /// Query only the latest entry for each key, omitting older entries if the entry was written 298 | /// to by multiple authors. 299 | pub fn single_latest_per_key() -> QueryBuilder { 300 | Default::default() 301 | } 302 | 303 | /// Create a [`Query::all`] query filtered by a single author. 304 | pub fn author(author: AuthorId) -> QueryBuilder { 305 | Self::all().author(author) 306 | } 307 | 308 | /// Create a [`Query::all`] query filtered by a single key. 309 | pub fn key_exact(key: impl AsRef<[u8]>) -> QueryBuilder { 310 | Self::all().key_exact(key) 311 | } 312 | 313 | /// Create a [`Query::all`] query filtered by a key prefix. 314 | pub fn key_prefix(prefix: impl AsRef<[u8]>) -> QueryBuilder { 315 | Self::all().key_prefix(prefix) 316 | } 317 | 318 | /// Get the limit for this query (max. number of entries to emit). 319 | pub fn limit(&self) -> Option { 320 | self.limit 321 | } 322 | 323 | /// Get the offset for this query (number of entries to skip at the beginning). 324 | pub fn offset(&self) -> u64 { 325 | self.offset 326 | } 327 | } 328 | 329 | /// Sort direction 330 | #[derive(Debug, Clone, Copy, Default, Serialize, Deserialize)] 331 | pub enum SortDirection { 332 | /// Sort ascending 333 | #[default] 334 | Asc, 335 | /// Sort descending 336 | Desc, 337 | } 338 | 339 | #[derive(derive_more::Debug, Clone, Serialize, Deserialize)] 340 | enum QueryKind { 341 | #[debug("Flat {{ sort_by: {:?}}}", _0)] 342 | Flat(FlatQuery), 343 | #[debug("SingleLatestPerKey")] 344 | SingleLatestPerKey(SingleLatestPerKeyQuery), 345 | } 346 | 347 | /// Fields by which the query can be sorted 348 | #[derive(Debug, Clone, Copy, Default, Serialize, Deserialize)] 349 | pub enum SortBy { 350 | /// Sort by key, then author. 351 | KeyAuthor, 352 | /// Sort by author, then key. 353 | #[default] 354 | AuthorKey, 355 | } 356 | 357 | /// Key matching. 358 | #[derive(Debug, Serialize, Deserialize, Clone, Default, Eq, PartialEq)] 359 | pub enum KeyFilter { 360 | /// Matches any key. 361 | #[default] 362 | Any, 363 | /// Only keys that are exactly the provided value. 
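A usage sketch for the query builders defined above, using only the methods shown: the newest entry per key under a prefix, newest first, at most ten results, plus a flat per-author lookup (`author_id` is an assumed `AuthorId`).

    let query = Query::single_latest_per_key()
        .key_prefix("posts/")
        .sort_direction(SortDirection::Desc)
        .limit(10)
        .build();

    let by_author = Query::author(author_id).key_exact("profile.json").build();
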
364 | Exact(Bytes), 365 | /// All keys that start with the provided value. 366 | Prefix(Bytes), 367 | } 368 | 369 | impl> From for KeyFilter { 370 | fn from(value: T) -> Self { 371 | KeyFilter::Exact(Bytes::copy_from_slice(value.as_ref())) 372 | } 373 | } 374 | 375 | impl KeyFilter { 376 | /// Test if a key is matched by this [`KeyFilter`]. 377 | pub fn matches(&self, key: &[u8]) -> bool { 378 | match self { 379 | Self::Any => true, 380 | Self::Exact(k) => &k[..] == key, 381 | Self::Prefix(p) => key.starts_with(p), 382 | } 383 | } 384 | } 385 | 386 | /// Author matching. 387 | #[derive(Debug, Serialize, Deserialize, Clone, Default, Eq, PartialEq)] 388 | pub enum AuthorFilter { 389 | /// Matches any author. 390 | #[default] 391 | Any, 392 | /// Matches exactly the provided author. 393 | Exact(AuthorId), 394 | } 395 | 396 | impl AuthorFilter { 397 | /// Test if an author is matched by this [`AuthorFilter`]. 398 | pub fn matches(&self, author: &AuthorId) -> bool { 399 | match self { 400 | Self::Any => true, 401 | Self::Exact(a) => a == author, 402 | } 403 | } 404 | } 405 | 406 | impl From for AuthorFilter { 407 | fn from(value: AuthorId) -> Self { 408 | AuthorFilter::Exact(value) 409 | } 410 | } 411 | 412 | #[cfg(test)] 413 | mod tests { 414 | use super::*; 415 | 416 | #[test] 417 | fn test_filter_kind_encode_decode() { 418 | const REPR: &str = "prefix:utf8:memes/futurama"; 419 | let filter: FilterKind = REPR.parse().expect("should decode"); 420 | assert_eq!( 421 | filter, 422 | FilterKind::Prefix(Bytes::from(String::from("memes/futurama"))) 423 | ); 424 | assert_eq!(filter.to_string(), REPR) 425 | } 426 | } 427 | -------------------------------------------------------------------------------- /src/store/fs/bounds.rs: -------------------------------------------------------------------------------- 1 | use std::ops::{Bound, RangeBounds}; 2 | 3 | use bytes::Bytes; 4 | 5 | use super::tables::{RecordsByKeyId, RecordsByKeyIdOwned, RecordsId, RecordsIdOwned}; 6 | use crate::{store::KeyFilter, AuthorId, NamespaceId}; 7 | 8 | /// Bounds on the records table. 
9 | /// 10 | /// Supports bounds by author, key 11 | pub struct RecordsBounds(Bound, Bound); 12 | 13 | impl RecordsBounds { 14 | pub fn new(start: Bound, end: Bound) -> Self { 15 | Self(start, end) 16 | } 17 | 18 | pub fn author_key(ns: NamespaceId, author: AuthorId, key_matcher: KeyFilter) -> Self { 19 | let key_is_exact = matches!(key_matcher, KeyFilter::Exact(_)); 20 | let key = match key_matcher { 21 | KeyFilter::Any => Bytes::new(), 22 | KeyFilter::Exact(key) => key, 23 | KeyFilter::Prefix(prefix) => prefix, 24 | }; 25 | let author = author.to_bytes(); 26 | let ns = ns.to_bytes(); 27 | let mut author_end = author; 28 | let mut ns_end = ns; 29 | let mut key_end = key.to_vec(); 30 | 31 | let start = (ns, author, key); 32 | 33 | let end = if key_is_exact { 34 | Bound::Included(start.clone()) 35 | } else if increment_by_one(&mut key_end) { 36 | Bound::Excluded((ns, author, key_end.into())) 37 | } else if increment_by_one(&mut author_end) { 38 | Bound::Excluded((ns, author_end, Bytes::new())) 39 | } else if increment_by_one(&mut ns_end) { 40 | Bound::Excluded((ns_end, [0u8; 32], Bytes::new())) 41 | } else { 42 | Bound::Unbounded 43 | }; 44 | 45 | Self(Bound::Included(start), end) 46 | } 47 | 48 | pub fn author_prefix(ns: NamespaceId, author: AuthorId, prefix: Bytes) -> Self { 49 | RecordsBounds::author_key(ns, author, KeyFilter::Prefix(prefix)) 50 | } 51 | 52 | pub fn namespace(ns: NamespaceId) -> Self { 53 | Self::new(Self::namespace_start(&ns), Self::namespace_end(&ns)) 54 | } 55 | 56 | pub fn from_start(ns: &NamespaceId, end: Bound) -> Self { 57 | Self::new(Self::namespace_start(ns), end) 58 | } 59 | 60 | pub fn to_end(ns: &NamespaceId, start: Bound) -> Self { 61 | Self::new(start, Self::namespace_end(ns)) 62 | } 63 | 64 | pub fn as_ref(&self) -> (Bound, Bound) { 65 | fn map(id: &RecordsIdOwned) -> RecordsId { 66 | (&id.0, &id.1, &id.2[..]) 67 | } 68 | (map_bound(&self.0, map), map_bound(&self.1, map)) 69 | } 70 | 71 | fn namespace_start(namespace: &NamespaceId) -> Bound { 72 | Bound::Included((namespace.to_bytes(), [0u8; 32], Bytes::new())) 73 | } 74 | 75 | fn namespace_end(namespace: &NamespaceId) -> Bound { 76 | let mut ns_end = namespace.to_bytes(); 77 | if increment_by_one(&mut ns_end) { 78 | Bound::Excluded((ns_end, [0u8; 32], Bytes::new())) 79 | } else { 80 | Bound::Unbounded 81 | } 82 | } 83 | } 84 | 85 | impl RangeBounds for RecordsBounds { 86 | fn start_bound(&self) -> Bound<&RecordsIdOwned> { 87 | map_bound(&self.0, |s| s) 88 | } 89 | 90 | fn end_bound(&self) -> Bound<&RecordsIdOwned> { 91 | map_bound(&self.1, |s| s) 92 | } 93 | } 94 | 95 | impl From<(Bound, Bound)> for RecordsBounds { 96 | fn from(value: (Bound, Bound)) -> Self { 97 | Self::new(value.0, value.1) 98 | } 99 | } 100 | 101 | /// Bounds for the by-key index table. 102 | /// 103 | /// Supports bounds by key. 
104 | pub struct ByKeyBounds(Bound, Bound); 105 | impl ByKeyBounds { 106 | pub fn new(ns: NamespaceId, matcher: &KeyFilter) -> Self { 107 | match matcher { 108 | KeyFilter::Any => Self::namespace(ns), 109 | KeyFilter::Exact(key) => { 110 | let start = (ns.to_bytes(), key.clone(), [0u8; 32]); 111 | let end = (ns.to_bytes(), key.clone(), [255u8; 32]); 112 | Self(Bound::Included(start), Bound::Included(end)) 113 | } 114 | KeyFilter::Prefix(ref prefix) => { 115 | let start = Bound::Included((ns.to_bytes(), prefix.clone(), [0u8; 32])); 116 | 117 | let mut ns_end = ns.to_bytes(); 118 | let mut key_end = prefix.to_vec(); 119 | let end = if increment_by_one(&mut key_end) { 120 | Bound::Excluded((ns.to_bytes(), key_end.into(), [0u8; 32])) 121 | } else if increment_by_one(&mut ns_end) { 122 | Bound::Excluded((ns_end, Bytes::new(), [0u8; 32])) 123 | } else { 124 | Bound::Unbounded 125 | }; 126 | Self(start, end) 127 | } 128 | } 129 | } 130 | 131 | pub fn namespace(ns: NamespaceId) -> Self { 132 | let start = Bound::Included((ns.to_bytes(), Bytes::new(), [0u8; 32])); 133 | let mut ns_end = ns.to_bytes(); 134 | let end = if increment_by_one(&mut ns_end) { 135 | Bound::Excluded((ns_end, Bytes::new(), [0u8; 32])) 136 | } else { 137 | Bound::Unbounded 138 | }; 139 | Self(start, end) 140 | } 141 | 142 | pub fn as_ref(&self) -> (Bound, Bound) { 143 | fn map(id: &RecordsByKeyIdOwned) -> RecordsByKeyId { 144 | (&id.0, &id.1[..], &id.2) 145 | } 146 | (map_bound(&self.0, map), map_bound(&self.1, map)) 147 | } 148 | } 149 | 150 | impl RangeBounds for ByKeyBounds { 151 | fn start_bound(&self) -> Bound<&RecordsByKeyIdOwned> { 152 | map_bound(&self.0, |s| s) 153 | } 154 | 155 | fn end_bound(&self) -> Bound<&RecordsByKeyIdOwned> { 156 | map_bound(&self.1, |s| s) 157 | } 158 | } 159 | 160 | /// Increment a byte string by one, by incrementing the last byte that is not 255 by one. 161 | /// 162 | /// Returns false if all bytes are 255. 
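To make the carry behaviour of `increment_by_one` (defined just below) concrete: incrementing past a trailing 0xff carries into the next byte, which is what turns an inclusive prefix start into an exclusive end bound; an all-0xff input cannot be incremented, so the bound builders above fall back to `Bound::Unbounded`. A sketch, as if written in a test in this module:

    let mut end = vec![0x01, 0xff];
    assert!(increment_by_one(&mut end));
    assert_eq!(end, vec![0x02, 0x00]);

    let mut max = vec![0xff, 0xff];
    assert!(!increment_by_one(&mut max));
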
163 | fn increment_by_one(value: &mut [u8]) -> bool { 164 | for char in value.iter_mut().rev() { 165 | if *char != 255 { 166 | *char += 1; 167 | return true; 168 | } else { 169 | *char = 0; 170 | } 171 | } 172 | false 173 | } 174 | 175 | fn map_bound<'a, T, U: 'a>(bound: &'a Bound, f: impl Fn(&'a T) -> U) -> Bound { 176 | match bound { 177 | Bound::Unbounded => Bound::Unbounded, 178 | Bound::Included(t) => Bound::Included(f(t)), 179 | Bound::Excluded(t) => Bound::Excluded(f(t)), 180 | } 181 | } 182 | 183 | #[cfg(test)] 184 | mod tests { 185 | use super::*; 186 | 187 | #[test] 188 | fn records_bounds() { 189 | let ns = NamespaceId::from(&[255u8; 32]); 190 | 191 | let bounds = RecordsBounds::namespace(ns); 192 | assert_eq!( 193 | bounds.start_bound(), 194 | Bound::Included(&(ns.to_bytes(), [0u8; 32], Bytes::new())) 195 | ); 196 | assert_eq!(bounds.end_bound(), Bound::Unbounded); 197 | 198 | let a = AuthorId::from(&[255u8; 32]); 199 | 200 | let bounds = RecordsBounds::author_key(ns, a, KeyFilter::Any); 201 | assert_eq!( 202 | bounds.start_bound(), 203 | Bound::Included(&(ns.to_bytes(), a.to_bytes(), Bytes::new())) 204 | ); 205 | assert_eq!(bounds.end_bound(), Bound::Unbounded); 206 | 207 | let a = AuthorId::from(&[0u8; 32]); 208 | let mut a_end = a.to_bytes(); 209 | a_end[31] = 1; 210 | let bounds = RecordsBounds::author_key(ns, a, KeyFilter::Any); 211 | assert_eq!( 212 | bounds.end_bound(), 213 | Bound::Excluded(&(ns.to_bytes(), a_end, Default::default())) 214 | ); 215 | 216 | let bounds = RecordsBounds::author_key(ns, a, KeyFilter::Prefix(vec![1u8].into())); 217 | assert_eq!( 218 | bounds.start_bound(), 219 | Bound::Included(&(ns.to_bytes(), a.to_bytes(), vec![1u8].into())) 220 | ); 221 | assert_eq!( 222 | bounds.end_bound(), 223 | Bound::Excluded(&(ns.to_bytes(), a.to_bytes(), vec![2u8].into())) 224 | ); 225 | 226 | let bounds = RecordsBounds::author_key(ns, a, KeyFilter::Exact(vec![1u8].into())); 227 | assert_eq!( 228 | bounds.start_bound(), 229 | Bound::Included(&(ns.to_bytes(), a.to_bytes(), vec![1u8].into())) 230 | ); 231 | assert_eq!( 232 | bounds.end_bound(), 233 | Bound::Included(&(ns.to_bytes(), a.to_bytes(), vec![1u8].into())) 234 | ); 235 | } 236 | 237 | #[test] 238 | fn by_key_bounds() { 239 | let ns = NamespaceId::from(&[255u8; 32]); 240 | 241 | let bounds = ByKeyBounds::namespace(ns); 242 | assert_eq!( 243 | bounds.start_bound(), 244 | Bound::Included(&(ns.to_bytes(), Bytes::new(), [0u8; 32])) 245 | ); 246 | assert_eq!(bounds.end_bound(), Bound::Unbounded); 247 | 248 | let bounds = ByKeyBounds::new(ns, &KeyFilter::Any); 249 | assert_eq!( 250 | bounds.start_bound(), 251 | Bound::Included(&(ns.to_bytes(), Bytes::new(), [0u8; 32])) 252 | ); 253 | assert_eq!(bounds.end_bound(), Bound::Unbounded); 254 | 255 | let bounds = ByKeyBounds::new(ns, &KeyFilter::Prefix(vec![1u8].into())); 256 | assert_eq!( 257 | bounds.start_bound(), 258 | Bound::Included(&(ns.to_bytes(), vec![1u8].into(), [0u8; 32])) 259 | ); 260 | assert_eq!( 261 | bounds.end_bound(), 262 | Bound::Excluded(&(ns.to_bytes(), vec![2u8].into(), [0u8; 32])) 263 | ); 264 | 265 | let bounds = ByKeyBounds::new(ns, &KeyFilter::Prefix(vec![255u8].into())); 266 | assert_eq!( 267 | bounds.start_bound(), 268 | Bound::Included(&(ns.to_bytes(), vec![255u8].into(), [0u8; 32])) 269 | ); 270 | assert_eq!(bounds.end_bound(), Bound::Unbounded); 271 | 272 | let ns = NamespaceId::from(&[2u8; 32]); 273 | let mut ns_end = ns.to_bytes(); 274 | ns_end[31] = 3u8; 275 | let bounds = ByKeyBounds::new(ns, &KeyFilter::Prefix(vec![255u8].into())); 276 
| assert_eq!( 277 | bounds.start_bound(), 278 | Bound::Included(&(ns.to_bytes(), vec![255u8].into(), [0u8; 32])) 279 | ); 280 | assert_eq!( 281 | bounds.end_bound(), 282 | Bound::Excluded(&(ns_end, Bytes::new(), [0u8; 32])) 283 | ); 284 | 285 | let bounds = ByKeyBounds::new(ns, &KeyFilter::Exact(vec![1u8].into())); 286 | assert_eq!( 287 | bounds.start_bound(), 288 | Bound::Included(&(ns.to_bytes(), vec![1u8].into(), [0u8; 32])) 289 | ); 290 | assert_eq!( 291 | bounds.end_bound(), 292 | Bound::Included(&(ns.to_bytes(), vec![1u8].into(), [255u8; 32])) 293 | ); 294 | } 295 | } 296 | -------------------------------------------------------------------------------- /src/store/fs/migrate_v1_v2.rs: -------------------------------------------------------------------------------- 1 | use std::path::{Path, PathBuf}; 2 | 3 | use anyhow::Result; 4 | use redb::{MultimapTableHandle, TableHandle}; 5 | use redb_v1::{ReadableMultimapTable, ReadableTable}; 6 | use tempfile::NamedTempFile; 7 | use tracing::info; 8 | 9 | macro_rules! migrate_table { 10 | ($rtx:expr, $wtx:expr, $old:expr, $new:expr) => {{ 11 | let old_table = $rtx.open_table($old)?; 12 | let mut new_table = $wtx.open_table($new)?; 13 | let name = $new.name(); 14 | let len = old_table.len()?; 15 | info!("migrate {name} ({len} rows).."); 16 | let ind = (len as usize / 1000) + 1; 17 | for (i, entry) in old_table.iter()?.enumerate() { 18 | let (key, value) = entry?; 19 | let key = key.value(); 20 | let value = value.value(); 21 | if i > 0 && i % 1000 == 0 { 22 | info!(" {name} {i:>ind$}/{len}"); 23 | } 24 | new_table.insert(key, value)?; 25 | } 26 | info!("migrate {name} done"); 27 | }}; 28 | } 29 | 30 | macro_rules! migrate_multimap_table { 31 | ($rtx:expr, $wtx:expr, $old:expr, $new:expr) => {{ 32 | let old_table = $rtx.open_multimap_table($old)?; 33 | let mut new_table = $wtx.open_multimap_table($new)?; 34 | let name = $new.name(); 35 | let len = old_table.len()?; 36 | info!("migrate {name} ({len} rows)"); 37 | let ind = (len as usize / 1000) + 1; 38 | for (i, entry) in old_table.iter()?.enumerate() { 39 | let (key, values) = entry?; 40 | let key = key.value(); 41 | if i > 0 && i % 1000 == 0 { 42 | info!(" {name} {i:>ind$}/{len}"); 43 | } 44 | for value in values { 45 | let value = value?; 46 | new_table.insert(key, value.value())?; 47 | } 48 | } 49 | info!("migrate {name} done"); 50 | }}; 51 | } 52 | 53 | pub fn run(source: impl AsRef) -> Result { 54 | let source = source.as_ref(); 55 | let dir = source.parent().expect("database is not in root"); 56 | // create the new database in a tempfile in the same directory as the old db 57 | let target = NamedTempFile::with_prefix_in("docs.db.migrate", dir)?; 58 | let target = target.into_temp_path(); 59 | info!("migrate {} to {}", source.display(), target.display()); 60 | let old_db = redb_v1::Database::open(source)?; 61 | let new_db = redb::Database::create(&target)?; 62 | 63 | let rtx = old_db.begin_read()?; 64 | let wtx = new_db.begin_write()?; 65 | 66 | migrate_table!(rtx, wtx, old::AUTHORS_TABLE, new::tables::AUTHORS_TABLE); 67 | migrate_table!( 68 | rtx, 69 | wtx, 70 | old::NAMESPACES_TABLE, 71 | new::tables::NAMESPACES_TABLE 72 | ); 73 | migrate_table!(rtx, wtx, old::RECORDS_TABLE, new::tables::RECORDS_TABLE); 74 | migrate_table!( 75 | rtx, 76 | wtx, 77 | old::LATEST_PER_AUTHOR_TABLE, 78 | new::tables::LATEST_PER_AUTHOR_TABLE 79 | ); 80 | migrate_table!( 81 | rtx, 82 | wtx, 83 | old::RECORDS_BY_KEY_TABLE, 84 | new::tables::RECORDS_BY_KEY_TABLE 85 | ); 86 | migrate_multimap_table!( 87 | rtx, 88 | 
wtx, 89 | old::NAMESPACE_PEERS_TABLE, 90 | new::tables::NAMESPACE_PEERS_TABLE 91 | ); 92 | migrate_table!( 93 | rtx, 94 | wtx, 95 | old::DOWNLOAD_POLICY_TABLE, 96 | new::tables::DOWNLOAD_POLICY_TABLE 97 | ); 98 | 99 | wtx.commit()?; 100 | drop(rtx); 101 | drop(old_db); 102 | drop(new_db); 103 | 104 | let backup_path: PathBuf = { 105 | let mut p = source.to_owned().into_os_string(); 106 | p.push(".backup-redb-v1"); 107 | p.into() 108 | }; 109 | info!("rename {} to {}", source.display(), backup_path.display()); 110 | std::fs::rename(source, &backup_path)?; 111 | info!("rename {} to {}", target.display(), source.display()); 112 | target.persist_noclobber(source)?; 113 | info!("opening migrated database from {}", source.display()); 114 | let db = redb::Database::open(source)?; 115 | Ok(db) 116 | } 117 | 118 | mod new { 119 | pub use super::super::*; 120 | } 121 | 122 | mod old { 123 | use redb_v1::{MultimapTableDefinition, TableDefinition}; 124 | 125 | use super::new::tables::{ 126 | LatestPerAuthorKey, LatestPerAuthorValue, Nanos, RecordsByKeyId, RecordsId, RecordsValue, 127 | }; 128 | use crate::PeerIdBytes; 129 | 130 | pub const AUTHORS_TABLE: TableDefinition<&[u8; 32], &[u8; 32]> = 131 | TableDefinition::new("authors-1"); 132 | pub const NAMESPACES_TABLE: TableDefinition<&[u8; 32], (u8, &[u8; 32])> = 133 | TableDefinition::new("namespaces-2"); 134 | pub const RECORDS_TABLE: TableDefinition = 135 | TableDefinition::new("records-1"); 136 | pub const LATEST_PER_AUTHOR_TABLE: TableDefinition = 137 | TableDefinition::new("latest-by-author-1"); 138 | pub const RECORDS_BY_KEY_TABLE: TableDefinition = 139 | TableDefinition::new("records-by-key-1"); 140 | pub const NAMESPACE_PEERS_TABLE: MultimapTableDefinition<&[u8; 32], (Nanos, &PeerIdBytes)> = 141 | MultimapTableDefinition::new("sync-peers-1"); 142 | pub const DOWNLOAD_POLICY_TABLE: TableDefinition<&[u8; 32], &[u8]> = 143 | TableDefinition::new("download-policy-1"); 144 | } 145 | -------------------------------------------------------------------------------- /src/store/fs/migrations.rs: -------------------------------------------------------------------------------- 1 | use std::collections::HashMap; 2 | 3 | use anyhow::Result; 4 | use redb::{Database, ReadableTable, ReadableTableMetadata, TableHandle, WriteTransaction}; 5 | use tracing::{debug, info}; 6 | 7 | use super::tables::{ 8 | LATEST_PER_AUTHOR_TABLE, NAMESPACES_TABLE, NAMESPACES_TABLE_V1, RECORDS_BY_KEY_TABLE, 9 | RECORDS_TABLE, 10 | }; 11 | use crate::{Capability, NamespaceSecret}; 12 | 13 | /// Run all database migrations, if needed. 14 | pub fn run_migrations(db: &Database) -> Result<()> { 15 | run_migration(db, migration_001_populate_latest_table)?; 16 | run_migration(db, migration_002_namespaces_populate_v2)?; 17 | run_migration(db, migration_003_namespaces_delete_v1)?; 18 | run_migration(db, migration_004_populate_by_key_index)?; 19 | Ok(()) 20 | } 21 | 22 | fn run_migration(db: &Database, f: F) -> Result<()> 23 | where 24 | F: Fn(&WriteTransaction) -> Result, 25 | { 26 | let name = std::any::type_name::(); 27 | let name = name.split("::").last().unwrap(); 28 | let tx = db.begin_write()?; 29 | debug!("Start migration {name}"); 30 | match f(&tx)? 
{ 31 | MigrateOutcome::Execute(len) => { 32 | tx.commit()?; 33 | info!("Executed migration {name} ({len} rows affected)"); 34 | } 35 | MigrateOutcome::Skip => debug!("Skip migration {name}: Not needed"), 36 | } 37 | Ok(()) 38 | } 39 | 40 | enum MigrateOutcome { 41 | Skip, 42 | Execute(usize), 43 | } 44 | 45 | /// migration 001: populate the latest table (which did not exist before) 46 | fn migration_001_populate_latest_table(tx: &WriteTransaction) -> Result { 47 | let mut latest_table = tx.open_table(LATEST_PER_AUTHOR_TABLE)?; 48 | let records_table = tx.open_table(RECORDS_TABLE)?; 49 | if !latest_table.is_empty()? || records_table.is_empty()? { 50 | return Ok(MigrateOutcome::Skip); 51 | } 52 | 53 | #[allow(clippy::type_complexity)] 54 | let mut heads: HashMap<([u8; 32], [u8; 32]), (u64, Vec)> = HashMap::new(); 55 | let iter = records_table.iter()?; 56 | 57 | for next in iter { 58 | let next = next?; 59 | let (namespace, author, key) = next.0.value(); 60 | let (timestamp, _namespace_sig, _author_sig, _len, _hash) = next.1.value(); 61 | heads 62 | .entry((*namespace, *author)) 63 | .and_modify(|e| { 64 | if timestamp >= e.0 { 65 | *e = (timestamp, key.to_vec()); 66 | } 67 | }) 68 | .or_insert_with(|| (timestamp, key.to_vec())); 69 | } 70 | let len = heads.len(); 71 | for ((namespace, author), (timestamp, key)) in heads { 72 | latest_table.insert((&namespace, &author), (timestamp, key.as_slice()))?; 73 | } 74 | Ok(MigrateOutcome::Execute(len)) 75 | } 76 | 77 | /// Copy the namespaces data from V1 to V2. 78 | fn migration_002_namespaces_populate_v2(tx: &WriteTransaction) -> Result { 79 | let namespaces_v1_exists = tx 80 | .list_tables()? 81 | .any(|handle| handle.name() == NAMESPACES_TABLE_V1.name()); 82 | if !namespaces_v1_exists { 83 | return Ok(MigrateOutcome::Skip); 84 | } 85 | let namespaces_v1 = tx.open_table(NAMESPACES_TABLE_V1)?; 86 | let mut namespaces_v2 = tx.open_table(NAMESPACES_TABLE)?; 87 | let mut entries = 0; 88 | for res in namespaces_v1.iter()? { 89 | let db_value = res?.1; 90 | let secret_bytes = db_value.value(); 91 | let capability = Capability::Write(NamespaceSecret::from_bytes(secret_bytes)); 92 | let id = capability.id().to_bytes(); 93 | let (raw_kind, raw_bytes) = capability.raw(); 94 | namespaces_v2.insert(&id, (raw_kind, &raw_bytes))?; 95 | entries += 1; 96 | } 97 | Ok(MigrateOutcome::Execute(entries)) 98 | } 99 | 100 | /// Delete the v1 namespaces table. 101 | /// 102 | /// This should be part of [`migration_002_namespaces_populate_v2`] but due to a limitation in 103 | /// [`redb`] up to v1.3.0 a table cannot be deleted in a transaction that also opens this table. 104 | /// Therefore the table deletion has to be in a separate transaction. 105 | /// 106 | /// This limitation was removed in so this can be merged 107 | /// back into [`migration_002_namespaces_populate_v2`] once we upgrade to the next redb version 108 | /// after 1.3. 109 | fn migration_003_namespaces_delete_v1(tx: &WriteTransaction) -> Result { 110 | let namespaces_v1_exists = tx 111 | .list_tables()? 
112 | .any(|handle| handle.name() == NAMESPACES_TABLE_V1.name()); 113 | if !namespaces_v1_exists { 114 | return Ok(MigrateOutcome::Skip); 115 | } 116 | tx.delete_table(NAMESPACES_TABLE_V1)?; 117 | Ok(MigrateOutcome::Execute(1)) 118 | } 119 | 120 | /// migration 004: populate the by_key index table(which did not exist before) 121 | fn migration_004_populate_by_key_index(tx: &WriteTransaction) -> Result { 122 | let mut by_key_table = tx.open_table(RECORDS_BY_KEY_TABLE)?; 123 | let records_table = tx.open_table(RECORDS_TABLE)?; 124 | if !by_key_table.is_empty()? { 125 | return Ok(MigrateOutcome::Skip); 126 | } 127 | 128 | let iter = records_table.iter()?; 129 | let mut len = 0; 130 | for next in iter { 131 | let next = next?; 132 | let (namespace, author, key) = next.0.value(); 133 | let id = (namespace, key, author); 134 | by_key_table.insert(id, ())?; 135 | len += 1; 136 | } 137 | Ok(MigrateOutcome::Execute(len)) 138 | } 139 | -------------------------------------------------------------------------------- /src/store/fs/query.rs: -------------------------------------------------------------------------------- 1 | use anyhow::Result; 2 | use iroh_blobs::Hash; 3 | 4 | use super::{ 5 | bounds::{ByKeyBounds, RecordsBounds}, 6 | ranges::{RecordsByKeyRange, RecordsRange}, 7 | RecordsValue, 8 | }; 9 | use crate::{ 10 | store::{ 11 | fs::tables::ReadOnlyTables, 12 | util::{IndexKind, LatestPerKeySelector, SelectorRes}, 13 | AuthorFilter, KeyFilter, Query, 14 | }, 15 | AuthorId, NamespaceId, SignedEntry, 16 | }; 17 | 18 | /// A query iterator for entry queries. 19 | #[derive(Debug)] 20 | pub struct QueryIterator { 21 | range: QueryRange, 22 | query: Query, 23 | offset: u64, 24 | count: u64, 25 | } 26 | 27 | #[derive(Debug)] 28 | enum QueryRange { 29 | AuthorKey { 30 | range: RecordsRange<'static>, 31 | key_filter: KeyFilter, 32 | }, 33 | KeyAuthor { 34 | range: RecordsByKeyRange, 35 | author_filter: AuthorFilter, 36 | selector: Option, 37 | }, 38 | } 39 | 40 | impl QueryIterator { 41 | pub fn new(tables: ReadOnlyTables, namespace: NamespaceId, query: Query) -> Result { 42 | let index_kind = IndexKind::from(&query); 43 | let range = match index_kind { 44 | IndexKind::AuthorKey { range, key_filter } => { 45 | let (bounds, filter) = match range { 46 | // single author: both author and key are selected via the range. therefore 47 | // set `filter` to `Any`. 48 | AuthorFilter::Exact(author) => ( 49 | RecordsBounds::author_key(namespace, author, key_filter), 50 | KeyFilter::Any, 51 | ), 52 | // no author set => full table scan with the provided key filter 53 | AuthorFilter::Any => (RecordsBounds::namespace(namespace), key_filter), 54 | }; 55 | let range = RecordsRange::with_bounds_static(&tables.records, bounds)?; 56 | QueryRange::AuthorKey { 57 | range, 58 | key_filter: filter, 59 | } 60 | } 61 | IndexKind::KeyAuthor { 62 | range, 63 | author_filter, 64 | latest_per_key, 65 | } => { 66 | let bounds = ByKeyBounds::new(namespace, &range); 67 | let range = 68 | RecordsByKeyRange::with_bounds(tables.records_by_key, tables.records, bounds)?; 69 | let selector = latest_per_key.then(LatestPerKeySelector::default); 70 | QueryRange::KeyAuthor { 71 | author_filter, 72 | range, 73 | selector, 74 | } 75 | } 76 | }; 77 | 78 | Ok(Self { 79 | range, 80 | query, 81 | offset: 0, 82 | count: 0, 83 | }) 84 | } 85 | } 86 | 87 | impl Iterator for QueryIterator { 88 | type Item = Result; 89 | 90 | fn next(&mut self) -> Option> { 91 | // early-return if we reached the query limit. 
92 | if let Some(limit) = self.query.limit() { 93 | if self.count >= limit { 94 | return None; 95 | } 96 | } 97 | loop { 98 | let next = match &mut self.range { 99 | QueryRange::AuthorKey { range, key_filter } => { 100 | // get the next entry from the query range, filtered by the key and empty filters 101 | range.next_filtered(&self.query.sort_direction, |(_ns, _author, key), value| { 102 | key_filter.matches(key) 103 | && (self.query.include_empty || !value_is_empty(&value)) 104 | }) 105 | } 106 | 107 | QueryRange::KeyAuthor { 108 | range, 109 | author_filter, 110 | selector, 111 | } => loop { 112 | // get the next entry from the query range, filtered by the author filter 113 | let next = range 114 | .next_filtered(&self.query.sort_direction, |(_ns, _key, author)| { 115 | author_filter.matches(&(AuthorId::from(author))) 116 | }); 117 | 118 | // early-break if next contains Err 119 | let next = match next.transpose() { 120 | Err(err) => break Some(Err(err)), 121 | Ok(next) => next, 122 | }; 123 | 124 | // push the entry into the selector. if active, only the latest entry 125 | // for each key will be emitted. 126 | let next = match selector { 127 | None => next, 128 | Some(selector) => match selector.push(next) { 129 | SelectorRes::Continue => continue, 130 | SelectorRes::Finished => None, 131 | SelectorRes::Some(res) => Some(res), 132 | }, 133 | }; 134 | 135 | // skip the entry if empty and no empty entries requested 136 | if !self.query.include_empty && matches!(&next, Some(e) if e.is_empty()) { 137 | continue; 138 | } 139 | 140 | break next.map(Result::Ok); 141 | }, 142 | }; 143 | 144 | // skip the entry if we didn't get past the requested offset yet. 145 | if self.offset < self.query.offset() && matches!(next, Some(Ok(_))) { 146 | self.offset += 1; 147 | continue; 148 | } 149 | 150 | self.count += 1; 151 | return next; 152 | } 153 | } 154 | } 155 | 156 | fn value_is_empty(value: &RecordsValue) -> bool { 157 | let (_timestamp, _namespace_sig, _author_sig, _len, hash) = value; 158 | *hash == Hash::EMPTY.as_bytes() 159 | } 160 | -------------------------------------------------------------------------------- /src/store/fs/ranges.rs: -------------------------------------------------------------------------------- 1 | //! Ranges and helpers for working with [`redb`] tables 2 | 3 | use redb::{Key, Range, ReadOnlyTable, ReadableTable, Value}; 4 | 5 | use super::{ 6 | bounds::{ByKeyBounds, RecordsBounds}, 7 | into_entry, 8 | tables::{RecordsByKeyId, RecordsId, RecordsValue}, 9 | }; 10 | use crate::{store::SortDirection, SignedEntry}; 11 | 12 | /// An extension trait for [`Range`] that provides methods for mapped retrieval. 13 | pub trait RangeExt { 14 | /// Get the next entry and map the item with a callback function. 15 | fn next_map( 16 | &mut self, 17 | map: impl for<'x> Fn(K::SelfType<'x>, V::SelfType<'x>) -> T, 18 | ) -> Option>; 19 | 20 | /// Get the next entry, but only if the callback function returns Some, otherwise continue. 21 | /// 22 | /// With `direction` the range can be either process in forward or backward direction. 23 | fn next_filter_map( 24 | &mut self, 25 | direction: &SortDirection, 26 | filter_map: impl for<'x> Fn(K::SelfType<'x>, V::SelfType<'x>) -> Option, 27 | ) -> Option>; 28 | 29 | /// Like [`Self::next_filter_map`], but the callback returns a `Result`, and the result is 30 | /// flattened with the result from the range operation. 
31 | fn next_try_filter_map( 32 | &mut self, 33 | direction: &SortDirection, 34 | filter_map: impl for<'x> Fn(K::SelfType<'x>, V::SelfType<'x>) -> Option>, 35 | ) -> Option> { 36 | Some(self.next_filter_map(direction, filter_map)?.and_then(|r| r)) 37 | } 38 | } 39 | 40 | impl RangeExt for Range<'_, K, V> { 41 | fn next_map( 42 | &mut self, 43 | map: impl for<'x> Fn(K::SelfType<'x>, V::SelfType<'x>) -> T, 44 | ) -> Option> { 45 | self.next() 46 | .map(|r| r.map_err(Into::into).map(|r| map(r.0.value(), r.1.value()))) 47 | } 48 | 49 | fn next_filter_map( 50 | &mut self, 51 | direction: &SortDirection, 52 | filter_map: impl for<'x> Fn(K::SelfType<'x>, V::SelfType<'x>) -> Option, 53 | ) -> Option> { 54 | loop { 55 | let next = match direction { 56 | SortDirection::Asc => self.next(), 57 | SortDirection::Desc => self.next_back(), 58 | }; 59 | match next { 60 | None => break None, 61 | Some(Err(err)) => break Some(Err(err.into())), 62 | Some(Ok(res)) => match filter_map(res.0.value(), res.1.value()) { 63 | None => continue, 64 | Some(item) => break Some(Ok(item)), 65 | }, 66 | } 67 | } 68 | } 69 | } 70 | 71 | /// An iterator over a range of entries from the records table. 72 | #[derive(derive_more::Debug)] 73 | #[debug("RecordsRange")] 74 | pub struct RecordsRange<'a>(Range<'a, RecordsId<'static>, RecordsValue<'static>>); 75 | 76 | // pub type RecordsRange<'a> = Range<'a, RecordsId<'static>, RecordsValue<'static>>; 77 | 78 | impl<'a> RecordsRange<'a> { 79 | pub(super) fn with_bounds( 80 | records: &'a impl ReadableTable, RecordsValue<'static>>, 81 | bounds: RecordsBounds, 82 | ) -> anyhow::Result { 83 | let range = records.range(bounds.as_ref())?; 84 | Ok(Self(range)) 85 | } 86 | 87 | // 88 | /// Get the next item in the range. 89 | /// 90 | /// Omit items for which the `matcher` function returns false. 91 | pub(super) fn next_filtered( 92 | &mut self, 93 | direction: &SortDirection, 94 | filter: impl for<'x> Fn(RecordsId<'x>, RecordsValue<'x>) -> bool, 95 | ) -> Option> { 96 | self.0 97 | .next_filter_map(direction, |k, v| filter(k, v).then(|| into_entry(k, v))) 98 | } 99 | } 100 | 101 | impl RecordsRange<'static> { 102 | pub(super) fn all_static( 103 | records: &ReadOnlyTable, RecordsValue<'static>>, 104 | ) -> anyhow::Result { 105 | let range = records.range::>(..)?; 106 | Ok(Self(range)) 107 | } 108 | pub(super) fn with_bounds_static( 109 | records: &ReadOnlyTable, RecordsValue<'static>>, 110 | bounds: RecordsBounds, 111 | ) -> anyhow::Result { 112 | let range = records.range(bounds.as_ref())?; 113 | Ok(Self(range)) 114 | } 115 | } 116 | 117 | impl Iterator for RecordsRange<'_> { 118 | type Item = anyhow::Result; 119 | fn next(&mut self) -> Option { 120 | self.0.next_map(into_entry) 121 | } 122 | } 123 | 124 | #[derive(derive_more::Debug)] 125 | #[debug("RecordsByKeyRange")] 126 | pub struct RecordsByKeyRange { 127 | records_table: ReadOnlyTable, RecordsValue<'static>>, 128 | by_key_range: Range<'static, RecordsByKeyId<'static>, ()>, 129 | } 130 | 131 | impl RecordsByKeyRange { 132 | pub fn with_bounds( 133 | records_by_key_table: ReadOnlyTable, ()>, 134 | records_table: ReadOnlyTable, RecordsValue<'static>>, 135 | bounds: ByKeyBounds, 136 | ) -> anyhow::Result { 137 | let by_key_range = records_by_key_table.range(bounds.as_ref())?; 138 | Ok(Self { 139 | records_table, 140 | by_key_range, 141 | }) 142 | } 143 | 144 | /// Get the next item in the range. 145 | /// 146 | /// Omit items for which the `filter` function returns false. 
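For orientation, a sketch of how these range types are consumed (this mirrors what `QueryIterator` does internally); `range` is assumed to come from `RecordsRange::with_bounds_static` above, and the closure filters on the raw records key.

    while let Some(res) = range.next_filtered(&SortDirection::Asc, |(_ns, _author, key), _value| {
        key.starts_with(b"posts/")
    }) {
        let entry: SignedEntry = res?;
        // ... use entry
    }
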
147 | pub fn next_filtered( 148 | &mut self, 149 | direction: &SortDirection, 150 | filter: impl for<'x> Fn(RecordsByKeyId<'x>) -> bool, 151 | ) -> Option> { 152 | let entry = self.by_key_range.next_try_filter_map(direction, |k, _v| { 153 | if !filter(k) { 154 | return None; 155 | }; 156 | let (namespace, key, author) = k; 157 | let records_id = (namespace, author, key); 158 | let entry = self.records_table.get(&records_id).transpose()?; 159 | let entry = entry 160 | .map(|value| into_entry(records_id, value.value())) 161 | .map_err(anyhow::Error::from); 162 | Some(entry) 163 | }); 164 | entry 165 | } 166 | } 167 | -------------------------------------------------------------------------------- /src/store/fs/tables.rs: -------------------------------------------------------------------------------- 1 | #![allow(missing_docs)] 2 | // Table Definitions 3 | 4 | use std::time::Instant; 5 | 6 | use bytes::Bytes; 7 | use redb::{ 8 | MultimapTable, MultimapTableDefinition, ReadOnlyMultimapTable, ReadOnlyTable, ReadTransaction, 9 | Table, TableDefinition, WriteTransaction, 10 | }; 11 | 12 | use crate::PeerIdBytes; 13 | 14 | /// Table: Authors 15 | /// Key: `[u8; 32]` # AuthorId 16 | /// Value: `[u8; 32]` # Author 17 | pub const AUTHORS_TABLE: TableDefinition<&[u8; 32], &[u8; 32]> = TableDefinition::new("authors-1"); 18 | 19 | /// Table: Namespaces v1 (replaced by Namespaces v2 in migration ) 20 | /// Key: `[u8; 32]` # NamespaceId 21 | /// Value: `[u8; 32]` # NamespaceSecret 22 | pub const NAMESPACES_TABLE_V1: TableDefinition<&[u8; 32], &[u8; 32]> = 23 | TableDefinition::new("namespaces-1"); 24 | 25 | /// Table: Namespaces v2 26 | /// Key: `[u8; 32]` # NamespaceId 27 | /// Value: `(u8, [u8; 32])` # (CapabilityKind, Capability) 28 | pub const NAMESPACES_TABLE: TableDefinition<&[u8; 32], (u8, &[u8; 32])> = 29 | TableDefinition::new("namespaces-2"); 30 | 31 | /// Table: Records 32 | /// Key: `([u8; 32], [u8; 32], &[u8])` 33 | /// # (NamespaceId, AuthorId, Key) 34 | /// Value: `(u64, [u8; 32], [u8; 32], u64, [u8; 32])` 35 | /// # (timestamp, signature_namespace, signature_author, len, hash) 36 | pub const RECORDS_TABLE: TableDefinition = 37 | TableDefinition::new("records-1"); 38 | pub type RecordsId<'a> = (&'a [u8; 32], &'a [u8; 32], &'a [u8]); 39 | pub type RecordsIdOwned = ([u8; 32], [u8; 32], Bytes); 40 | pub type RecordsValue<'a> = (u64, &'a [u8; 64], &'a [u8; 64], u64, &'a [u8; 32]); 41 | pub type RecordsTable = ReadOnlyTable, RecordsValue<'static>>; 42 | 43 | /// Table: Latest per author 44 | /// Key: `([u8; 32], [u8; 32])` # (NamespaceId, AuthorId) 45 | /// Value: `(u64, Vec)` # (Timestamp, Key) 46 | pub const LATEST_PER_AUTHOR_TABLE: TableDefinition = 47 | TableDefinition::new("latest-by-author-1"); 48 | pub type LatestPerAuthorKey<'a> = (&'a [u8; 32], &'a [u8; 32]); 49 | pub type LatestPerAuthorValue<'a> = (u64, &'a [u8]); 50 | 51 | /// Table: Records by key 52 | /// Key: `([u8; 32], Vec, [u8; 32]])` # (NamespaceId, Key, AuthorId) 53 | /// Value: `()` 54 | pub const RECORDS_BY_KEY_TABLE: TableDefinition = 55 | TableDefinition::new("records-by-key-1"); 56 | pub type RecordsByKeyId<'a> = (&'a [u8; 32], &'a [u8], &'a [u8; 32]); 57 | pub type RecordsByKeyIdOwned = ([u8; 32], Bytes, [u8; 32]); 58 | 59 | /// Table: Peers per document. 60 | /// Key: `[u8; 32]` # NamespaceId 61 | /// Value: `(u64, [u8; 32])` # ([`Nanos`], &[`PeerIdBytes`]) representing the last time a peer was used. 
62 | pub const NAMESPACE_PEERS_TABLE: MultimapTableDefinition<&[u8; 32], (Nanos, &PeerIdBytes)> = 63 | MultimapTableDefinition::new("sync-peers-1"); 64 | /// Number of seconds elapsed since [`std::time::SystemTime::UNIX_EPOCH`]. Used to register the 65 | /// last time a peer was useful in a document. 66 | // NOTE: resolution is nanoseconds, stored as a u64 since this covers ~500years from unix epoch, 67 | // which should be more than enough 68 | pub type Nanos = u64; 69 | 70 | /// Table: Download policy 71 | /// Key: `[u8; 32]` # NamespaceId 72 | /// Value: `Vec` # Postcard encoded download policy 73 | pub const DOWNLOAD_POLICY_TABLE: TableDefinition<&[u8; 32], &[u8]> = 74 | TableDefinition::new("download-policy-1"); 75 | 76 | self_cell::self_cell! { 77 | struct TransactionAndTablesInner { 78 | owner: WriteTransaction, 79 | #[covariant] 80 | dependent: Tables, 81 | } 82 | } 83 | 84 | #[derive(derive_more::Debug)] 85 | pub struct TransactionAndTables { 86 | #[debug("TransactionAndTablesInner")] 87 | inner: TransactionAndTablesInner, 88 | pub(crate) since: Instant, 89 | } 90 | 91 | impl TransactionAndTables { 92 | pub fn new(tx: WriteTransaction) -> std::result::Result { 93 | Ok(Self { 94 | inner: TransactionAndTablesInner::try_new(tx, |tx| Tables::new(tx))?, 95 | since: Instant::now(), 96 | }) 97 | } 98 | 99 | pub fn tables(&self) -> &Tables { 100 | self.inner.borrow_dependent() 101 | } 102 | 103 | pub fn with_tables_mut( 104 | &mut self, 105 | f: impl FnOnce(&mut Tables) -> anyhow::Result, 106 | ) -> anyhow::Result { 107 | self.inner.with_dependent_mut(|_, t| f(t)) 108 | } 109 | 110 | pub fn commit(self) -> std::result::Result<(), redb::CommitError> { 111 | self.inner.into_owner().commit() 112 | } 113 | } 114 | 115 | #[derive(derive_more::Debug)] 116 | pub struct Tables<'tx> { 117 | pub records: Table<'tx, RecordsId<'static>, RecordsValue<'static>>, 118 | pub records_by_key: Table<'tx, RecordsByKeyId<'static>, ()>, 119 | pub namespaces: Table<'tx, &'static [u8; 32], (u8, &'static [u8; 32])>, 120 | pub latest_per_author: Table<'tx, LatestPerAuthorKey<'static>, LatestPerAuthorValue<'static>>, 121 | #[debug("MultimapTable")] 122 | pub namespace_peers: MultimapTable<'tx, &'static [u8; 32], (Nanos, &'static PeerIdBytes)>, 123 | pub download_policy: Table<'tx, &'static [u8; 32], &'static [u8]>, 124 | pub authors: Table<'tx, &'static [u8; 32], &'static [u8; 32]>, 125 | } 126 | 127 | impl<'tx> Tables<'tx> { 128 | pub fn new(tx: &'tx WriteTransaction) -> Result { 129 | let records = tx.open_table(RECORDS_TABLE)?; 130 | let records_by_key = tx.open_table(RECORDS_BY_KEY_TABLE)?; 131 | let namespaces = tx.open_table(NAMESPACES_TABLE)?; 132 | let latest_per_author = tx.open_table(LATEST_PER_AUTHOR_TABLE)?; 133 | let namespace_peers = tx.open_multimap_table(NAMESPACE_PEERS_TABLE)?; 134 | let download_policy = tx.open_table(DOWNLOAD_POLICY_TABLE)?; 135 | let authors = tx.open_table(AUTHORS_TABLE)?; 136 | Ok(Self { 137 | records, 138 | records_by_key, 139 | namespaces, 140 | latest_per_author, 141 | namespace_peers, 142 | download_policy, 143 | authors, 144 | }) 145 | } 146 | } 147 | #[derive(derive_more::Debug)] 148 | pub struct ReadOnlyTables { 149 | pub records: ReadOnlyTable, RecordsValue<'static>>, 150 | pub records_by_key: ReadOnlyTable, ()>, 151 | pub namespaces: ReadOnlyTable<&'static [u8; 32], (u8, &'static [u8; 32])>, 152 | pub latest_per_author: 153 | ReadOnlyTable, LatestPerAuthorValue<'static>>, 154 | #[debug("namespace_peers")] 155 | pub namespace_peers: ReadOnlyMultimapTable<&'static [u8; 
32], (Nanos, &'static PeerIdBytes)>, 156 | pub download_policy: ReadOnlyTable<&'static [u8; 32], &'static [u8]>, 157 | pub authors: ReadOnlyTable<&'static [u8; 32], &'static [u8; 32]>, 158 | tx: ReadTransaction, 159 | } 160 | 161 | impl ReadOnlyTables { 162 | pub fn new(tx: ReadTransaction) -> Result { 163 | let records = tx.open_table(RECORDS_TABLE)?; 164 | let records_by_key = tx.open_table(RECORDS_BY_KEY_TABLE)?; 165 | let namespaces = tx.open_table(NAMESPACES_TABLE)?; 166 | let latest_per_author = tx.open_table(LATEST_PER_AUTHOR_TABLE)?; 167 | let namespace_peers = tx.open_multimap_table(NAMESPACE_PEERS_TABLE)?; 168 | let download_policy = tx.open_table(DOWNLOAD_POLICY_TABLE)?; 169 | let authors = tx.open_table(AUTHORS_TABLE)?; 170 | Ok(Self { 171 | records, 172 | records_by_key, 173 | namespaces, 174 | latest_per_author, 175 | namespace_peers, 176 | download_policy, 177 | authors, 178 | tx, 179 | }) 180 | } 181 | 182 | /// Create a clone of the records table for use in iterators. 183 | pub fn records_clone(&self) -> Result { 184 | self.tx.open_table(RECORDS_TABLE) 185 | } 186 | } 187 | -------------------------------------------------------------------------------- /src/store/pubkeys.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | collections::HashMap, 3 | sync::{Arc, RwLock}, 4 | }; 5 | 6 | use ed25519_dalek::{SignatureError, VerifyingKey}; 7 | 8 | use crate::{AuthorId, AuthorPublicKey, NamespaceId, NamespacePublicKey}; 9 | 10 | /// Store trait for expanded public keys for authors and namespaces. 11 | /// 12 | /// Used to cache [`ed25519_dalek::VerifyingKey`]. 13 | /// 14 | /// This trait is implemented for the unit type [`()`], where no caching is used. 15 | pub trait PublicKeyStore { 16 | /// Convert a byte array into a [`VerifyingKey`]. 17 | /// 18 | /// New keys are inserted into the [`PublicKeyStore ] and reused on subsequent calls. 19 | fn public_key(&self, id: &[u8; 32]) -> Result; 20 | 21 | /// Convert a [`NamespaceId`] into a [`NamespacePublicKey`]. 22 | /// 23 | /// New keys are inserted into the [`PublicKeyStore ] and reused on subsequent calls. 24 | fn namespace_key(&self, bytes: &NamespaceId) -> Result { 25 | self.public_key(bytes.as_bytes()).map(Into::into) 26 | } 27 | 28 | /// Convert a [`AuthorId`] into a [`AuthorPublicKey`]. 29 | /// 30 | /// New keys are inserted into the [`PublicKeyStore ] and reused on subsequent calls. 31 | fn author_key(&self, bytes: &AuthorId) -> Result { 32 | self.public_key(bytes.as_bytes()).map(Into::into) 33 | } 34 | } 35 | 36 | impl PublicKeyStore for &T { 37 | fn public_key(&self, id: &[u8; 32]) -> Result { 38 | (*self).public_key(id) 39 | } 40 | } 41 | 42 | impl PublicKeyStore for &mut T { 43 | fn public_key(&self, id: &[u8; 32]) -> Result { 44 | PublicKeyStore::public_key(*self, id) 45 | } 46 | } 47 | 48 | impl PublicKeyStore for () { 49 | fn public_key(&self, id: &[u8; 32]) -> Result { 50 | VerifyingKey::from_bytes(id) 51 | } 52 | } 53 | 54 | /// In-memory key storage 55 | // TODO: Make max number of keys stored configurable. 
56 | #[derive(Debug, Clone, Default)] 57 | pub struct MemPublicKeyStore { 58 | keys: Arc>>, 59 | } 60 | 61 | impl PublicKeyStore for MemPublicKeyStore { 62 | fn public_key(&self, bytes: &[u8; 32]) -> Result { 63 | if let Some(id) = self.keys.read().unwrap().get(bytes) { 64 | return Ok(*id); 65 | } 66 | let id = VerifyingKey::from_bytes(bytes)?; 67 | self.keys.write().unwrap().insert(*bytes, id); 68 | Ok(id) 69 | } 70 | } 71 | -------------------------------------------------------------------------------- /src/store/util.rs: -------------------------------------------------------------------------------- 1 | //! Utilities useful across different store impls. 2 | 3 | use super::{AuthorFilter, KeyFilter, Query, QueryKind, SortBy}; 4 | use crate::SignedEntry; 5 | 6 | /// A helper for stores that have by-author and by-key indexes for records. 7 | #[derive(Debug)] 8 | pub enum IndexKind { 9 | AuthorKey { 10 | range: AuthorFilter, 11 | key_filter: KeyFilter, 12 | }, 13 | KeyAuthor { 14 | range: KeyFilter, 15 | author_filter: AuthorFilter, 16 | latest_per_key: bool, 17 | }, 18 | } 19 | 20 | impl From<&Query> for IndexKind { 21 | fn from(query: &Query) -> Self { 22 | match &query.kind { 23 | QueryKind::Flat(details) => match (&query.filter_author, details.sort_by) { 24 | (AuthorFilter::Any, SortBy::KeyAuthor) => IndexKind::KeyAuthor { 25 | range: query.filter_key.clone(), 26 | author_filter: AuthorFilter::Any, 27 | latest_per_key: false, 28 | }, 29 | _ => IndexKind::AuthorKey { 30 | range: query.filter_author.clone(), 31 | key_filter: query.filter_key.clone(), 32 | }, 33 | }, 34 | QueryKind::SingleLatestPerKey(_) => IndexKind::KeyAuthor { 35 | range: query.filter_key.clone(), 36 | author_filter: query.filter_author.clone(), 37 | latest_per_key: true, 38 | }, 39 | } 40 | } 41 | } 42 | 43 | /// Helper to extract the latest entry per key from an iterator that yields [`SignedEntry`] items. 44 | /// 45 | /// Items must be pushed in key-sorted order. 46 | #[derive(Debug, Default)] 47 | pub struct LatestPerKeySelector(Option); 48 | 49 | pub enum SelectorRes { 50 | /// The iterator is finished. 51 | Finished, 52 | /// The selection is not yet finished, keep pushing more items. 53 | Continue, 54 | /// The selection yielded an entry. 55 | Some(SignedEntry), 56 | } 57 | 58 | impl LatestPerKeySelector { 59 | /// Push an entry into the selector. 60 | /// 61 | /// Entries must be sorted by key beforehand. 62 | pub fn push(&mut self, entry: Option) -> SelectorRes { 63 | let Some(entry) = entry else { 64 | return match self.0.take() { 65 | Some(entry) => SelectorRes::Some(entry), 66 | None => SelectorRes::Finished, 67 | }; 68 | }; 69 | match self.0.take() { 70 | None => { 71 | self.0 = Some(entry); 72 | SelectorRes::Continue 73 | } 74 | Some(last) if last.key() == entry.key() => { 75 | if entry.timestamp() > last.timestamp() { 76 | self.0 = Some(entry); 77 | } else { 78 | self.0 = Some(last); 79 | } 80 | SelectorRes::Continue 81 | } 82 | Some(last) => { 83 | self.0 = Some(entry); 84 | SelectorRes::Some(last) 85 | } 86 | } 87 | } 88 | } 89 | -------------------------------------------------------------------------------- /src/ticket.rs: -------------------------------------------------------------------------------- 1 | //! Tickets for [`iroh-docs`] documents. 2 | 3 | use iroh::NodeAddr; 4 | use iroh_base::ticket; 5 | use serde::{Deserialize, Serialize}; 6 | 7 | use crate::Capability; 8 | 9 | /// Contains both a key (either secret or public) to a document, and a list of peers to join. 
10 | #[derive(Serialize, Deserialize, Clone, Debug, derive_more::Display)] 11 | #[display("{}", ticket::Ticket::serialize(self))] 12 | pub struct DocTicket { 13 | /// Either a public or secret key granting access to the document. 14 | pub capability: Capability, 15 | /// A list of nodes to contact. 16 | pub nodes: Vec<NodeAddr>, 17 | } 18 | 19 | /// Wire format for [`DocTicket`]. 20 | /// 21 | /// In the future we might have multiple variants (not versions, since they 22 | /// might be both equally valid), so this is a single variant enum to force 23 | /// postcard to add a discriminator. 24 | #[derive(Serialize, Deserialize)] 25 | enum TicketWireFormat { 26 | Variant0(DocTicket), 27 | } 28 | 29 | impl ticket::Ticket for DocTicket { 30 | const KIND: &'static str = "doc"; 31 | 32 | fn to_bytes(&self) -> Vec<u8> { 33 | let data = TicketWireFormat::Variant0(self.clone()); 34 | postcard::to_stdvec(&data).expect("postcard serialization failed") 35 | } 36 | 37 | fn from_bytes(bytes: &[u8]) -> Result<Self, ticket::Error> { 38 | let res: TicketWireFormat = postcard::from_bytes(bytes).map_err(ticket::Error::Postcard)?; 39 | let TicketWireFormat::Variant0(res) = res; 40 | if res.nodes.is_empty() { 41 | return Err(ticket::Error::Verify("addressing info cannot be empty")); 42 | } 43 | Ok(res) 44 | } 45 | } 46 | 47 | impl DocTicket { 48 | /// Create a new doc ticket 49 | pub fn new(capability: Capability, peers: Vec<NodeAddr>) -> Self { 50 | Self { 51 | capability, 52 | nodes: peers, 53 | } 54 | } 55 | } 56 | 57 | impl std::str::FromStr for DocTicket { 58 | type Err = ticket::Error; 59 | fn from_str(s: &str) -> Result<Self, Self::Err> { 60 | ticket::Ticket::deserialize(s) 61 | } 62 | } 63 | 64 | #[cfg(test)] 65 | mod tests { 66 | use std::str::FromStr; 67 | 68 | use anyhow::{ensure, Context, Result}; 69 | use iroh::PublicKey; 70 | 71 | use super::*; 72 | use crate::NamespaceId; 73 | 74 | #[test] 75 | fn test_ticket_base32() { 76 | let node_id = 77 | PublicKey::from_str("ae58ff8833241ac82d6ff7611046ed67b5072d142c588d0063e942d9a75502b6") 78 | .unwrap(); 79 | let namespace_id = NamespaceId::from( 80 | &<[u8; 32]>::try_from( 81 | hex::decode("ae58ff8833241ac82d6ff7611046ed67b5072d142c588d0063e942d9a75502b6") 82 | .unwrap(), 83 | ) 84 | .unwrap(), 85 | ); 86 | 87 | let ticket = DocTicket { 88 | capability: Capability::Read(namespace_id), 89 | nodes: vec![NodeAddr::from_parts(node_id, None, [])], 90 | }; 91 | let s = ticket.to_string(); 92 | let base32 = data_encoding::BASE32_NOPAD 93 | .decode( 94 | s.strip_prefix("doc") 95 | .unwrap() 96 | .to_ascii_uppercase() 97 | .as_bytes(), 98 | ) 99 | .unwrap(); 100 | let expected = parse_hexdump(" 101 | 00 # variant 102 | 01 # capability discriminator, 1 = read 103 | ae58ff8833241ac82d6ff7611046ed67b5072d142c588d0063e942d9a75502b6 # namespace id, 32 bytes, see above 104 | 01 # one node 105 | ae58ff8833241ac82d6ff7611046ed67b5072d142c588d0063e942d9a75502b6 # node id, 32 bytes, see above 106 | 00 # no relay url 107 | 00 # no direct addresses 108 | ").unwrap(); 109 | assert_eq!(base32, expected); 110 | } 111 | 112 | /// Parses a commented multi line hexdump into a vector of bytes. 113 | /// 114 | /// This is useful to write wire level protocol tests.
115 | pub fn parse_hexdump(s: &str) -> Result> { 116 | let mut result = Vec::new(); 117 | 118 | for (line_number, line) in s.lines().enumerate() { 119 | let data_part = line.split('#').next().unwrap_or(""); 120 | let cleaned: String = data_part.chars().filter(|c| !c.is_whitespace()).collect(); 121 | 122 | ensure!( 123 | cleaned.len() % 2 == 0, 124 | "Non-even number of hex chars detected on line {}.", 125 | line_number + 1 126 | ); 127 | 128 | for i in (0..cleaned.len()).step_by(2) { 129 | let byte_str = &cleaned[i..i + 2]; 130 | let byte = u8::from_str_radix(byte_str, 16) 131 | .with_context(|| format!("Invalid hex data on line {}.", line_number + 1))?; 132 | 133 | result.push(byte); 134 | } 135 | } 136 | 137 | Ok(result) 138 | } 139 | } 140 | -------------------------------------------------------------------------------- /tests/client.rs: -------------------------------------------------------------------------------- 1 | #![cfg(feature = "rpc")] 2 | use anyhow::{Context, Result}; 3 | use futures_util::TryStreamExt; 4 | use iroh_blobs::{ 5 | store::ExportMode, 6 | util::fs::{key_to_path, path_to_key}, 7 | }; 8 | use iroh_docs::store::Query; 9 | use rand::RngCore; 10 | use testresult::TestResult; 11 | use tokio::io::AsyncWriteExt; 12 | use tracing_test::traced_test; 13 | use util::Node; 14 | 15 | mod util; 16 | 17 | /// Test that closing a doc does not close other instances. 18 | #[tokio::test] 19 | #[traced_test] 20 | async fn test_doc_close() -> Result<()> { 21 | let node = Node::memory().spawn().await?; 22 | let author = node.authors().default().await?; 23 | // open doc two times 24 | let doc1 = node.docs().create().await?; 25 | let doc2 = node.docs().open(doc1.id()).await?.expect("doc to exist"); 26 | // close doc1 instance 27 | doc1.close().await?; 28 | // operations on doc1 now fail. 29 | assert!(doc1.set_bytes(author, "foo", "bar").await.is_err()); 30 | // dropping doc1 will close the doc if not already closed 31 | // wait a bit because the close-on-drop spawns a task for which we cannot track completion. 32 | drop(doc1); 33 | tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; 34 | 35 | // operations on doc2 still succeed 36 | doc2.set_bytes(author, "foo", "bar").await?; 37 | Ok(()) 38 | } 39 | 40 | #[tokio::test] 41 | #[traced_test] 42 | async fn test_doc_import_export() -> TestResult<()> { 43 | let node = Node::memory().spawn().await?; 44 | 45 | // create temp file 46 | let temp_dir = tempfile::tempdir().context("tempdir")?; 47 | 48 | let in_root = temp_dir.path().join("in"); 49 | tokio::fs::create_dir_all(in_root.clone()) 50 | .await 51 | .context("create dir all")?; 52 | let out_root = temp_dir.path().join("out"); 53 | 54 | let path = in_root.join("test"); 55 | 56 | let size = 100; 57 | let mut buf = vec![0u8; size]; 58 | rand::thread_rng().fill_bytes(&mut buf); 59 | let mut file = tokio::fs::File::create(path.clone()) 60 | .await 61 | .context("create file")?; 62 | file.write_all(&buf.clone()).await.context("write_all")?; 63 | file.flush().await.context("flush")?; 64 | 65 | // create doc & author 66 | let client = node.client(); 67 | let docs_client = client.docs(); 68 | let doc = docs_client.create().await.context("doc create")?; 69 | let author = client.authors().create().await.context("author create")?; 70 | 71 | // import file 72 | let import_outcome = doc 73 | .import_file( 74 | author, 75 | path_to_key(path.clone(), None, Some(in_root))?, 76 | path, 77 | true, 78 | ) 79 | .await 80 | .context("import file")? 
81 | .finish() 82 | .await 83 | .context("import finish")?; 84 | 85 | // export file 86 | let entry = doc 87 | .get_one(Query::author(author).key_exact(import_outcome.key)) 88 | .await 89 | .context("get one")? 90 | .unwrap(); 91 | let key = entry.key().to_vec(); 92 | let export_outcome = doc 93 | .export_file( 94 | entry, 95 | key_to_path(key, None, Some(out_root))?, 96 | ExportMode::Copy, 97 | ) 98 | .await 99 | .context("export file")? 100 | .finish() 101 | .await 102 | .context("export finish")?; 103 | 104 | let got_bytes = tokio::fs::read(export_outcome.path) 105 | .await 106 | .context("tokio read")?; 107 | assert_eq!(buf, got_bytes); 108 | 109 | Ok(()) 110 | } 111 | 112 | #[tokio::test] 113 | async fn test_authors() -> Result<()> { 114 | let node = Node::memory().spawn().await?; 115 | 116 | // default author always exists 117 | let authors: Vec<_> = node.authors().list().await?.try_collect().await?; 118 | assert_eq!(authors.len(), 1); 119 | let default_author = node.authors().default().await?; 120 | assert_eq!(authors, vec![default_author]); 121 | 122 | let author_id = node.authors().create().await?; 123 | 124 | let authors: Vec<_> = node.authors().list().await?.try_collect().await?; 125 | assert_eq!(authors.len(), 2); 126 | 127 | let author = node 128 | .authors() 129 | .export(author_id) 130 | .await? 131 | .expect("should have author"); 132 | node.authors().delete(author_id).await?; 133 | let authors: Vec<_> = node.authors().list().await?.try_collect().await?; 134 | assert_eq!(authors.len(), 1); 135 | 136 | node.authors().import(author).await?; 137 | 138 | let authors: Vec<_> = node.authors().list().await?.try_collect().await?; 139 | assert_eq!(authors.len(), 2); 140 | 141 | assert!(node.authors().default().await? != author_id); 142 | node.authors().set_default(author_id).await?; 143 | assert_eq!(node.authors().default().await?, author_id); 144 | 145 | Ok(()) 146 | } 147 | 148 | #[tokio::test] 149 | async fn test_default_author_memory() -> Result<()> { 150 | let iroh = Node::memory().spawn().await?; 151 | let author = iroh.authors().default().await?; 152 | assert!(iroh.authors().export(author).await?.is_some()); 153 | assert!(iroh.authors().delete(author).await.is_err()); 154 | Ok(()) 155 | } 156 | 157 | #[tokio::test] 158 | #[traced_test] 159 | async fn test_default_author_persist() -> TestResult<()> { 160 | let iroh_root_dir = tempfile::TempDir::new()?; 161 | let iroh_root = iroh_root_dir.path(); 162 | 163 | // check that the default author exists and cannot be deleted. 164 | let default_author = { 165 | let iroh = Node::persistent(iroh_root).spawn().await?; 166 | let author = iroh.authors().default().await?; 167 | assert!(iroh.authors().export(author).await?.is_some()); 168 | assert!(iroh.authors().delete(author).await.is_err()); 169 | iroh.shutdown().await?; 170 | author 171 | }; 172 | 173 | // check that the default author is persisted across restarts. 174 | { 175 | let iroh = Node::persistent(iroh_root).spawn().await?; 176 | let author = iroh.authors().default().await?; 177 | assert_eq!(author, default_author); 178 | assert!(iroh.authors().export(author).await?.is_some()); 179 | assert!(iroh.authors().delete(author).await.is_err()); 180 | iroh.shutdown().await?; 181 | }; 182 | 183 | // check that a new default author is created if the default author file is deleted 184 | // manually. 
185 | let default_author = { 186 | tokio::fs::remove_file(iroh_root.join("default-author")).await?; 187 | let iroh = Node::persistent(iroh_root).spawn().await?; 188 | let author = iroh.authors().default().await?; 189 | assert!(author != default_author); 190 | assert!(iroh.authors().export(author).await?.is_some()); 191 | assert!(iroh.authors().delete(author).await.is_err()); 192 | iroh.shutdown().await?; 193 | author 194 | }; 195 | 196 | // check that the node fails to start if the default author is missing from the docs store. 197 | { 198 | let mut docs_store = iroh_docs::store::fs::Store::persistent(iroh_root.join("docs.redb"))?; 199 | docs_store.delete_author(default_author)?; 200 | docs_store.flush()?; 201 | drop(docs_store); 202 | let iroh = Node::persistent(iroh_root).spawn().await; 203 | assert!(iroh.is_err()); 204 | 205 | // somehow the blob store is not shutdown correctly (yet?) on macos. 206 | // so we give it some time until we find a proper fix. 207 | #[cfg(target_os = "macos")] 208 | tokio::time::sleep(std::time::Duration::from_secs(1)).await; 209 | 210 | tokio::fs::remove_file(iroh_root.join("default-author")).await?; 211 | drop(iroh); 212 | let iroh = Node::persistent(iroh_root).spawn().await; 213 | if let Err(cause) = iroh.as_ref() { 214 | panic!("failed to start node: {:?}", cause); 215 | } 216 | iroh?.shutdown().await?; 217 | } 218 | 219 | // check that the default author can be set manually and is persisted. 220 | let default_author = { 221 | let iroh = Node::persistent(iroh_root).spawn().await?; 222 | let author = iroh.authors().create().await?; 223 | iroh.authors().set_default(author).await?; 224 | assert_eq!(iroh.authors().default().await?, author); 225 | iroh.shutdown().await?; 226 | author 227 | }; 228 | { 229 | let iroh = Node::persistent(iroh_root).spawn().await?; 230 | assert_eq!(iroh.authors().default().await?, default_author); 231 | iroh.shutdown().await?; 232 | } 233 | 234 | Ok(()) 235 | } 236 | -------------------------------------------------------------------------------- /tests/gc.rs: -------------------------------------------------------------------------------- 1 | #![cfg(feature = "rpc")] 2 | use std::{ 3 | io::{Cursor, Write}, 4 | path::PathBuf, 5 | time::Duration, 6 | }; 7 | 8 | use anyhow::Result; 9 | use bao_tree::{blake3, io::sync::Outboard, ChunkRanges}; 10 | use bytes::Bytes; 11 | use futures_lite::StreamExt; 12 | use iroh_blobs::{ 13 | store::{bao_tree, Map}, 14 | IROH_BLOCK_SIZE, 15 | }; 16 | use iroh_io::AsyncSliceReaderExt; 17 | use rand::RngCore; 18 | use testdir::testdir; 19 | use util::Node; 20 | 21 | mod util; 22 | 23 | pub fn create_test_data(size: usize) -> Bytes { 24 | let mut rand = rand::thread_rng(); 25 | let mut res = vec![0u8; size]; 26 | rand.fill_bytes(&mut res); 27 | res.into() 28 | } 29 | 30 | /// Take some data and encode it 31 | pub fn simulate_remote(data: &[u8]) -> (blake3::Hash, Cursor) { 32 | let outboard = bao_tree::io::outboard::PostOrderMemOutboard::create(data, IROH_BLOCK_SIZE); 33 | let mut encoded = Vec::new(); 34 | encoded 35 | .write_all(outboard.tree.size().to_le_bytes().as_ref()) 36 | .unwrap(); 37 | bao_tree::io::sync::encode_ranges_validated(data, &outboard, &ChunkRanges::all(), &mut encoded) 38 | .unwrap(); 39 | let hash = outboard.root(); 40 | (hash, Cursor::new(encoded.into())) 41 | } 42 | 43 | /// Wrap a bao store in a node that has gc enabled. 
44 | async fn persistent_node( 45 | path: PathBuf, 46 | gc_period: Duration, 47 | ) -> ( 48 | Node, 49 | async_channel::Receiver<()>, 50 | ) { 51 | let (gc_send, gc_recv) = async_channel::unbounded(); 52 | let node = Node::persistent(path) 53 | .gc_interval(Some(gc_period)) 54 | .register_gc_done_cb(Box::new(move || { 55 | gc_send.send_blocking(()).ok(); 56 | })) 57 | .spawn() 58 | .await 59 | .unwrap(); 60 | (node, gc_recv) 61 | } 62 | 63 | #[tokio::test] 64 | async fn redb_doc_import_stress() -> Result<()> { 65 | let _ = tracing_subscriber::fmt::try_init(); 66 | let dir = testdir!(); 67 | let (node, _) = persistent_node(dir.join("store"), Duration::from_secs(10)).await; 68 | let bao_store = node.blob_store().clone(); 69 | let client = node.client(); 70 | let doc = client.docs().create().await?; 71 | let author = client.authors().create().await?; 72 | let temp_path = dir.join("temp"); 73 | tokio::fs::create_dir_all(&temp_path).await?; 74 | let mut to_import = Vec::new(); 75 | for i in 0..100 { 76 | let data = create_test_data(16 * 1024 * 3 + 1); 77 | let path = temp_path.join(format!("file{}", i)); 78 | tokio::fs::write(&path, &data).await?; 79 | let key = Bytes::from(format!("{}", path.display())); 80 | to_import.push((key, path, data)); 81 | } 82 | for (key, path, _) in to_import.iter() { 83 | let mut progress = doc.import_file(author, key.clone(), path, true).await?; 84 | while let Some(msg) = progress.next().await { 85 | tracing::info!("import progress {:?}", msg); 86 | } 87 | } 88 | for (i, (key, _, expected)) in to_import.iter().enumerate() { 89 | let Some(entry) = doc.get_exact(author, key.clone(), true).await? else { 90 | anyhow::bail!("doc entry not found {}", i); 91 | }; 92 | let hash = entry.content_hash(); 93 | let Some(content) = bao_store.get(&hash).await? else { 94 | anyhow::bail!("content not found {} {}", i, &hash.to_hex()[..8]); 95 | }; 96 | let data = content.data_reader().read_to_end().await?; 97 | assert_eq!(data, expected); 98 | } 99 | Ok(()) 100 | } 101 | -------------------------------------------------------------------------------- /tests/util.rs: -------------------------------------------------------------------------------- 1 | #![cfg(feature = "rpc")] 2 | #![allow(dead_code)] 3 | use std::{ 4 | marker::PhantomData, 5 | net::{Ipv4Addr, Ipv6Addr, SocketAddrV4, SocketAddrV6}, 6 | ops::Deref, 7 | path::{Path, PathBuf}, 8 | }; 9 | 10 | use iroh::{discovery::Discovery, dns::DnsResolver, NodeId, RelayMode, SecretKey}; 11 | use iroh_blobs::{ 12 | net_protocol::Blobs, 13 | store::{GcConfig, Store as BlobStore}, 14 | }; 15 | use iroh_docs::protocol::Docs; 16 | use iroh_gossip::net::Gossip; 17 | use nested_enum_utils::enum_conversions; 18 | use quic_rpc::transport::{Connector, Listener}; 19 | use serde::{Deserialize, Serialize}; 20 | use tokio_util::task::AbortOnDropHandle; 21 | 22 | /// Default bind address for the node. 23 | /// 11204 is "iroh" in leetspeak 24 | pub const DEFAULT_BIND_PORT: u16 = 11204; 25 | 26 | /// The default bind address for the iroh IPv4 socket. 27 | pub const DEFAULT_BIND_ADDR_V4: SocketAddrV4 = 28 | SocketAddrV4::new(Ipv4Addr::UNSPECIFIED, DEFAULT_BIND_PORT); 29 | 30 | /// The default bind address for the iroh IPv6 socket. 
31 | pub const DEFAULT_BIND_ADDR_V6: SocketAddrV6 = 32 | SocketAddrV6::new(Ipv6Addr::UNSPECIFIED, DEFAULT_BIND_PORT + 1, 0, 0); 33 | 34 | /// An iroh node that just has the blobs transport 35 | #[derive(Debug)] 36 | pub struct Node { 37 | router: iroh::protocol::Router, 38 | client: Client, 39 | store: S, 40 | rpc_task: AbortOnDropHandle<()>, 41 | } 42 | 43 | impl Deref for Node { 44 | type Target = Client; 45 | 46 | fn deref(&self) -> &Self::Target { 47 | &self.client 48 | } 49 | } 50 | 51 | #[derive(Debug, Serialize, Deserialize)] 52 | #[enum_conversions] 53 | enum Request { 54 | BlobsOrTags(iroh_blobs::rpc::proto::Request), 55 | Docs(iroh_docs::rpc::proto::Request), 56 | } 57 | 58 | #[derive(Debug, Serialize, Deserialize)] 59 | #[enum_conversions] 60 | enum Response { 61 | BlobsOrTags(iroh_blobs::rpc::proto::Response), 62 | Docs(iroh_docs::rpc::proto::Response), 63 | } 64 | 65 | #[derive(Debug, Clone, Copy)] 66 | struct Service; 67 | 68 | impl quic_rpc::Service for Service { 69 | type Req = Request; 70 | type Res = Response; 71 | } 72 | 73 | #[derive(Debug, Clone)] 74 | pub struct Client { 75 | blobs: iroh_blobs::rpc::client::blobs::Client, 76 | docs: iroh_docs::rpc::client::docs::Client, 77 | authors: iroh_docs::rpc::client::authors::Client, 78 | } 79 | 80 | impl Client { 81 | fn new(client: quic_rpc::RpcClient) -> Self { 82 | Self { 83 | blobs: iroh_blobs::rpc::client::blobs::Client::new(client.clone().map().boxed()), 84 | docs: iroh_docs::rpc::client::docs::Client::new(client.clone().map().boxed()), 85 | authors: iroh_docs::rpc::client::authors::Client::new(client.map().boxed()), 86 | } 87 | } 88 | 89 | pub fn blobs(&self) -> &iroh_blobs::rpc::client::blobs::Client { 90 | &self.blobs 91 | } 92 | 93 | pub fn docs(&self) -> &iroh_docs::rpc::client::docs::Client { 94 | &self.docs 95 | } 96 | 97 | pub fn authors(&self) -> &iroh_docs::rpc::client::authors::Client { 98 | &self.authors 99 | } 100 | } 101 | 102 | /// An iroh node builder 103 | #[derive(derive_more::Debug)] 104 | pub struct Builder { 105 | path: Option, 106 | secret_key: Option, 107 | relay_mode: RelayMode, 108 | dns_resolver: Option, 109 | node_discovery: Option>, 110 | gc_interval: Option, 111 | #[debug(skip)] 112 | register_gc_done_cb: Option>, 113 | insecure_skip_relay_cert_verify: bool, 114 | bind_random_port: bool, 115 | _p: PhantomData, 116 | } 117 | 118 | impl Builder { 119 | /// Spawns the node 120 | async fn spawn0(self, store: S) -> anyhow::Result> { 121 | let mut addr_v4 = DEFAULT_BIND_ADDR_V4; 122 | let mut addr_v6 = DEFAULT_BIND_ADDR_V6; 123 | if self.bind_random_port { 124 | addr_v4.set_port(0); 125 | addr_v6.set_port(0); 126 | } 127 | let mut builder = iroh::Endpoint::builder() 128 | .bind_addr_v4(addr_v4) 129 | .bind_addr_v6(addr_v6) 130 | .relay_mode(self.relay_mode.clone()) 131 | .insecure_skip_relay_cert_verify(self.insecure_skip_relay_cert_verify); 132 | if let Some(dns_resolver) = self.dns_resolver.clone() { 133 | builder = builder.dns_resolver(dns_resolver); 134 | } 135 | if let Some(secret_key) = self.secret_key { 136 | builder = builder.secret_key(secret_key); 137 | } 138 | if let Some(discovery) = self.node_discovery { 139 | builder = builder.discovery(discovery); 140 | } else { 141 | builder = builder.discovery_n0(); 142 | } 143 | let endpoint = builder.bind().await?; 144 | let mut router = iroh::protocol::Router::builder(endpoint.clone()); 145 | let blobs = Blobs::builder(store.clone()).build(&endpoint); 146 | let gossip = Gossip::builder().spawn(endpoint.clone()).await?; 147 | let builder = match 
self.path { 148 | Some(ref path) => Docs::persistent(path.to_path_buf()), 149 | None => Docs::memory(), 150 | }; 151 | let docs = match builder.spawn(&blobs, &gossip).await { 152 | Ok(docs) => docs, 153 | Err(err) => { 154 | store.shutdown().await; 155 | return Err(err); 156 | } 157 | }; 158 | router = router.accept(iroh_blobs::ALPN, blobs.clone()); 159 | router = router.accept(iroh_docs::ALPN, docs.clone()); 160 | router = router.accept(iroh_gossip::ALPN, gossip.clone()); 161 | 162 | // Build the router 163 | let router = router.spawn(); 164 | 165 | // Setup RPC 166 | let (internal_rpc, controller) = 167 | quic_rpc::transport::flume::channel::(1); 168 | let controller = controller.boxed(); 169 | let internal_rpc = internal_rpc.boxed(); 170 | let internal_rpc = quic_rpc::RpcServer::::new(internal_rpc); 171 | 172 | let docs2 = docs.clone(); 173 | let blobs2 = blobs.clone(); 174 | let rpc_task: tokio::task::JoinHandle<()> = tokio::task::spawn(async move { 175 | loop { 176 | let request = internal_rpc.accept().await; 177 | match request { 178 | Ok(accepting) => { 179 | let blobs = blobs2.clone(); 180 | let docs = docs2.clone(); 181 | tokio::task::spawn(async move { 182 | let (msg, chan) = accepting.read_first().await?; 183 | match msg { 184 | Request::BlobsOrTags(msg) => { 185 | blobs.handle_rpc_request(msg, chan.map().boxed()).await?; 186 | } 187 | Request::Docs(msg) => { 188 | docs.handle_rpc_request(msg, chan.map().boxed()).await?; 189 | } 190 | } 191 | anyhow::Ok(()) 192 | }); 193 | } 194 | Err(err) => { 195 | tracing::warn!("rpc error: {:?}", err); 196 | } 197 | } 198 | } 199 | }); 200 | 201 | let client = quic_rpc::RpcClient::new(controller); 202 | if let Some(period) = self.gc_interval { 203 | blobs.add_protected(docs.protect_cb())?; 204 | blobs.start_gc(GcConfig { 205 | period, 206 | done_callback: self.register_gc_done_cb, 207 | })?; 208 | } 209 | 210 | let client = Client::new(client); 211 | Ok(Node { 212 | router, 213 | client, 214 | store, 215 | rpc_task: AbortOnDropHandle::new(rpc_task), 216 | }) 217 | } 218 | 219 | pub fn secret_key(mut self, value: SecretKey) -> Self { 220 | self.secret_key = Some(value); 221 | self 222 | } 223 | 224 | pub fn relay_mode(mut self, value: RelayMode) -> Self { 225 | self.relay_mode = value; 226 | self 227 | } 228 | 229 | pub fn dns_resolver(mut self, value: DnsResolver) -> Self { 230 | self.dns_resolver = Some(value); 231 | self 232 | } 233 | 234 | pub fn node_discovery(mut self, value: Box) -> Self { 235 | self.node_discovery = Some(value); 236 | self 237 | } 238 | 239 | pub fn gc_interval(mut self, value: Option) -> Self { 240 | self.gc_interval = value; 241 | self 242 | } 243 | 244 | pub fn register_gc_done_cb(mut self, value: Box) -> Self { 245 | self.register_gc_done_cb = Some(value); 246 | self 247 | } 248 | 249 | pub fn insecure_skip_relay_cert_verify(mut self, value: bool) -> Self { 250 | self.insecure_skip_relay_cert_verify = value; 251 | self 252 | } 253 | 254 | pub fn bind_random_port(mut self) -> Self { 255 | self.bind_random_port = true; 256 | self 257 | } 258 | 259 | fn new(path: Option) -> Self { 260 | Self { 261 | path, 262 | secret_key: None, 263 | relay_mode: RelayMode::Default, 264 | gc_interval: None, 265 | insecure_skip_relay_cert_verify: false, 266 | bind_random_port: false, 267 | dns_resolver: None, 268 | node_discovery: None, 269 | register_gc_done_cb: None, 270 | _p: PhantomData, 271 | } 272 | } 273 | } 274 | 275 | impl Node { 276 | /// Creates a new node with memory storage 277 | pub fn memory() -> Builder { 278 | 
Builder::new(None) 279 | } 280 | } 281 | 282 | impl Builder<iroh_blobs::store::mem::Store> { 283 | /// Spawns the node 284 | pub async fn spawn(self) -> anyhow::Result<Node<iroh_blobs::store::mem::Store>> { 285 | let store = iroh_blobs::store::mem::Store::new(); 286 | self.spawn0(store).await 287 | } 288 | } 289 | 290 | impl Node<iroh_blobs::store::fs::Store> { 291 | /// Creates a new node with persistent storage 292 | pub fn persistent(path: impl AsRef<Path>) -> Builder<iroh_blobs::store::fs::Store> { 293 | let path = Some(path.as_ref().to_owned()); 294 | Builder::new(path) 295 | } 296 | } 297 | 298 | impl Builder<iroh_blobs::store::fs::Store> { 299 | /// Spawns the node 300 | pub async fn spawn(self) -> anyhow::Result<Node<iroh_blobs::store::fs::Store>> { 301 | let store = iroh_blobs::store::fs::Store::load(self.path.clone().unwrap()).await?; 302 | self.spawn0(store).await 303 | } 304 | } 305 | 306 | impl<S: BlobStore> Node<S> { 307 | /// Returns the node id 308 | pub fn node_id(&self) -> NodeId { 309 | self.router.endpoint().node_id() 310 | } 311 | 312 | /// Returns the blob store 313 | pub fn blob_store(&self) -> &S { 314 | &self.store 315 | } 316 | 317 | /// Shuts down the node 318 | pub async fn shutdown(self) -> anyhow::Result<()> { 319 | self.router.shutdown().await?; 320 | self.rpc_task.abort(); 321 | Ok(()) 322 | } 323 | 324 | /// Returns the client 325 | pub fn client(&self) -> &Client { 326 | &self.client 327 | } 328 | } 329 | --------------------------------------------------------------------------------
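
The `LatestPerKeySelector` in src/store/util.rs is driven by pushing entries in key-sorted order and then a final `None`, which flushes the last buffered entry before `Finished` is reported. A minimal illustrative sketch of such a drain loop, written as if it lived inside src/store/util.rs; the free function `latest_per_key` is hypothetical and not part of the crate:

// Hypothetical helper (not in the crate): collapse a key-sorted stream of
// entries down to the newest entry per key using LatestPerKeySelector.
fn latest_per_key(entries: impl IntoIterator<Item = SignedEntry>) -> Vec<SignedEntry> {
    let mut selector = LatestPerKeySelector::default();
    let mut out = Vec::new();
    let mut iter = entries.into_iter();
    loop {
        // `iter.next()` becomes `None` once the input is exhausted, which makes
        // the selector emit its last buffered entry and then `Finished`.
        match selector.push(iter.next()) {
            SelectorRes::Continue => continue,
            SelectorRes::Some(entry) => out.push(entry),
            SelectorRes::Finished => break,
        }
    }
    out
}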