├── .cargo └── config ├── .clippy.toml ├── .config └── nextest.toml ├── .github ├── CODEOWNERS ├── dependabot.yml └── workflows │ ├── ci-docs.yml │ ├── codecov.yml │ ├── dependabot-auto-merge.yml │ ├── docs.yml │ ├── nightly.yml │ ├── publish.yml │ ├── rust.yml │ └── stale.yml ├── .gitignore ├── Cargo.toml ├── LICENSE ├── README.md ├── crates ├── component │ ├── Cargo.toml │ ├── README.md │ ├── examples │ │ └── stream_example.rs │ └── src │ │ └── lib.rs ├── mysten-network │ ├── Cargo.toml │ └── src │ │ ├── client.rs │ │ ├── codec.rs │ │ ├── config.rs │ │ ├── lib.rs │ │ ├── metrics.rs │ │ ├── multiaddr.rs │ │ └── server.rs ├── mysten-util-mem-derive │ ├── Cargo.toml │ └── lib.rs ├── mysten-util-mem │ ├── Cargo.toml │ ├── README.md │ ├── src │ │ ├── allocators.rs │ │ ├── external_impls.rs │ │ ├── lib.rs │ │ ├── malloc_size.rs │ │ ├── memory_stats_noop.rs │ │ └── sizeof.rs │ └── tests │ │ └── derive.rs ├── name-variant │ ├── Cargo.toml │ ├── README.md │ ├── src │ │ └── lib.rs │ └── tests │ │ └── test.rs ├── prometheus-closure-metric │ ├── Cargo.toml │ ├── src │ │ └── lib.rs │ └── tests │ │ └── closure_metric.rs ├── rccheck │ ├── Cargo.toml │ ├── proptest-regressions │ │ └── tests │ │ │ └── ed25519_certgen_tests.txt │ └── src │ │ ├── ed25519_certgen.rs │ │ ├── lib.rs │ │ └── tests │ │ ├── ed25519_certgen_tests.rs │ │ ├── ed25519_external_trust_anchor.rs │ │ ├── psk_set_tests.rs │ │ ├── psk_tests.rs │ │ └── test_utils.rs ├── telemetry-subscribers │ ├── Cargo.toml │ ├── README.md │ ├── examples │ │ └── easy-init.rs │ ├── src │ │ ├── lib.rs │ │ └── span_latency_prom.rs │ └── tests │ │ └── reload.rs ├── typed-store-derive │ ├── Cargo.toml │ └── src │ │ └── lib.rs ├── typed-store │ ├── Cargo.toml │ ├── src │ │ ├── lib.rs │ │ ├── metrics.rs │ │ ├── rocks │ │ │ ├── errors.rs │ │ │ ├── iter.rs │ │ │ ├── keys.rs │ │ │ ├── mod.rs │ │ │ ├── tests.rs │ │ │ └── values.rs │ │ ├── tests │ │ │ └── store_tests.rs │ │ └── traits.rs │ └── tests │ │ └── macro_tests.rs └── x │ ├── Cargo.toml │ └── src │ ├── lint.rs │ └── main.rs ├── deny.toml ├── rust-toolchain └── scripts ├── changed-files.sh ├── get_current_version.sh └── is_version_already_uploaded.sh /.cargo/config: -------------------------------------------------------------------------------- 1 | [alias] 2 | # Collection of project wide clippy lints. This is done via an alias because 3 | # clippy doesn't currently allow for specifiying project-wide lints in a 4 | # configuration file. This is a similar workaround to the ones presented here: 5 | # 6 | xclippy = [ 7 | "clippy", "--all-targets", "--", 8 | "-Wclippy::all", 9 | "-Wclippy::disallowed_methods", 10 | ] 11 | xlint = "run --package x --bin x -- lint" 12 | -------------------------------------------------------------------------------- /.clippy.toml: -------------------------------------------------------------------------------- 1 | # cyclomatic complexity is not always useful 2 | cognitive-complexity-threshold = 100 3 | # types are used for safety encoding 4 | type-complexity-threshold = 10000 5 | # big constructors 6 | too-many-arguments-threshold = 14 7 | 8 | disallowed-methods = [ 9 | # we use tracing with the log feature instead of the log crate. 10 | { path = "log::info", reason = "use tracing::info instead" }, 11 | { path = "log::debug", reason = "use tracing::debug instead" }, 12 | { path = "log::error", reason = "use tracing::error instead" }, 13 | { path = "log::warn", reason = "use tracing::warn instead" }, 14 | # unbounded channels are for expert use only 15 | { path = "tokio::sync::mpsc::unbounded_channel", reason = "use a bounded channel instead" }, 16 | { path = "futures::channel::mpsc::unbounded", reason = "use a bounded channel instead" }, 17 | { path = "futures_channel::mpsc::unbounded", reason = "use a bounded channel instead" }, 18 | ] 19 | -------------------------------------------------------------------------------- /.config/nextest.toml: -------------------------------------------------------------------------------- 1 | [profile.ci] 2 | # Print out output for failing tests as soon as they fail, and also at the end 3 | # of the run (for easy scrollability). 4 | failure-output = "immediate-final" 5 | # Show skipped tests in the CI output. 6 | status-level = "skip" 7 | # Do not cancel the test run on the first failure. 8 | fail-fast = false 9 | # Retry failing tests in order to not block builds on flaky tests 10 | retries = 2 11 | -------------------------------------------------------------------------------- /.github/CODEOWNERS: -------------------------------------------------------------------------------- 1 | crates/rccheck @huitseeker @kchalkias 2 | crates/name-variant @huitseeker 3 | crates/telemetry-subscribers @velvia 4 | crates/component @laura-makdah 5 | crates/typed-store @huitseeker @gdanezis 6 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | # 2 | # Dependabot configuration file 3 | # 4 | 5 | version: 2 6 | updates: 7 | - package-ecosystem: "cargo" 8 | directory: "/" 9 | schedule: 10 | interval: "weekly" 11 | - package-ecosystem: "github-actions" 12 | directory: "/" 13 | schedule: 14 | interval: "weekly" 15 | -------------------------------------------------------------------------------- /.github/workflows/ci-docs.yml: -------------------------------------------------------------------------------- 1 | name: Documentation 2 | 3 | on: 4 | push: 5 | branches: [ main, extensions ] 6 | pull_request: 7 | types: [opened, synchronize, reopened, ready_for_review] 8 | 9 | 10 | jobs: 11 | spelling: 12 | name: Lint documentation 13 | runs-on: [ubuntu-latest] 14 | steps: 15 | - uses: actions/checkout@v3 16 | - name: Spell Check Docs 17 | uses: crate-ci/typos@master 18 | with: 19 | files: ./*.md 20 | isolated: true 21 | -------------------------------------------------------------------------------- /.github/workflows/codecov.yml: -------------------------------------------------------------------------------- 1 | name: Codecov 2 | on: 3 | push: 4 | branches: 5 | - main 6 | pull_request: 7 | branches: 8 | - main 9 | types: [opened, reopened, synchronize] 10 | 11 | jobs: 12 | codecov-grcov: 13 | name: Generate code coverage 14 | runs-on: ubuntu-latest 15 | strategy: 16 | fail-fast: true 17 | steps: 18 | - uses: actions/checkout@v3 19 | - uses: actions-rs/toolchain@v1 20 | with: 21 | components: llvm-tools-preview 22 | # Enable caching of the 'librocksdb-sys' crate by additionally caching the 23 | # 'librocksdb-sys' src directory which is managed by cargo 24 | - uses: bmwill/rust-cache@v1 # Fork of 'Swatinem/rust-cache' which allows caching additional paths 25 | with: 26 | path: ~/.cargo/registry/src/**/librocksdb-sys-* 27 | - name: Install grcov, and cache the binary 28 | uses: baptiste0928/cargo-install@v1 29 | with: 30 | crate: grcov 31 | locked: true 32 | - name: Build 33 | uses: actions-rs/cargo@v1 34 | with: 35 | command: build 36 | env: 37 | RUSTFLAGS: '-Cinstrument-coverage' 38 | RUSTDOCFLAGS: '-Cinstrument-coverage' 39 | - name: Run tests 40 | env: 41 | RUSTFLAGS: '-Cinstrument-coverage' 42 | RUSTDOCFLAGS: '-Cinstrument-coverage' 43 | LLVM_PROFILE_FILE: 'codecov-instrumentation-%p-%m.profraw' 44 | run: cargo test 45 | - name: Run grcov 46 | run: grcov . --binary-path target/debug/ -s . -t lcov --branch --ignore-not-existing --ignore '../**' --ignore '/*' -o coverage.lcov 47 | - name: Upload to codecov.io 48 | uses: codecov/codecov-action@v3 49 | -------------------------------------------------------------------------------- /.github/workflows/dependabot-auto-merge.yml: -------------------------------------------------------------------------------- 1 | name: auto-merge 2 | 3 | on: 4 | pull_request_target: 5 | branches: 6 | - main 7 | 8 | permissions: 9 | contents: read 10 | 11 | jobs: 12 | # Merge pull requests from dependabot that upgrade a minor version number and pass CI 13 | auto-merge: 14 | permissions: 15 | contents: none 16 | runs-on: ubuntu-latest 17 | if: github.actor == 'dependabot[bot]' 18 | steps: 19 | - uses: actions/checkout@v3 20 | - uses: ahmadnassri/action-dependabot-auto-merge@v2.6 21 | with: 22 | github-token: ${{ secrets.AUTOMERGE_TOKEN }} 23 | command: 'squash and merge' 24 | target: minor 25 | -------------------------------------------------------------------------------- /.github/workflows/docs.yml: -------------------------------------------------------------------------------- 1 | name: Docs 2 | 3 | on: 4 | push: 5 | branches: 6 | - master 7 | paths: 8 | - "**.rs" 9 | - "Cargo.toml" 10 | - "Cargo.lock" 11 | workflow_dispatch: 12 | 13 | jobs: 14 | docs: 15 | name: Generate crate documentation 16 | runs-on: ubuntu-latest 17 | steps: 18 | - name: Checkout sources 19 | uses: actions/checkout@v3 20 | 21 | - name: Install Rust toolchain 22 | uses: actions-rs/toolchain@v1 23 | with: 24 | profile: minimal 25 | toolchain: nightly 26 | override: true 27 | 28 | - name: Generate documentation 29 | uses: actions-rs/cargo@v1 30 | env: 31 | RUSTDOCFLAGS: "--enable-index-page -Zunstable-options" 32 | with: 33 | command: doc 34 | args: --workspace --no-deps 35 | 36 | - name: Deploy documentation 37 | uses: peaceiris/actions-gh-pages@v3 38 | with: 39 | github_token: ${{ secrets.GITHUB_TOKEN }} 40 | publish_dir: ./target/doc 41 | -------------------------------------------------------------------------------- /.github/workflows/nightly.yml: -------------------------------------------------------------------------------- 1 | name: nightly 2 | 3 | on: 4 | schedule: 5 | - cron: '0 0 * * *' # every day at midnight 6 | 7 | env: 8 | CARGO_TERM_COLOR: always 9 | # Disable incremental compilation. 10 | # 11 | # Incremental compilation is useful as part of an edit-build-test-edit cycle, 12 | # as it lets the compiler avoid recompiling code that hasn't changed. However, 13 | # on CI, we're not making small edits; we're almost always building the entire 14 | # project from scratch. Thus, incremental compilation on CI actually 15 | # introduces *additional* overhead to support making future builds 16 | # faster...but no future builds will ever occur in any given CI environment. 17 | # 18 | # See https://matklad.github.io/2021/09/04/fast-rust-builds.html#ci-workflow 19 | # for details. 20 | CARGO_INCREMENTAL: 0 21 | # Allow more retries for network requests in cargo (downloading crates) and 22 | # rustup (installing toolchains). This should help to reduce flaky CI failures 23 | # from transient network timeouts or other issues. 24 | CARGO_NET_RETRY: 10 25 | RUSTUP_MAX_RETRIES: 10 26 | # Don't emit giant backtraces in the CI logs. 27 | RUST_BACKTRACE: short 28 | 29 | jobs: 30 | beta: 31 | name: Run test on the beta channel 32 | runs-on: ubuntu-latest 33 | steps: 34 | - uses: actions/checkout@v3 35 | - name: Install beta toolchain 36 | uses: actions-rs/toolchain@v1 37 | with: 38 | toolchain: beta 39 | components: clippy 40 | override: true 41 | # See '.cargo/config' for list of enabled/disappled clippy lints 42 | - name: cargo clippy 43 | run: cargo xclippy -D warnings 44 | - name: cargo test 45 | uses: actions-rs/cargo@v1 46 | with: 47 | command: test 48 | args: --all-features 49 | 50 | cargo-udeps: 51 | runs-on: ubuntu-latest 52 | steps: 53 | - uses: actions/checkout@v3 54 | - uses: actions-rs/toolchain@v1 55 | # Enable caching of the 'librocksdb-sys' crate by additionally caching the 56 | # 'librocksdb-sys' src directory which is managed by cargo 57 | - uses: bmwill/rust-cache@v1 # Fork of 'Swatinem/rust-cache' which allows caching additional paths 58 | with: 59 | path: ~/.cargo/registry/src/**/librocksdb-sys-* 60 | - name: Install cargo-udeps, and cache the binary 61 | uses: baptiste0928/cargo-install@v1 62 | with: 63 | crate: cargo-udeps 64 | locked: true 65 | # Normally running cargo-udeps requires use of a nightly compiler 66 | # In order to have a more stable and less noisy experience, lets instead 67 | # opt to use the stable toolchain specified via the 'rust-toolchain' file 68 | # and instead enable nightly features via 'RUSTC_BOOTSTRAP' 69 | - name: run cargo-udeps 70 | run: RUSTC_BOOTSTRAP=1 cargo udeps 71 | -------------------------------------------------------------------------------- /.github/workflows/publish.yml: -------------------------------------------------------------------------------- 1 | name: publish crates 2 | on: 3 | push: 4 | branches: 5 | - main 6 | 7 | env: 8 | CARGO_TERM_COLOR: always 9 | # Disable incremental compilation. 10 | # 11 | # Incremental compilation is useful as part of an edit-build-test-edit cycle, 12 | # as it lets the compiler avoid recompiling code that hasn't changed. However, 13 | # on CI, we're not making small edits; we're almost always building the entire 14 | # project from scratch. Thus, incremental compilation on CI actually 15 | # introduces *additional* overhead to support making future builds 16 | # faster...but no future builds will ever occur in any given CI environment. 17 | # 18 | # See https://matklad.github.io/2021/09/04/fast-rust-builds.html#ci-workflow 19 | # for details. 20 | CARGO_INCREMENTAL: 0 21 | # Allow more retries for network requests in cargo (downloading crates) and 22 | # rustup (installing toolchains). This should help to reduce flaky CI failures 23 | # from transient network timeouts or other issues. 24 | CARGO_NET_RETRY: 10 25 | RUSTUP_MAX_RETRIES: 10 26 | # Don't emit giant backtraces in the CI logs. 27 | RUST_BACKTRACE: short 28 | 29 | jobs: 30 | publish: 31 | name: Publish to crates.io 32 | # Needed to get around problems when running this script and package version does not change 33 | continue-on-error: true 34 | runs-on: ubuntu-latest 35 | strategy: 36 | matrix: 37 | package: 38 | - mysten-network 39 | - name-variant 40 | - telemetry-subscribers 41 | - typed-store 42 | - typed-store-derive 43 | steps: 44 | - name: Checkout 45 | uses: actions/checkout@v3 46 | - name: Install Rust toolchain 47 | uses: actions-rs/toolchain@v1 48 | with: 49 | profile: minimal 50 | toolchain: nightly 51 | override: true 52 | - id: check 53 | run: | 54 | set +e 55 | ./scripts/is_version_already_uploaded.sh ${{ matrix.package }} 56 | export EXIT_CODE="$?" 57 | set -e 58 | if [[ "$EXIT_CODE" == "7" ]]; then 59 | echo '::set-output name=is_new_version::no' 60 | elif [[ "$EXIT_CODE" == "0" ]]; then 61 | echo '::set-output name=is_new_version::yes' 62 | else 63 | # Unexpected outcome, indicates a bug. 64 | exit "$EXIT_CODE" 65 | fi 66 | - name: Check semver 67 | # Only run the semver script if the version changed, otherwise it errors out 68 | if: steps.check.outputs.is_new_version == 'yes' 69 | uses: obi1kenobi/cargo-semver-checks-action@v1 70 | with: 71 | crate-name: ${{ matrix.package }} 72 | version-tag-prefix: ${{ matrix.package }}-v 73 | - name: Tag the version 74 | if: steps.check.outputs.is_new_version == 'yes' 75 | run: | 76 | set -euxo pipefail 77 | export CURRENT_VERSION="$(./scripts/get_current_version.sh ${{ matrix.package}})" 78 | git tag "${{ matrix.package}}-v$CURRENT_VERSION" 79 | git push origin "${{ matrix.package}}-v$CURRENT_VERSION" 80 | - uses: actions-rs/toolchain@v1 81 | if: steps.check.outputs.is_new_version == 'yes' 82 | - name: Publish packages 83 | if: steps.check.outputs.is_new_version == 'yes' 84 | env: 85 | CARGO_REGISTRY_TOKEN: ${{ secrets.CARGO_REGISTRY_TOKEN }} 86 | run: cargo publish -p ${{ matrix.package }} 87 | -------------------------------------------------------------------------------- /.github/workflows/rust.yml: -------------------------------------------------------------------------------- 1 | name: rust 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | pull_request: 8 | types: [opened, synchronize, reopened, ready_for_review] 9 | 10 | env: 11 | CARGO_TERM_COLOR: always 12 | # Disable incremental compilation. 13 | # 14 | # Incremental compilation is useful as part of an edit-build-test-edit cycle, 15 | # as it lets the compiler avoid recompiling code that hasn't changed. However, 16 | # on CI, we're not making small edits; we're almost always building the entire 17 | # project from scratch. Thus, incremental compilation on CI actually 18 | # introduces *additional* overhead to support making future builds 19 | # faster...but no future builds will ever occur in any given CI environment. 20 | # 21 | # See https://matklad.github.io/2021/09/04/fast-rust-builds.html#ci-workflow 22 | # for details. 23 | CARGO_INCREMENTAL: 0 24 | # Allow more retries for network requests in cargo (downloading crates) and 25 | # rustup (installing toolchains). This should help to reduce flaky CI failures 26 | # from transient network timeouts or other issues. 27 | CARGO_NET_RETRY: 10 28 | RUSTUP_MAX_RETRIES: 10 29 | # Don't emit giant backtraces in the CI logs. 30 | RUST_BACKTRACE: short 31 | 32 | jobs: 33 | license-check: 34 | name: license-check 35 | runs-on: [ubuntu-latest] 36 | steps: 37 | - uses: actions/checkout@v3 38 | - uses: actions-rs/toolchain@v1 39 | - uses: bmwill/rust-cache@v1 # Fork of 'Swatinem/rust-cache' which allows caching additional paths 40 | - run: cargo xlint 41 | 42 | test: 43 | runs-on: ${{ matrix.os }} 44 | strategy: 45 | matrix: 46 | os: 47 | - ubuntu-ghcloud 48 | - windows-ghcloud 49 | fail-fast: false 50 | env: 51 | RUSTFLAGS: -D warnings 52 | steps: 53 | - uses: actions/checkout@v3 54 | - uses: actions-rs/toolchain@v1 55 | - uses: taiki-e/install-action@nextest 56 | # Enable caching of the 'librocksdb-sys' crate by additionally caching the 57 | # 'librocksdb-sys' src directory which is managed by cargo 58 | - uses: bmwill/rust-cache@v1 # Fork of 'Swatinem/rust-cache' which allows caching additional paths 59 | with: 60 | path: ~/.cargo/registry/src/**/librocksdb-sys-* 61 | - name: cargo test 62 | run: | 63 | cargo nextest run --all-features --profile ci 64 | - name: Doctests 65 | # all-features activates tokio-console, which rustdoc requires tokio_unstable, see 66 | # https://github.com/tokio-rs/console/tree/main/console-subscriber#enabling-tokio-instrumentation 67 | run: | 68 | cargo test --doc 69 | # Ensure there are no uncommitted changes in the repo after running tests 70 | - run: scripts/changed-files.sh 71 | 72 | clippy: 73 | runs-on: ubuntu-latest 74 | steps: 75 | - uses: actions/checkout@v3 76 | - uses: actions-rs/toolchain@v1 77 | with: 78 | components: clippy 79 | # Enable caching of the 'librocksdb-sys' crate by additionally caching the 80 | # 'librocksdb-sys' src directory which is managed by cargo 81 | - uses: bmwill/rust-cache@v1 # Fork of 'Swatinem/rust-cache' which allows caching additional paths 82 | with: 83 | path: ~/.cargo/registry/src/**/librocksdb-sys-* 84 | # See '.cargo/config' for list of enabled/disappled clippy lints 85 | - name: cargo clippy 86 | run: cargo xclippy -D warnings 87 | 88 | rustfmt: 89 | runs-on: ubuntu-latest 90 | steps: 91 | - uses: actions/checkout@v3 92 | - uses: actions-rs/toolchain@v1 93 | with: 94 | components: rustfmt 95 | - name: rustfmt 96 | uses: actions-rs/cargo@v1 97 | with: 98 | command: fmt 99 | args: --all --check 100 | 101 | cargo-deny: 102 | name: cargo-deny (advisories, licenses, bans, ...) 103 | runs-on: ubuntu-latest 104 | steps: 105 | - uses: actions/checkout@v3 106 | - uses: EmbarkStudios/cargo-deny-action@v1 107 | -------------------------------------------------------------------------------- /.github/workflows/stale.yml: -------------------------------------------------------------------------------- 1 | name: Mark stale issues and pull requests 2 | on: 3 | schedule: 4 | - cron: "30 1 * * *" 5 | jobs: 6 | stale: 7 | runs-on: ubuntu-latest 8 | permissions: 9 | issues: write 10 | pull-requests: write 11 | steps: 12 | - uses: actions/stale@v6 13 | with: 14 | repo-token: ${{ secrets.GITHUB_TOKEN }} 15 | days-before-stale: 60 16 | # We disable issue closing for now 17 | days-before-issue-close: -1 18 | exempt-all-milestones: true 19 | exempt-all-assignees: true 20 | stale-issue-message: 'This issue is stale because it has been open 60 days with no activity. Remove stale label or comment or this will be closed in 7 days.' 21 | stale-pr-message: 'This PR is stale because it has been open 60 days with no activity. Remove stale label or comment or this will be closed in 7 days.' 22 | close-issue-message: 'This issue was closed because it has been stalled for 7 days with no activity.' 23 | close-pr-message: 'This PR was closed because it has been stalled for 7 days with no activity.' 24 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.json 2 | .DS_Store 3 | .storage_* 4 | .*storage* 5 | .venv/ 6 | .idea/* 7 | rust/.idea/* 8 | 9 | target/ 10 | !/**/src/**/target/ 11 | .test_* 12 | 13 | # Generated by Cargo 14 | # will have compiled files and executables 15 | /target/ 16 | 17 | # Remove Cargo.lock from gitignore if creating an executable, leave it for libraries 18 | # More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html 19 | Cargo.lock 20 | workspace.xml 21 | 22 | # These are backup files generated by rustfmt 23 | **/*.rs.bk 24 | scripts/build_and_fab 25 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [workspace] 2 | members = [ 3 | "crates/component", 4 | "crates/mysten-network", 5 | "crates/mysten-util-mem", 6 | "crates/mysten-util-mem-derive", 7 | "crates/name-variant", 8 | "crates/prometheus-closure-metric", 9 | "crates/rccheck", 10 | "crates/telemetry-subscribers", 11 | "crates/typed-store", 12 | "crates/typed-store-derive", 13 | "crates/x", 14 | ] 15 | 16 | [profile.release] 17 | codegen-units = 1 18 | lto = true 19 | 20 | [profile.bench] 21 | codegen-units = 1 22 | lto = true 23 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Mysten-infra development now occurs at https://github.com/MystenLabs/sui/crates/ 2 | 3 | # mysten-infra 4 | 5 | ![Build](https://github.com/mystenlabs/mysten-infra/actions/workflows/rust.yml/badge.svg) 6 | [![rustc](https://img.shields.io/badge/rustc-1.60+-blue?style=flat-square&logo=rust)](https://www.rust-lang.org) 7 | [![license](https://img.shields.io/badge/license-Apache-blue.svg?style=flat-square)](LICENSE) 8 | [![Mysten-infra Rust Crates Documentation (main)](https://img.shields.io/badge/docs-main-0f0)](https://mystenlabs.github.io/mysten-infra) 9 | [![codecov](https://codecov.io/gh/MystenLabs/mysten-infra/branch/main/graph/badge.svg?token=JFDHKUFRWS)](https://codecov.io/gh/MystenLabs/mysten-infra) 10 | 11 | Common infrastructure used by multiple Mysten projects. 12 | -------------------------------------------------------------------------------- /crates/component/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "component" 3 | version = "0.1.0" 4 | license = "Apache-2.0" 5 | authors = ["Laura Makdah "] 6 | description = "library for resilient component supervision" 7 | repository = "https://github.com/mystenlabs/mysten-infra" 8 | edition = "2021" 9 | publish = false 10 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 11 | 12 | [dependencies] 13 | async-trait = "0.1.56" 14 | eyre = "0.6.8" 15 | tokio = { version = "1.21.2", features = ["sync", "macros", "rt", "rt-multi-thread"] } 16 | -------------------------------------------------------------------------------- /crates/component/README.md: -------------------------------------------------------------------------------- 1 | # Resilient Components 2 | 3 | This library provides supervision of components that should be consistently running without downtime. 4 | We automatically restart a component when it stops running, and provide a contract for the component to 5 | communicate with the supervisor to signal when a situation arises that requires a restart. 6 | 7 | 8 | ## Use 9 | 10 | For each component that needs to be registered with supervision, we will instantiate a `Supervisor` object to do supervision for that component. 11 | The supervisor should be initialized by passing in an object that implements the `Manageable` trait. To start the component, 12 | call the spawn method and await. 13 | ```asm 14 | let supervisor = Supervisor::new(stream_component); 15 | 16 | supervisor.spawn().await? 17 | ``` 18 | 19 | A user-provided async function will be called by the supervisor to start a tokio task. We wrap the tokio task with communication channels, and one end of the channels will be passed into the user-provided function which will run the component, and the other ends of the channels will be held by the supervisor to receive irrecoverable error messages and to send cancellation signals to the tokio task. 20 | 21 | In order to have a component that gets managed, a user needs to implement the following async functions on their struct to be implementing the Manageable trait: 22 | 23 | 1. start - This function is responsible for launching the tokio task that needs parental supervision and panic-tolerance. It will have as input a channel sender for passing irrecoverable errors to the supervisor, as well as a channel receiver that will be listened to after sending a message to the supervisor to receive an ack that the message was received. The function should then return on reception of the cancellation signal. 24 | 2. handle_irrecoverable - This function will be called upon receiving an irrecoverable error. This allows the user to decide what to log or alert, and what actions to take, if any, for this particular component in terms of resource cleanup. After this function is called, the component is restarted again via the start function above. 25 | 26 | After these functions are implemented, a user can create a Supervisor object with the constructor and then call spawn on the supervisor. This will run the supervision on the component and will consistently ensure that it restarted after any irrecoverable errors. 27 | 28 | Note that since we never expect these tasks to complete, the user does not need to call join on these handles created in the start function. The constantly running supervision of the task will also handle the event that the async task execution completes by ending supervision, although this is not an expected use case. 29 | 30 | 31 | ## Example 32 | 33 | The executable example is used as an integration test for this library. To run 34 | 35 | You can run it with 36 | ``` 37 | cargo run --example stream_example 38 | ``` 39 | and then ctrl + c to stop the component. 40 | 41 | This should generate the following output. 42 | ``` 43 | starting component task 44 | Received irrecoverable error: missing something required 45 | starting component task 46 | terminating component task 47 | ``` 48 | -------------------------------------------------------------------------------- /crates/component/examples/stream_example.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2022, Mysten Labs, Inc. 2 | // SPDX-License-Identifier: Apache-2.0 3 | 4 | extern crate component; 5 | 6 | use async_trait::async_trait; 7 | use component::{IrrecoverableError, Manageable, Supervisor}; 8 | use eyre::eyre; 9 | use std::cmp::min; 10 | use std::sync::Once; 11 | use tokio::sync::mpsc::Sender; 12 | use tokio::sync::oneshot::Receiver as oneshotReceiver; 13 | use tokio::task::JoinHandle; 14 | 15 | static mut SHOULD_FAIL: bool = true; 16 | static FIXER: Once = Once::new(); 17 | 18 | fn fix() { 19 | FIXER.call_once(|| unsafe { 20 | SHOULD_FAIL = false; 21 | }) 22 | } 23 | 24 | /// We create two structs, an empty struct as a component that will contain functions 25 | /// that create and fully encapsulate an instance of the actual type. 26 | /// An instance of the actual type needs to be instantiated inside the supervised task 27 | /// in order to have the correct lifetime. 28 | pub struct MockTcpStreamComponent {} 29 | 30 | pub struct MockTcpStream { 31 | read_data: Vec, 32 | } 33 | 34 | impl MockTcpStream { 35 | pub fn new() -> Self { 36 | let read_data = Vec::new(); 37 | MockTcpStream { read_data } 38 | } 39 | 40 | /// This function will fail on the first call and then succeed on preceding calls to create 41 | /// a situation where we have an irrecoverable error. 42 | fn mock_read(&self, buf: &mut [u8]) -> Result { 43 | // failure should happen once 44 | unsafe { 45 | if SHOULD_FAIL { 46 | fix(); 47 | return Result::Err(eyre!("Could not read from stream.")); 48 | } 49 | } 50 | 51 | let size: usize = min(self.read_data.len(), buf.len()); 52 | buf[..size].copy_from_slice(&self.read_data[..size]); 53 | Ok(size) 54 | } 55 | } 56 | 57 | impl Default for MockTcpStream { 58 | fn default() -> Self { 59 | Self::new() 60 | } 61 | } 62 | 63 | impl MockTcpStreamComponent { 64 | /// This is a function that should run continuously doing some operation, here we are 65 | /// continuously listening on a mocked TCP Stream. This is ultimately the function that we 66 | /// are supervising. 67 | /// Inside this function we first initialize any state that will be used in this component so 68 | /// that the scope is also correctly reset on a restart. 69 | /// 70 | /// This would be an excellent place to also add a scopeguard with a defer_panic so that if the 71 | /// component panics without a user caught irrecoverable error, a descriptive error message and/ 72 | /// or stacktrace can be forwarded to the supervisor. 73 | pub async fn listen( 74 | tx_irrecoverable: Sender, 75 | rx_cancellation: oneshotReceiver<()>, 76 | ) { 77 | // Initialize the concrete type 78 | let m_tcp = MockTcpStream::new(); 79 | 80 | loop { 81 | let mut buf = [0; 10]; 82 | match m_tcp.mock_read(&mut buf) { 83 | Ok(_) => {} // process 84 | Err(_) => { 85 | let e = eyre!("missing something required"); 86 | tx_irrecoverable 87 | .send(e) 88 | .await 89 | .expect("Could not send irrecoverable signal."); 90 | wait_for_cancellation(rx_cancellation).await; 91 | return; 92 | } 93 | }; 94 | } 95 | } 96 | } 97 | 98 | /// Wait for the cancellation signal in order to ensure that the message we sent to the 99 | /// supervisor was received before we return which causes the join handle to complete. If we 100 | /// were to return immediately, there would be no guarantee the message we send will be received. 101 | async fn wait_for_cancellation(rx_cancellation: oneshotReceiver<()>) { 102 | loop { 103 | tokio::select! { 104 | _ = rx_cancellation => { 105 | println!("terminating component task"); 106 | break; 107 | } 108 | } 109 | } 110 | } 111 | 112 | #[async_trait] 113 | impl Manageable for MockTcpStreamComponent { 114 | #[allow(clippy::async_yields_async)] 115 | /// The start function spawns a tokio task supplied with a function that 116 | /// should be constantly running. 117 | async fn start( 118 | &self, 119 | tx_irrecoverable: Sender, 120 | rx_cancellation: oneshotReceiver<()>, 121 | ) -> tokio::task::JoinHandle<()> { 122 | println!("starting component task"); 123 | let handle: JoinHandle<()> = tokio::spawn(Self::listen(tx_irrecoverable, rx_cancellation)); 124 | handle 125 | } 126 | 127 | /// Implement this function to log the error messages or take any task-specific action such as 128 | /// closing a file or terminating children tasks. 129 | fn handle_irrecoverable( 130 | &mut self, 131 | irrecoverable: IrrecoverableError, 132 | ) -> Result<(), eyre::Report> { 133 | println!("Received irrecoverable error: {irrecoverable}"); 134 | Ok(()) 135 | } 136 | } 137 | 138 | #[tokio::main] 139 | pub async fn main() -> Result<(), eyre::Report> { 140 | // Create a component 141 | let stream_component = MockTcpStreamComponent {}; 142 | 143 | // Create a supervisor for the component 144 | let supervisor = Supervisor::new(stream_component); 145 | 146 | // Spawn the supervisor to start the component and supervision. 147 | match supervisor.spawn().await { 148 | Ok(_) => {} 149 | Err(e) => println!("Got this error {:?}", e), 150 | }; 151 | Ok(()) 152 | } 153 | -------------------------------------------------------------------------------- /crates/component/src/lib.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2022, Mysten Labs, Inc. 2 | // SPDX-License-Identifier: Apache-2.0 3 | 4 | use async_trait::async_trait; 5 | use eyre::eyre; 6 | use tokio::sync::mpsc::{channel, Receiver, Sender}; 7 | use tokio::sync::oneshot::{ 8 | channel as oneshotChannel, Receiver as oneshotReceiver, Sender as oneshotSender, 9 | }; 10 | 11 | pub type IrrecoverableError = eyre::Report; 12 | type JoinHandle = tokio::task::JoinHandle<()>; 13 | 14 | static CHANNEL_SIZE: usize = 10; 15 | 16 | /// A Supervisor is instantiated to supervise a task that should always be running. 17 | /// A running supervisor will start a component task, and ensure that it is restarted 18 | /// if it ever stops. 19 | pub struct Supervisor { 20 | /// a message from our supervised component containing an error that we cannot recover 21 | /// receiving on this will trigger the component to restart 22 | irrecoverable_signal: Receiver, 23 | 24 | /// a signal to the supervised component that is implicitly sent after we receive a 25 | /// message on the channel above, which provides the supervised component with an "ack" 26 | cancellation_signal: Option>, 27 | 28 | /// the join handle of the tokio task that was spawned by the Manageable start method 29 | join_handle: Option, 30 | 31 | /// the Manageable trait object that contains functions to start the component and to 32 | /// handle_irrecoverable in the case that a restart is needed 33 | manageable: M, 34 | } 35 | 36 | /// In order to be Manageable, a user defines the following two functions: 37 | /// 38 | /// 1. A start function that launches a tokio task, as input it takes: 39 | /// - an irrecoverable error sender, on which the component sends information to the supervisor about 40 | /// an irrecoverable event that has occurred 41 | /// - a cancellation handle, which will be listened to in the task once an irrecoverable message 42 | /// has been sent, used as an "ack" that the message has been received and so the function can return 43 | /// 44 | /// 2. A handle_irrecoverable which takes actions on a relaunch due to an irrecoverable error 45 | /// that happened. It takes the error message that may contain a stack trace and other information 46 | /// that was sent to the Supervisor via the tx_irrecoverable passed into start. 47 | #[async_trait] 48 | pub trait Manageable { 49 | // The function that spawns a tokio task 50 | async fn start( 51 | &self, 52 | tx_irrecoverable: Sender, 53 | rx_cancellation: oneshotReceiver<()>, 54 | ) -> tokio::task::JoinHandle<()>; // Note the task is "silent" (returns nothing) 55 | 56 | // The function for cleanup after the task has encountered an irrecoverable error 57 | fn handle_irrecoverable( 58 | &mut self, 59 | irrecoverable: IrrecoverableError, 60 | ) -> Result<(), eyre::Report>; 61 | } 62 | 63 | impl Supervisor { 64 | /// Creates a new supervisor using a Manageable component. 65 | pub fn new(component: M) -> Self { 66 | let (_, tr_irrecoverable) = channel(CHANNEL_SIZE); 67 | Supervisor { 68 | irrecoverable_signal: tr_irrecoverable, 69 | cancellation_signal: None, 70 | join_handle: None, 71 | manageable: component, 72 | } 73 | } 74 | 75 | /// Spawn calls the start function of the Manageable component and runs supervision. 76 | pub async fn spawn(mut self) -> Result<(), eyre::Report> { 77 | let (tx_irrecoverable, tr_irrecoverable) = channel(CHANNEL_SIZE); 78 | let (tx_cancellation, tr_cancellation) = oneshotChannel(); 79 | 80 | // call Manageable start method 81 | let wrapped_handle = self 82 | .manageable 83 | .start(tx_irrecoverable, tr_cancellation) 84 | .await; 85 | 86 | self.irrecoverable_signal = tr_irrecoverable; 87 | self.cancellation_signal = Some(tx_cancellation); 88 | self.join_handle = Some(wrapped_handle); 89 | 90 | self.run().await 91 | } 92 | 93 | /// Run watches continuously for irrecoverable errors or JoinHandle completion. 94 | async fn run(mut self) -> Result<(), eyre::Report> { 95 | // select statement that listens for the following cases: 96 | // 97 | // Irrecoverable signal incoming => log, terminate and restart 98 | // completion of the task => already terminated, log and restart 99 | // 100 | // The handle_irrecoverable is run before the existing task gets 101 | // cancelled by restart in the case that an irrecoverable signal 102 | // was sent to us. This makes resource cleanup possible. 103 | 104 | loop { 105 | let mut message = eyre!("An unexpected shutdown was observed in a component."); 106 | tokio::select! { 107 | Some(m) = self.irrecoverable_signal.recv() => { 108 | message = m; 109 | }, 110 | 111 | // Poll the JoinHandle 112 | _result = self.join_handle.as_mut().unwrap(), if self.join_handle.is_some() => { 113 | // this could be due to an un-caught panic 114 | // we don't have a user-supplied message to log, so we use the generic one 115 | } 116 | } 117 | self.manageable.handle_irrecoverable(message)?; 118 | self.restart().await; 119 | } 120 | } 121 | 122 | async fn restart(&mut self) { 123 | // restart 124 | let (tx_irrecoverable, tr_irrecoverable) = channel(CHANNEL_SIZE); 125 | let (tx_cancellation, tr_cancellation) = oneshotChannel(); 126 | 127 | // call the start method 128 | let wrapped_handle: JoinHandle = self 129 | .manageable 130 | .start(tx_irrecoverable, tr_cancellation) 131 | .await; 132 | 133 | // reset the supervision handles & channel end points 134 | // dropping the old cancellation_signal implicitly sends cancellation by closing the channel 135 | self.irrecoverable_signal = tr_irrecoverable; 136 | self.cancellation_signal = Some(tx_cancellation); 137 | 138 | self.join_handle = Some(wrapped_handle); 139 | } 140 | } 141 | -------------------------------------------------------------------------------- /crates/mysten-network/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "mysten-network" 3 | version = "0.2.0" 4 | license = "Apache-2.0" 5 | edition = "2021" 6 | authors = ["Brandon Williams "] 7 | description = "Mysten's network tooling" 8 | repository = "https://github.com/mystenlabs/mysten-infra" 9 | publish = ["crates-io"] 10 | 11 | [dependencies] 12 | bincode = "1.3.3" 13 | bytes = "1.2.1" 14 | eyre = "0.6.8" 15 | futures = "0.3.21" 16 | http = "0.2.8" 17 | multiaddr = "0.16.0" 18 | serde = { version = "1.0.140", features = ["derive"] } 19 | tokio = { version = "1.21.2", features = ["sync", "rt", "macros"] } 20 | tokio-stream = { version = "0.1.11", features = ["net"] } 21 | tonic = { version = "0.8.2", features = ["transport"] } 22 | tonic-health = "0.7.0" 23 | tower = { version = "0.4.13", features = ["full"] } 24 | tower-http = { version = "0.3.4", features = ["trace", "set-header", "propagate-header"] } 25 | tracing = "0.1.37" 26 | -------------------------------------------------------------------------------- /crates/mysten-network/src/client.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2022, Mysten Labs, Inc. 2 | // SPDX-License-Identifier: Apache-2.0 3 | 4 | use crate::{ 5 | config::Config, 6 | multiaddr::{parse_dns, parse_ip4, parse_ip6}, 7 | }; 8 | use eyre::{eyre, Context, Result}; 9 | use multiaddr::{Multiaddr, Protocol}; 10 | use tonic::transport::{Channel, Endpoint, Uri}; 11 | 12 | pub async fn connect(address: &Multiaddr) -> Result { 13 | let channel = endpoint_from_multiaddr(address)?.connect().await?; 14 | Ok(channel) 15 | } 16 | 17 | pub fn connect_lazy(address: &Multiaddr) -> Result { 18 | let channel = endpoint_from_multiaddr(address)?.connect_lazy(); 19 | Ok(channel) 20 | } 21 | 22 | pub(crate) async fn connect_with_config(address: &Multiaddr, config: &Config) -> Result { 23 | let channel = endpoint_from_multiaddr(address)? 24 | .apply_config(config) 25 | .connect() 26 | .await?; 27 | Ok(channel) 28 | } 29 | 30 | pub(crate) fn connect_lazy_with_config(address: &Multiaddr, config: &Config) -> Result { 31 | let channel = endpoint_from_multiaddr(address)? 32 | .apply_config(config) 33 | .connect_lazy(); 34 | Ok(channel) 35 | } 36 | 37 | fn endpoint_from_multiaddr(addr: &Multiaddr) -> Result { 38 | let mut iter = addr.iter(); 39 | 40 | let channel = match iter.next().ok_or_else(|| eyre!("address is empty"))? { 41 | Protocol::Dns(_) => { 42 | let (dns_name, tcp_port, http_or_https) = parse_dns(addr)?; 43 | let uri = format!("{http_or_https}://{dns_name}:{tcp_port}"); 44 | MyEndpoint::try_from_uri(uri)? 45 | } 46 | Protocol::Ip4(_) => { 47 | let (socket_addr, http_or_https) = parse_ip4(addr)?; 48 | let uri = format!("{http_or_https}://{socket_addr}"); 49 | MyEndpoint::try_from_uri(uri)? 50 | } 51 | Protocol::Ip6(_) => { 52 | let (socket_addr, http_or_https) = parse_ip6(addr)?; 53 | let uri = format!("{http_or_https}://{socket_addr}"); 54 | MyEndpoint::try_from_uri(uri)? 55 | } 56 | // Protocol::Memory(_) => todo!(), 57 | #[cfg(unix)] 58 | Protocol::Unix(_) => { 59 | let (path, http_or_https) = crate::multiaddr::parse_unix(addr)?; 60 | let uri = format!("{http_or_https}://localhost"); 61 | MyEndpoint::try_from_uri(uri)?.with_uds_connector(path.as_ref().into()) 62 | } 63 | unsupported => return Err(eyre!("unsupported protocol {unsupported}")), 64 | }; 65 | 66 | Ok(channel) 67 | } 68 | 69 | struct MyEndpoint { 70 | endpoint: Endpoint, 71 | #[cfg(unix)] 72 | uds_connector: Option, 73 | } 74 | 75 | impl MyEndpoint { 76 | fn new(endpoint: Endpoint) -> Self { 77 | Self { 78 | endpoint, 79 | #[cfg(unix)] 80 | uds_connector: None, 81 | } 82 | } 83 | 84 | fn try_from_uri(uri: String) -> Result { 85 | let uri: Uri = uri 86 | .parse() 87 | .with_context(|| format!("unable to create Uri from '{uri}'"))?; 88 | let endpoint = Endpoint::from(uri); 89 | Ok(Self::new(endpoint)) 90 | } 91 | 92 | #[cfg(unix)] 93 | fn with_uds_connector(self, path: std::path::PathBuf) -> Self { 94 | Self { 95 | endpoint: self.endpoint, 96 | uds_connector: Some(path), 97 | } 98 | } 99 | 100 | fn apply_config(mut self, config: &Config) -> Self { 101 | self.endpoint = apply_config_to_endpoint(config, self.endpoint); 102 | self 103 | } 104 | 105 | fn connect_lazy(self) -> Channel { 106 | #[cfg(unix)] 107 | if let Some(path) = self.uds_connector { 108 | return self 109 | .endpoint 110 | .connect_with_connector_lazy(tower::service_fn(move |_: Uri| { 111 | let path = path.clone(); 112 | 113 | // Connect to a Uds socket 114 | tokio::net::UnixStream::connect(path) 115 | })); 116 | } 117 | 118 | self.endpoint.connect_lazy() 119 | } 120 | 121 | async fn connect(self) -> Result { 122 | #[cfg(unix)] 123 | if let Some(path) = self.uds_connector { 124 | return self 125 | .endpoint 126 | .connect_with_connector(tower::service_fn(move |_: Uri| { 127 | let path = path.clone(); 128 | 129 | // Connect to a Uds socket 130 | tokio::net::UnixStream::connect(path) 131 | })) 132 | .await 133 | .map_err(Into::into); 134 | } 135 | 136 | self.endpoint.connect().await.map_err(Into::into) 137 | } 138 | } 139 | 140 | fn apply_config_to_endpoint(config: &Config, mut endpoint: Endpoint) -> Endpoint { 141 | if let Some(limit) = config.concurrency_limit_per_connection { 142 | endpoint = endpoint.concurrency_limit(limit); 143 | } 144 | 145 | if let Some(timeout) = config.request_timeout { 146 | endpoint = endpoint.timeout(timeout); 147 | } 148 | 149 | if let Some(timeout) = config.connect_timeout { 150 | endpoint = endpoint.connect_timeout(timeout); 151 | } 152 | 153 | if let Some(tcp_nodelay) = config.tcp_nodelay { 154 | endpoint = endpoint.tcp_nodelay(tcp_nodelay); 155 | } 156 | 157 | if let Some(http2_keepalive_interval) = config.http2_keepalive_interval { 158 | endpoint = endpoint.http2_keep_alive_interval(http2_keepalive_interval); 159 | } 160 | 161 | if let Some(http2_keepalive_timeout) = config.http2_keepalive_timeout { 162 | endpoint = endpoint.keep_alive_timeout(http2_keepalive_timeout); 163 | } 164 | 165 | if let Some((limit, duration)) = config.rate_limit { 166 | endpoint = endpoint.rate_limit(limit, duration); 167 | } 168 | 169 | endpoint 170 | .initial_stream_window_size(config.http2_initial_stream_window_size) 171 | .initial_connection_window_size(config.http2_initial_connection_window_size) 172 | .tcp_keepalive(config.tcp_keepalive) 173 | } 174 | -------------------------------------------------------------------------------- /crates/mysten-network/src/codec.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2022, Mysten Labs, Inc. 2 | // SPDX-License-Identifier: Apache-2.0 3 | 4 | use bytes::{Buf, BufMut}; 5 | use std::marker::PhantomData; 6 | use tonic::{ 7 | codec::{Codec, DecodeBuf, Decoder, EncodeBuf, Encoder}, 8 | Status, 9 | }; 10 | 11 | #[derive(Debug)] 12 | pub struct BincodeEncoder(PhantomData); 13 | 14 | impl Encoder for BincodeEncoder { 15 | type Item = T; 16 | type Error = Status; 17 | 18 | fn encode(&mut self, item: Self::Item, buf: &mut EncodeBuf<'_>) -> Result<(), Self::Error> { 19 | bincode::serialize_into(buf.writer(), &item).map_err(|e| Status::internal(e.to_string())) 20 | } 21 | } 22 | 23 | #[derive(Debug)] 24 | pub struct BincodeDecoder(PhantomData); 25 | 26 | impl Decoder for BincodeDecoder { 27 | type Item = U; 28 | type Error = Status; 29 | 30 | fn decode(&mut self, buf: &mut DecodeBuf<'_>) -> Result, Self::Error> { 31 | if !buf.has_remaining() { 32 | return Ok(None); 33 | } 34 | 35 | let chunk = buf.chunk(); 36 | 37 | let item: Self::Item = 38 | bincode::deserialize(chunk).map_err(|e| Status::internal(e.to_string()))?; 39 | buf.advance(chunk.len()); 40 | 41 | Ok(Some(item)) 42 | } 43 | } 44 | 45 | /// A [`Codec`] that implements `application/grpc+bincode` via the serde library. 46 | #[derive(Debug, Clone)] 47 | pub struct BincodeCodec(PhantomData<(T, U)>); 48 | 49 | impl Default for BincodeCodec { 50 | fn default() -> Self { 51 | Self(PhantomData) 52 | } 53 | } 54 | 55 | impl Codec for BincodeCodec 56 | where 57 | T: serde::Serialize + Send + 'static, 58 | U: serde::de::DeserializeOwned + Send + 'static, 59 | { 60 | type Encode = T; 61 | type Decode = U; 62 | type Encoder = BincodeEncoder; 63 | type Decoder = BincodeDecoder; 64 | 65 | fn encoder(&mut self) -> Self::Encoder { 66 | BincodeEncoder(PhantomData) 67 | } 68 | 69 | fn decoder(&mut self) -> Self::Decoder { 70 | BincodeDecoder(PhantomData) 71 | } 72 | } 73 | -------------------------------------------------------------------------------- /crates/mysten-network/src/config.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2022, Mysten Labs, Inc. 2 | // SPDX-License-Identifier: Apache-2.0 3 | use crate::metrics::{DefaultMetricsCallbackProvider, MetricsCallbackProvider}; 4 | use crate::{ 5 | client::{connect_lazy_with_config, connect_with_config}, 6 | server::ServerBuilder, 7 | }; 8 | use eyre::Result; 9 | use multiaddr::Multiaddr; 10 | use serde::{Deserialize, Serialize}; 11 | use std::time::Duration; 12 | use tonic::transport::Channel; 13 | 14 | #[derive(Debug, Default, Deserialize, Serialize)] 15 | pub struct Config { 16 | /// Set the concurrency limit applied to on requests inbound per connection. 17 | pub concurrency_limit_per_connection: Option, 18 | 19 | /// Set a timeout for all request handlers. 20 | pub request_timeout: Option, 21 | 22 | /// Set a timeout for establishing an outbound connection. 23 | pub connect_timeout: Option, 24 | 25 | /// Sets the SETTINGS_INITIAL_WINDOW_SIZE option for HTTP2 stream-level flow control. 26 | /// Default is 65,535 27 | pub http2_initial_stream_window_size: Option, 28 | 29 | /// Sets the max connection-level flow control for HTTP2 30 | /// 31 | /// Default is 65,535 32 | pub http2_initial_connection_window_size: Option, 33 | 34 | /// Sets the SETTINGS_MAX_CONCURRENT_STREAMS option for HTTP2 connections. 35 | /// 36 | /// Default is no limit (None). 37 | pub http2_max_concurrent_streams: Option, 38 | 39 | /// Set whether TCP keepalive messages are enabled on accepted connections. 40 | /// 41 | /// If None is specified, keepalive is disabled, otherwise the duration specified will be the 42 | /// time to remain idle before sending TCP keepalive probes. 43 | /// 44 | /// Default is no keepalive (None) 45 | pub tcp_keepalive: Option, 46 | 47 | /// Set the value of TCP_NODELAY option for accepted connections. Enabled by default. 48 | pub tcp_nodelay: Option, 49 | 50 | /// Set whether HTTP2 Ping frames are enabled on accepted connections. 51 | /// 52 | /// If None is specified, HTTP2 keepalive is disabled, otherwise the duration specified will be 53 | /// the time interval between HTTP2 Ping frames. The timeout for receiving an acknowledgement 54 | /// of the keepalive ping can be set with http2_keepalive_timeout. 55 | /// 56 | /// Default is no HTTP2 keepalive (None) 57 | pub http2_keepalive_interval: Option, 58 | 59 | /// Sets a timeout for receiving an acknowledgement of the keepalive ping. 60 | /// 61 | /// If the ping is not acknowledged within the timeout, the connection will be closed. Does nothing 62 | /// if http2_keep_alive_interval is disabled. 63 | /// 64 | /// Default is 20 seconds. 65 | pub http2_keepalive_timeout: Option, 66 | 67 | // Only affects servers 68 | pub load_shed: Option, 69 | 70 | /// Only affects clients 71 | pub rate_limit: Option<(u64, Duration)>, 72 | 73 | // Only affects servers 74 | pub global_concurrency_limit: Option, 75 | } 76 | 77 | impl Config { 78 | pub fn new() -> Self { 79 | Default::default() 80 | } 81 | 82 | pub fn server_builder(&self) -> ServerBuilder { 83 | ServerBuilder::from_config(self, DefaultMetricsCallbackProvider::default()) 84 | } 85 | 86 | pub fn server_builder_with_metrics(&self, metrics_provider: M) -> ServerBuilder 87 | where 88 | M: MetricsCallbackProvider, 89 | { 90 | ServerBuilder::from_config(self, metrics_provider) 91 | } 92 | 93 | pub async fn connect(&self, addr: &Multiaddr) -> Result { 94 | connect_with_config(addr, self).await 95 | } 96 | 97 | pub fn connect_lazy(&self, addr: &Multiaddr) -> Result { 98 | connect_lazy_with_config(addr, self) 99 | } 100 | } 101 | -------------------------------------------------------------------------------- /crates/mysten-network/src/lib.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2022, Mysten Labs, Inc. 2 | // SPDX-License-Identifier: Apache-2.0 3 | pub mod client; 4 | pub mod codec; 5 | pub mod config; 6 | pub mod metrics; 7 | pub mod multiaddr; 8 | pub mod server; 9 | -------------------------------------------------------------------------------- /crates/mysten-network/src/metrics.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2022, Mysten Labs, Inc. 2 | // SPDX-License-Identifier: Apache-2.0 3 | use std::time::Duration; 4 | use tonic::codegen::http::header::HeaderName; 5 | use tonic::codegen::http::{HeaderValue, Request, Response}; 6 | use tonic::{Code, Status}; 7 | use tower_http::classify::GrpcFailureClass; 8 | use tower_http::trace::{OnFailure, OnRequest, OnResponse}; 9 | use tracing::Span; 10 | 11 | pub(crate) static GRPC_ENDPOINT_PATH_HEADER: HeaderName = HeaderName::from_static("grpc-path-req"); 12 | 13 | /// The trait to be implemented when want to be notified about 14 | /// a new request and related metrics around it. When a request 15 | /// is performed (up to the point that a response is created) the 16 | /// on_response method is called with the corresponding metrics 17 | /// details. The on_request method will be called when the request 18 | /// is received, but not further processing has happened at this 19 | /// point. 20 | pub trait MetricsCallbackProvider: Send + Sync + Clone + 'static { 21 | /// Method will be called when a request has been received. 22 | /// `path`: the endpoint uri path 23 | fn on_request(&self, path: String); 24 | 25 | /// Method to be called from the server when a request is performed. 26 | /// `path`: the endpoint uri path 27 | /// `latency`: the time when the request was received and when the response was created 28 | /// `status`: the http status code of the response 29 | /// `grpc_status_code`: the grpc status code (see https://github.com/grpc/grpc/blob/master/doc/statuscodes.md#status-codes-and-their-use-in-grpc) 30 | fn on_response(&self, path: String, latency: Duration, status: u16, grpc_status_code: Code); 31 | 32 | /// Called when request call is started 33 | fn on_start(&self, _path: &str) {} 34 | 35 | /// Called when request call is dropped. 36 | /// It is guaranteed that for each on_start there will be corresponding on_drop 37 | fn on_drop(&self, _path: &str) {} 38 | } 39 | 40 | #[derive(Clone, Default)] 41 | pub struct DefaultMetricsCallbackProvider {} 42 | impl MetricsCallbackProvider for DefaultMetricsCallbackProvider { 43 | fn on_request(&self, _path: String) {} 44 | 45 | fn on_response( 46 | &self, 47 | _path: String, 48 | _latency: Duration, 49 | _status: u16, 50 | _grpc_status_code: Code, 51 | ) { 52 | } 53 | } 54 | 55 | #[derive(Clone)] 56 | pub(crate) struct MetricsHandler { 57 | metrics_provider: M, 58 | } 59 | 60 | impl MetricsHandler { 61 | pub(crate) fn new(metrics_provider: M) -> Self { 62 | Self { metrics_provider } 63 | } 64 | } 65 | 66 | impl OnResponse for MetricsHandler { 67 | fn on_response(self, response: &Response, latency: Duration, _span: &Span) { 68 | let grpc_status = Status::from_header_map(response.headers()); 69 | let grpc_status_code = grpc_status.map_or(Code::Ok, |s| s.code()); 70 | 71 | let path: HeaderValue = response 72 | .headers() 73 | .get(&GRPC_ENDPOINT_PATH_HEADER) 74 | .unwrap() 75 | .clone(); 76 | 77 | self.metrics_provider.on_response( 78 | path.to_str().unwrap().to_string(), 79 | latency, 80 | response.status().as_u16(), 81 | grpc_status_code, 82 | ); 83 | } 84 | } 85 | 86 | impl OnRequest for MetricsHandler { 87 | fn on_request(&mut self, request: &Request, _span: &Span) { 88 | self.metrics_provider 89 | .on_request(request.uri().path().to_string()); 90 | } 91 | } 92 | 93 | impl OnFailure for MetricsHandler { 94 | fn on_failure( 95 | &mut self, 96 | _failure_classification: GrpcFailureClass, 97 | _latency: Duration, 98 | _span: &Span, 99 | ) { 100 | // just do nothing for now so we avoid printing unnecessary logs 101 | } 102 | } 103 | -------------------------------------------------------------------------------- /crates/mysten-network/src/multiaddr.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2022, Mysten Labs, Inc. 2 | // SPDX-License-Identifier: Apache-2.0 3 | 4 | use eyre::{eyre, Result}; 5 | use multiaddr::{Multiaddr, Protocol}; 6 | use std::{ 7 | borrow::Cow, 8 | net::{IpAddr, SocketAddr}, 9 | }; 10 | 11 | // Converts a /ip{4,6}/-/tcp/-[/-] Multiaddr to SocketAddr. 12 | // Useful when an external library only accepts SocketAddr, e.g. to start a local server. 13 | // See `client::endpoint_from_multiaddr()` for converting to Endpoint for clients. 14 | pub fn to_socket_addr(addr: &Multiaddr) -> Result { 15 | let mut iter = addr.iter(); 16 | let ip = match iter 17 | .next() 18 | .ok_or_else(|| eyre!("failed to convert to SocketAddr: Multiaddr does not contain IP"))? 19 | { 20 | Protocol::Ip4(ip4_addr) => IpAddr::V4(ip4_addr), 21 | Protocol::Ip6(ip6_addr) => IpAddr::V6(ip6_addr), 22 | unsupported => return Err(eyre!("unsupported protocol {unsupported}")), 23 | }; 24 | let tcp_port = parse_tcp(&mut iter)?; 25 | Ok(SocketAddr::new(ip, tcp_port)) 26 | } 27 | 28 | pub(crate) fn parse_tcp<'a, T: Iterator>>(protocols: &mut T) -> Result { 29 | if let Protocol::Tcp(port) = protocols 30 | .next() 31 | .ok_or_else(|| eyre!("unexpected end of multiaddr"))? 32 | { 33 | Ok(port) 34 | } else { 35 | Err(eyre!("expected tcp protocol")) 36 | } 37 | } 38 | 39 | pub(crate) fn parse_http_https<'a, T: Iterator>>( 40 | protocols: &mut T, 41 | ) -> Result<&'static str> { 42 | match protocols 43 | .next() 44 | .ok_or_else(|| eyre!("unexpected end of multiaddr"))? 45 | { 46 | Protocol::Http => Ok("http"), 47 | Protocol::Https => Ok("https"), 48 | _ => Err(eyre!("expected http/https protocol")), 49 | } 50 | } 51 | 52 | pub(crate) fn parse_end<'a, T: Iterator>>(protocols: &mut T) -> Result<()> { 53 | if protocols.next().is_none() { 54 | Ok(()) 55 | } else { 56 | Err(eyre!("expected end of multiaddr")) 57 | } 58 | } 59 | 60 | // Parse a full /dns/-/tcp/-/{http,https} address 61 | pub(crate) fn parse_dns(address: &Multiaddr) -> Result<(Cow<'_, str>, u16, &'static str)> { 62 | let mut iter = address.iter(); 63 | 64 | let dns_name = match iter 65 | .next() 66 | .ok_or_else(|| eyre!("unexpected end of multiaddr"))? 67 | { 68 | Protocol::Dns(dns_name) => dns_name, 69 | other => return Err(eyre!("expected dns found {other}")), 70 | }; 71 | let tcp_port = parse_tcp(&mut iter)?; 72 | let http_or_https = parse_http_https(&mut iter)?; 73 | parse_end(&mut iter)?; 74 | Ok((dns_name, tcp_port, http_or_https)) 75 | } 76 | 77 | // Parse a full /ip4/-/tcp/-/{http,https} address 78 | pub(crate) fn parse_ip4(address: &Multiaddr) -> Result<(SocketAddr, &'static str)> { 79 | let mut iter = address.iter(); 80 | 81 | let ip_addr = match iter 82 | .next() 83 | .ok_or_else(|| eyre!("unexpected end of multiaddr"))? 84 | { 85 | Protocol::Ip4(ip4_addr) => IpAddr::V4(ip4_addr), 86 | other => return Err(eyre!("expected ip4 found {other}")), 87 | }; 88 | let tcp_port = parse_tcp(&mut iter)?; 89 | let http_or_https = parse_http_https(&mut iter)?; 90 | parse_end(&mut iter)?; 91 | let socket_addr = SocketAddr::new(ip_addr, tcp_port); 92 | 93 | Ok((socket_addr, http_or_https)) 94 | } 95 | 96 | // Parse a full /ip6/-/tcp/-/{http,https} address 97 | pub(crate) fn parse_ip6(address: &Multiaddr) -> Result<(SocketAddr, &'static str)> { 98 | let mut iter = address.iter(); 99 | 100 | let ip_addr = match iter 101 | .next() 102 | .ok_or_else(|| eyre!("unexpected end of multiaddr"))? 103 | { 104 | Protocol::Ip6(ip6_addr) => IpAddr::V6(ip6_addr), 105 | other => return Err(eyre!("expected ip6 found {other}")), 106 | }; 107 | let tcp_port = parse_tcp(&mut iter)?; 108 | let http_or_https = parse_http_https(&mut iter)?; 109 | parse_end(&mut iter)?; 110 | let socket_addr = SocketAddr::new(ip_addr, tcp_port); 111 | 112 | Ok((socket_addr, http_or_https)) 113 | } 114 | 115 | // Parse a full /unix/-/{http,https} address 116 | #[cfg(unix)] 117 | pub(crate) fn parse_unix(address: &Multiaddr) -> Result<(Cow<'_, str>, &'static str)> { 118 | let mut iter = address.iter(); 119 | 120 | let path = match iter 121 | .next() 122 | .ok_or_else(|| eyre!("unexpected end of multiaddr"))? 123 | { 124 | Protocol::Unix(path) => path, 125 | other => return Err(eyre!("expected unix found {other}")), 126 | }; 127 | let http_or_https = parse_http_https(&mut iter)?; 128 | parse_end(&mut iter)?; 129 | 130 | Ok((path, http_or_https)) 131 | } 132 | 133 | #[cfg(test)] 134 | mod test { 135 | use super::to_socket_addr; 136 | use multiaddr::multiaddr; 137 | 138 | #[test] 139 | fn test_to_socket_addr_basic() { 140 | let multi_addr_ipv4 = multiaddr!(Ip4([127, 0, 0, 1]), Tcp(10500u16)); 141 | let socket_addr_ipv4 = 142 | to_socket_addr(&multi_addr_ipv4).expect("Couldn't convert to socket addr"); 143 | assert_eq!(socket_addr_ipv4.to_string(), "127.0.0.1:10500"); 144 | 145 | let multi_addr_ipv6 = multiaddr!(Ip6([172, 0, 0, 1, 1, 1, 1, 1]), Tcp(10500u16)); 146 | let socket_addr_ipv6 = 147 | to_socket_addr(&multi_addr_ipv6).expect("Couldn't convert to socket addr"); 148 | assert_eq!(socket_addr_ipv6.to_string(), "[ac::1:1:1:1:1]:10500"); 149 | } 150 | 151 | #[test] 152 | fn test_to_socket_addr_unsupported_protocol() { 153 | let multi_addr_dns = multiaddr!(Dnsaddr("mysten.sui"), Tcp(10500u16)); 154 | let _ = to_socket_addr(&multi_addr_dns).expect_err("DNS is unsupported"); 155 | } 156 | } 157 | -------------------------------------------------------------------------------- /crates/mysten-util-mem-derive/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "mysten-util-mem-derive" 3 | version = "0.1.0" 4 | authors = ["Parity Technologies "] 5 | license = "MIT OR Apache-2.0" 6 | description = "Crate for memory reporting" 7 | repository = "https://github.com/mystenlabs/mysten-infra" 8 | edition = "2021" 9 | rust-version = "1.56.1" 10 | publish = false 11 | 12 | [lib] 13 | path = "lib.rs" 14 | proc-macro = true 15 | 16 | [dependencies] 17 | proc-macro2 = "1.0.47" 18 | syn = { version = "1.0.102", features = ["full"] } 19 | synstructure = "0.12" 20 | -------------------------------------------------------------------------------- /crates/mysten-util-mem-derive/lib.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2022, Mysten Labs, Inc. 2 | // SPDX-License-Identifier: Apache-2.0 3 | 4 | // Copyright 2020 Parity Technologies 5 | // 6 | // Licensed under the Apache License, Version 2.0 or the MIT license 8 | // , at your 9 | // option. This file may not be copied, modified, or distributed 10 | // except according to those terms. 11 | 12 | //! A crate for deriving the MallocSizeOf trait. 13 | //! 14 | //! This is a copy of Servo malloc_size_of_derive code, modified to work with 15 | //! our `mysten_util_mem` library 16 | 17 | #![allow(clippy::all)] 18 | 19 | extern crate proc_macro2; 20 | #[macro_use] 21 | extern crate syn; 22 | #[macro_use] 23 | extern crate synstructure; 24 | 25 | decl_derive!([MallocSizeOf, attributes(ignore_malloc_size_of)] => malloc_size_of_derive); 26 | 27 | fn malloc_size_of_derive(s: synstructure::Structure) -> proc_macro2::TokenStream { 28 | let match_body = s.each(|binding| { 29 | let ignore = binding 30 | .ast() 31 | .attrs 32 | .iter() 33 | .any(|attr| match attr.parse_meta().unwrap() { 34 | syn::Meta::Path(ref path) | syn::Meta::List(syn::MetaList { ref path, .. }) 35 | if path.is_ident("ignore_malloc_size_of") => 36 | { 37 | panic!( 38 | "#[ignore_malloc_size_of] should have an explanation, \ 39 | e.g. #[ignore_malloc_size_of = \"because reasons\"]" 40 | ); 41 | } 42 | syn::Meta::NameValue(syn::MetaNameValue { ref path, .. }) 43 | if path.is_ident("ignore_malloc_size_of") => 44 | { 45 | true 46 | } 47 | _ => false, 48 | }); 49 | if ignore { 50 | None 51 | } else if let syn::Type::Array(..) = binding.ast().ty { 52 | Some(quote! { 53 | for item in #binding.iter() { 54 | sum += mysten_util_mem::MallocSizeOf::size_of(item, ops); 55 | } 56 | }) 57 | } else { 58 | Some(quote! { 59 | sum += mysten_util_mem::MallocSizeOf::size_of(#binding, ops); 60 | }) 61 | } 62 | }); 63 | 64 | let ast = s.ast(); 65 | let name = &ast.ident; 66 | let (impl_generics, ty_generics, where_clause) = ast.generics.split_for_impl(); 67 | let mut where_clause = where_clause.unwrap_or(&parse_quote!(where)).clone(); 68 | for param in ast.generics.type_params() { 69 | let ident = ¶m.ident; 70 | where_clause 71 | .predicates 72 | .push(parse_quote!(#ident: mysten_util_mem::MallocSizeOf)); 73 | } 74 | 75 | let tokens = quote! { 76 | impl #impl_generics mysten_util_mem::MallocSizeOf for #name #ty_generics #where_clause { 77 | #[inline] 78 | #[allow(unused_variables, unused_mut, unreachable_code)] 79 | fn size_of(&self, ops: &mut mysten_util_mem::MallocSizeOfOps) -> usize { 80 | let mut sum = 0; 81 | match *self { 82 | #match_body 83 | } 84 | sum 85 | } 86 | } 87 | }; 88 | 89 | tokens 90 | } 91 | -------------------------------------------------------------------------------- /crates/mysten-util-mem/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "mysten-util-mem" 3 | version = "0.11.0" 4 | authors = ["Parity Technologies ", "Andrew Schran "] 5 | repository = "https://github.com/mystenlabs/mysten-infra" 6 | description = "Collection of memory related utilities" 7 | license = "MIT OR Apache-2.0" 8 | edition = "2021" 9 | rust-version = "1.56.1" 10 | publish = false 11 | 12 | [dependencies] 13 | cfg-if = "1.0.0" 14 | hashbrown = { version = "0.12", optional = true } 15 | mysten-util-mem-derive = { path = "../mysten-util-mem-derive", version = "0.1" } 16 | impl-trait-for-tuples = "0.2.0" 17 | fastcrypto = { git = "https://github.com/MystenLabs/fastcrypto", rev = "12c522bea01e4a3d19ecae63d789edcdcce2d66c" } 18 | indexmap = { version = "1.9.1", features = ["serde"] } 19 | roaring = "0.10.0" 20 | ed25519-consensus = { version = "2.0.1", features = ["serde"] } 21 | 22 | smallvec = { version = "1.10.0", optional = true } 23 | parking_lot = { version = "0.12.0", optional = true } 24 | once_cell = "1.16.0" 25 | 26 | [features] 27 | default = ["std", "hashbrown", "smallvec", "estimate-heapsize"] 28 | std = ["parking_lot"] 29 | # Full estimate: no call to allocator 30 | estimate-heapsize = [] 31 | -------------------------------------------------------------------------------- /crates/mysten-util-mem/README.md: -------------------------------------------------------------------------------- 1 | # mysten-util-mem 2 | 3 | This crate provides tools for measuring the heap memory usage of specific structures. 4 | 5 | ## Annotating types with `MallocSizeOf` trait 6 | 7 | To measure your struct's memory usage, it and all of its child types must implement the `MallocSizeOf` trait. 8 | 9 | For types that are local to your crate, this is really easy. Just add: 10 | 11 | ```rust 12 | #[derive(MallocSizeOf)] 13 | ``` 14 | 15 | For external types, you'll need to implement the trait here in [`external_impls.rs`](https://github.com/MystenLabs/mysten-infra/blob/main/crates/mysten-util-mem/src/external_impls.rs). See the existing implementations in that file for examples. 16 | 17 | Note that `size_of` should return only the number of **heap-allocated bytes** used by the type. For example, a type such as `struct MyStruct([u8; 128])` would return **zero**, not 128. Recursive accounting for heap-allocated memory when your struct is part of e.g. a `Vec` or `HashMap` is already taken care of by the implementations of `MallocSizeOf` on those collection types. 18 | 19 | Oftentimes, the public interface of the type you are measuring does not provide enough information to precisely measure the amount of heap space it allocates. In that case, you can try just to produce a reasonable estimate. 20 | 21 | ## Measuring memory usage 22 | 23 | To compute the heap usage of an annotated type at runtime, simply call `mysten_util_mem::malloc_size(&my_struct)`. For complete memory usage, add in the inline size of the type as well, as in: 24 | 25 | ```rust 26 | mysten_util_mem::malloc_size(&my_struct) + std::mem::size_of::() 27 | ``` 28 | 29 | ## Putting it all together 30 | 31 | For an example PR using library, take a look at https://github.com/MystenLabs/narwhal/pull/898/files. 32 | -------------------------------------------------------------------------------- /crates/mysten-util-mem/src/allocators.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2022, Mysten Labs, Inc. 2 | // SPDX-License-Identifier: Apache-2.0 3 | 4 | // Copyright 2020 Parity Technologies 5 | // 6 | // Licensed under the Apache License, Version 2.0 or the MIT license 8 | // , at your 9 | // option. This file may not be copied, modified, or distributed 10 | // except according to those terms. 11 | 12 | #[cfg(feature = "std")] 13 | use crate::malloc_size::MallocUnconditionalSizeOf; 14 | use crate::malloc_size::{MallocSizeOf, MallocSizeOfOps, VoidPtrToSizeFn}; 15 | #[cfg(not(feature = "std"))] 16 | use core::ffi::c_void; 17 | #[cfg(feature = "std")] 18 | use std::os::raw::c_void; 19 | 20 | mod usable_size { 21 | 22 | use super::*; 23 | 24 | cfg_if::cfg_if! { 25 | 26 | if #[cfg(any( 27 | target_arch = "wasm32", 28 | feature = "estimate-heapsize", 29 | ))] { 30 | 31 | // do not try system allocator 32 | 33 | /// Warning this is for compatibility only. 34 | /// This function does panic: `estimate-heapsize` feature needs to be activated 35 | /// to avoid this function call. 36 | pub unsafe extern "C" fn malloc_usable_size(_ptr: *const c_void) -> usize { 37 | unreachable!("estimate heapsize only") 38 | } 39 | 40 | } else if #[cfg(any( 41 | target_os = "linux", 42 | target_os = "android", 43 | target_os = "freebsd", 44 | ))] { 45 | // Linux/BSD call system allocator (currently malloc). 46 | extern "C" { 47 | pub fn malloc_usable_size(ptr: *const c_void) -> usize; 48 | } 49 | 50 | } else { 51 | // default allocator for non linux or windows system use estimate 52 | pub unsafe extern "C" fn malloc_usable_size(_ptr: *const c_void) -> usize { 53 | unreachable!("estimate heapsize or feature allocator needed") 54 | } 55 | 56 | } 57 | 58 | } 59 | 60 | /// No enclosing function defined. 61 | #[inline] 62 | pub fn new_enclosing_size_fn() -> Option { 63 | None 64 | } 65 | } 66 | 67 | /// Get a new instance of a MallocSizeOfOps 68 | pub fn new_malloc_size_ops() -> MallocSizeOfOps { 69 | MallocSizeOfOps::new( 70 | usable_size::malloc_usable_size, 71 | usable_size::new_enclosing_size_fn(), 72 | None, 73 | ) 74 | } 75 | 76 | /// Extension methods for `MallocSizeOf` trait, do not implement 77 | /// directly. 78 | /// It allows getting heapsize without exposing `MallocSizeOfOps` 79 | /// (a single default `MallocSizeOfOps` is used for each call). 80 | pub trait MallocSizeOfExt: MallocSizeOf { 81 | /// Method to launch a heapsize measurement with a 82 | /// fresh state. 83 | fn malloc_size_of(&self) -> usize { 84 | let mut ops = new_malloc_size_ops(); 85 | ::size_of(self, &mut ops) 86 | } 87 | } 88 | 89 | impl MallocSizeOfExt for T {} 90 | 91 | #[cfg(feature = "std")] 92 | impl MallocSizeOf for std::sync::Arc { 93 | fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { 94 | self.unconditional_size_of(ops) 95 | } 96 | } 97 | -------------------------------------------------------------------------------- /crates/mysten-util-mem/src/external_impls.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2022, Mysten Labs, Inc. 2 | // SPDX-License-Identifier: Apache-2.0 3 | 4 | use crate::{MallocShallowSizeOf, MallocSizeOf}; 5 | 6 | // ed25519_consensus 7 | malloc_size_of_is_0!(ed25519_consensus::Signature); 8 | 9 | // fastcrypto 10 | malloc_size_of_is_0!(fastcrypto::bls12381::min_sig::BLS12381PublicKey); 11 | malloc_size_of_is_0!(fastcrypto::bls12381::min_sig::BLS12381Signature); 12 | malloc_size_of_is_0!(fastcrypto::bls12381::min_sig::BLS12381AggregateSignature); 13 | malloc_size_of_is_0!(fastcrypto::bls12381::min_pk::BLS12381PublicKey); 14 | malloc_size_of_is_0!(fastcrypto::bls12381::min_pk::BLS12381Signature); 15 | malloc_size_of_is_0!(fastcrypto::bls12381::min_pk::BLS12381AggregateSignature); 16 | malloc_size_of_is_0!(fastcrypto::ed25519::Ed25519PublicKey); 17 | malloc_size_of_is_0!(fastcrypto::ed25519::Ed25519Signature); 18 | impl MallocSizeOf for fastcrypto::ed25519::Ed25519AggregateSignature { 19 | fn size_of(&self, ops: &mut crate::MallocSizeOfOps) -> usize { 20 | self.sigs.size_of(ops) 21 | } 22 | } 23 | 24 | // hash_map 25 | malloc_size_of_is_0!(std::collections::hash_map::RandomState); 26 | 27 | // indexmap 28 | impl MallocShallowSizeOf for indexmap::IndexMap { 29 | fn shallow_size_of(&self, _ops: &mut crate::MallocSizeOfOps) -> usize { 30 | self.capacity() 31 | * (std::mem::size_of::() 32 | + std::mem::size_of::() 33 | + (2 * std::mem::size_of::())) 34 | } 35 | } 36 | impl MallocSizeOf for indexmap::IndexMap { 37 | // This only produces a rough estimate of IndexMap size, because we cannot access private 38 | // fields to measure them precisely. 39 | fn size_of(&self, ops: &mut crate::MallocSizeOfOps) -> usize { 40 | let mut n = self.shallow_size_of(ops); 41 | if let (Some(k), Some(v)) = (K::constant_size(), V::constant_size()) { 42 | n += self.len() * (k + v) 43 | } else { 44 | n += self 45 | .iter() 46 | .fold(n, |acc, (k, v)| acc + k.size_of(ops) + v.size_of(ops)) 47 | } 48 | n 49 | } 50 | } 51 | 52 | // roaring 53 | impl MallocSizeOf for roaring::RoaringBitmap { 54 | // This only produces a rough estimate of RoaringBitmap size, because we cannot access private 55 | // fields to measure them precisely. 56 | fn size_of(&self, _ops: &mut crate::MallocSizeOfOps) -> usize { 57 | self.serialized_size() 58 | } 59 | } 60 | -------------------------------------------------------------------------------- /crates/mysten-util-mem/src/lib.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2022, Mysten Labs, Inc. 2 | // SPDX-License-Identifier: Apache-2.0 3 | 4 | // Copyright 2020 Parity Technologies 5 | // 6 | // Licensed under the Apache License, Version 2.0 or the MIT license 8 | // , at your 9 | // option. This file may not be copied, modified, or distributed 10 | // except according to those terms. 11 | 12 | //! Crate for parity memory management related utilities. 13 | //! It includes global allocator choice, heap measurement and 14 | //! memory erasure. 15 | 16 | #![allow(clippy::all)] 17 | #![cfg_attr(not(feature = "std"), no_std)] 18 | 19 | #[cfg(not(feature = "std"))] 20 | extern crate alloc; 21 | 22 | // default allocator used 23 | mod memory_stats_noop; 24 | use memory_stats_noop as memory_stats; 25 | 26 | pub mod allocators; 27 | 28 | #[cfg(any( 29 | any(target_os = "macos", target_os = "ios"), 30 | feature = "estimate-heapsize" 31 | ))] 32 | pub mod sizeof; 33 | 34 | /// This is a copy of patched crate `malloc_size_of` as a module. 35 | /// We need to have it as an inner module to be able to define our own traits implementation, 36 | /// if at some point the trait become standard enough we could use the right way of doing it 37 | /// by implementing it in our type traits crates. At this time moving this trait to the primitive 38 | /// types level would impact too much of the dependencies to be easily manageable. 39 | #[macro_use] 40 | mod malloc_size; 41 | 42 | pub mod external_impls; 43 | 44 | pub use allocators::MallocSizeOfExt; 45 | pub use malloc_size::{MallocShallowSizeOf, MallocSizeOf, MallocSizeOfOps}; 46 | 47 | pub use mysten_util_mem_derive::*; 48 | 49 | /// Heap size of structure. 50 | /// 51 | /// Structure can be anything that implements MallocSizeOf. 52 | pub fn malloc_size(t: &T) -> usize { 53 | MallocSizeOf::size_of(t, &mut allocators::new_malloc_size_ops()) 54 | } 55 | 56 | /// An error related to the memory stats gathering. 57 | #[derive(Clone, Debug)] 58 | pub struct MemoryStatsError(memory_stats::Error); 59 | 60 | #[cfg(feature = "std")] 61 | impl std::fmt::Display for MemoryStatsError { 62 | fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { 63 | self.0.fmt(fmt) 64 | } 65 | } 66 | 67 | #[cfg(feature = "std")] 68 | impl std::error::Error for MemoryStatsError {} 69 | 70 | /// Snapshot of collected memory metrics. 71 | #[non_exhaustive] 72 | #[derive(Debug, Clone)] 73 | pub struct MemoryAllocationSnapshot { 74 | /// Total resident memory, in bytes. 75 | pub resident: u64, 76 | /// Total allocated memory, in bytes. 77 | pub allocated: u64, 78 | } 79 | 80 | /// Accessor to the allocator internals. 81 | #[derive(Clone)] 82 | pub struct MemoryAllocationTracker(self::memory_stats::MemoryAllocationTracker); 83 | 84 | impl MemoryAllocationTracker { 85 | /// Create an instance of an allocation tracker. 86 | pub fn new() -> Result { 87 | self::memory_stats::MemoryAllocationTracker::new() 88 | .map(MemoryAllocationTracker) 89 | .map_err(MemoryStatsError) 90 | } 91 | 92 | /// Create an allocation snapshot. 93 | pub fn snapshot(&self) -> Result { 94 | self.0.snapshot().map_err(MemoryStatsError) 95 | } 96 | } 97 | 98 | #[cfg(feature = "std")] 99 | #[cfg(test)] 100 | mod test { 101 | use super::{malloc_size, MallocSizeOf, MallocSizeOfExt}; 102 | use std::sync::Arc; 103 | 104 | #[test] 105 | fn test_arc() { 106 | let val = Arc::new("test".to_string()); 107 | let s = val.malloc_size_of(); 108 | assert!(s > 0); 109 | } 110 | 111 | #[test] 112 | fn test_dyn() { 113 | trait Augmented: MallocSizeOf {} 114 | impl Augmented for Vec {} 115 | let val: Arc = Arc::new(vec![0u8; 1024]); 116 | assert!(malloc_size(&*val) > 1000); 117 | } 118 | } 119 | -------------------------------------------------------------------------------- /crates/mysten-util-mem/src/memory_stats_noop.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2022, Mysten Labs, Inc. 2 | // SPDX-License-Identifier: Apache-2.0 3 | 4 | // Copyright 2021 Parity Technologies 5 | // 6 | // Licensed under the Apache License, Version 2.0 or the MIT license 8 | // , at your 9 | // option. This file may not be copied, modified, or distributed 10 | // except according to those terms. 11 | 12 | #[derive(Clone, Debug)] 13 | pub struct Unimplemented; 14 | pub use Unimplemented as Error; 15 | 16 | #[cfg(feature = "std")] 17 | impl std::fmt::Display for Unimplemented { 18 | fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { 19 | fmt.write_str("unimplemented") 20 | } 21 | } 22 | 23 | #[derive(Clone)] 24 | pub struct MemoryAllocationTracker {} 25 | 26 | impl MemoryAllocationTracker { 27 | pub fn new() -> Result { 28 | Err(Error) 29 | } 30 | 31 | pub fn snapshot(&self) -> Result { 32 | unimplemented!(); 33 | } 34 | } 35 | -------------------------------------------------------------------------------- /crates/mysten-util-mem/src/sizeof.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2022, Mysten Labs, Inc. 2 | // SPDX-License-Identifier: Apache-2.0 3 | 4 | // Copyright 2020 Parity Technologies 5 | // 6 | // Licensed under the Apache License, Version 2.0 or the MIT license 8 | // , at your 9 | // option. This file may not be copied, modified, or distributed 10 | // except according to those terms. 11 | 12 | //! Estimation for heapsize calculation. Usable to replace call to allocator method (for some 13 | //! allocators or simply because we just need a deterministic cunsumption measurement). 14 | 15 | use crate::malloc_size::{ 16 | MallocShallowSizeOf, MallocSizeOf, MallocSizeOfOps, MallocUnconditionalShallowSizeOf, 17 | }; 18 | #[cfg(not(feature = "std"))] 19 | use alloc::boxed::Box; 20 | #[cfg(not(feature = "std"))] 21 | use alloc::string::String; 22 | #[cfg(not(feature = "std"))] 23 | use alloc::sync::Arc; 24 | #[cfg(not(feature = "std"))] 25 | use alloc::vec::Vec; 26 | #[cfg(not(feature = "std"))] 27 | use core::mem::{size_of, size_of_val}; 28 | 29 | #[cfg(feature = "std")] 30 | use std::mem::{size_of, size_of_val}; 31 | #[cfg(feature = "std")] 32 | use std::sync::Arc; 33 | 34 | impl MallocShallowSizeOf for Box { 35 | fn shallow_size_of(&self, _ops: &mut MallocSizeOfOps) -> usize { 36 | size_of_val(&**self) 37 | } 38 | } 39 | 40 | impl MallocSizeOf for String { 41 | fn size_of(&self, _ops: &mut MallocSizeOfOps) -> usize { 42 | self.capacity() * size_of::() 43 | } 44 | } 45 | 46 | impl MallocShallowSizeOf for Vec { 47 | fn shallow_size_of(&self, _ops: &mut MallocSizeOfOps) -> usize { 48 | self.capacity() * size_of::() 49 | } 50 | } 51 | 52 | impl MallocUnconditionalShallowSizeOf for Arc { 53 | fn unconditional_shallow_size_of(&self, _ops: &mut MallocSizeOfOps) -> usize { 54 | size_of::() 55 | } 56 | } 57 | -------------------------------------------------------------------------------- /crates/mysten-util-mem/tests/derive.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2022, Mysten Labs, Inc. 2 | // SPDX-License-Identifier: Apache-2.0 3 | 4 | // Copyright 2020 Parity Technologies 5 | // 6 | // Licensed under the Apache License, Version 2.0 or the MIT license 8 | // , at your 9 | // option. This file may not be copied, modified, or distributed 10 | // except according to those terms. 11 | 12 | #![cfg(feature = "std")] 13 | 14 | use mysten_util_mem::{MallocSizeOf, MallocSizeOfExt}; 15 | 16 | #[test] 17 | fn derive_vec() { 18 | #[derive(MallocSizeOf)] 19 | struct Trivia { 20 | v: Vec, 21 | } 22 | 23 | let t = Trivia { v: vec![0u8; 1024] }; 24 | 25 | assert!(t.malloc_size_of() > 1000); 26 | } 27 | 28 | #[test] 29 | fn derive_hashmap() { 30 | #[derive(MallocSizeOf, Default)] 31 | struct Trivia { 32 | hm: std::collections::HashMap>, 33 | } 34 | 35 | let mut t = Trivia::default(); 36 | 37 | t.hm.insert(1, vec![0u8; 2048]); 38 | 39 | assert!(t.malloc_size_of() > 2000); 40 | } 41 | 42 | #[test] 43 | fn derive_ignore() { 44 | #[derive(MallocSizeOf, Default)] 45 | struct Trivia { 46 | hm: std::collections::HashMap>, 47 | #[ignore_malloc_size_of = "I don't like vectors"] 48 | v: Vec, 49 | } 50 | 51 | let mut t = Trivia::default(); 52 | 53 | t.hm.insert(1, vec![0u8; 2048]); 54 | t.v = vec![0u8; 1024]; 55 | assert!(t.malloc_size_of() < 3000); 56 | } 57 | 58 | #[test] 59 | #[cfg(all(feature = "lru", feature = "hashbrown"))] 60 | fn derive_morecomplex() { 61 | #[derive(MallocSizeOf)] 62 | struct Trivia { 63 | hm: hashbrown::HashMap>, 64 | cache: lru::LruCache>, 65 | } 66 | 67 | let mut t = Trivia { 68 | hm: hashbrown::HashMap::new(), 69 | cache: lru::LruCache::unbounded(), 70 | }; 71 | 72 | t.hm.insert(1, vec![0u8; 2048]); 73 | t.cache.put(1, vec![0u8; 2048]); 74 | t.cache.put(2, vec![0u8; 4096]); 75 | 76 | assert!(t.malloc_size_of() > 8000); 77 | } 78 | 79 | #[test] 80 | fn derive_tuple() { 81 | #[derive(MallocSizeOf)] 82 | struct Trivia { 83 | tp1: (), 84 | tp2: (Vec, Vec), 85 | } 86 | 87 | let t = Trivia { 88 | tp1: (), 89 | tp2: (vec![7u8; 1024], vec![9u8; 1024]), 90 | }; 91 | 92 | assert!(t.malloc_size_of() > 2000); 93 | assert!(t.malloc_size_of() < 3000); 94 | } 95 | -------------------------------------------------------------------------------- /crates/name-variant/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "name-variant" 3 | version = "0.1.0" 4 | authors = ["Francois Garillot "] 5 | edition = "2021" 6 | description = "Generates a method to name enum variants." 7 | license = "Apache-2.0" 8 | repository = "https://github.com/mystenlabs/mysten-infra" 9 | publish = ["crates-io"] 10 | 11 | [lib] 12 | proc-macro = true 13 | 14 | [dependencies] 15 | proc-macro2 = "1.0.47" 16 | quote = "1.0.20" 17 | syn = "1.0.102" 18 | -------------------------------------------------------------------------------- /crates/name-variant/README.md: -------------------------------------------------------------------------------- 1 | # name_variant 2 | 3 | Generates methods to print an enum variant's name as a string. 4 | 5 | # Example 6 | 7 | ```rust 8 | use name_variant::NamedVariant; 9 | 10 | #[derive(NamedVariant)] 11 | enum TestEnum { 12 | A, 13 | B(), 14 | C(i32, i32), 15 | D { _name: String, _age: i32 }, 16 | VariantTest, 17 | } 18 | 19 | fn main() { 20 | let x = TestEnum::C(1, 2); 21 | assert_eq!(x.variant_name(), "C"); 22 | 23 | let x = TestEnum::A; 24 | assert_eq!(x.variant_name(), "A"); 25 | 26 | let x = TestEnum::B(); 27 | assert_eq!(x.variant_name(), "B"); 28 | 29 | let x = TestEnum::D {_name: "Jane Doe".into(), _age: 30 }; 30 | assert_eq!(x.variant_name(), "D"); 31 | 32 | let x = TestEnum::VariantTest; 33 | assert_eq!(x.variant_name(), "VariantTest"); 34 | } 35 | ``` 36 | -------------------------------------------------------------------------------- /crates/name-variant/src/lib.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2022, Mysten Labs, Inc. 2 | // SPDX-License-Identifier: Apache-2.0 3 | //! Generates methods to print the name of the enum variant. 4 | //! 5 | //! # Example 6 | //! 7 | //! ```rust 8 | //! use name_variant::NamedVariant; 9 | //! 10 | //! # macro_rules! dont_test { () => { 11 | //! #[derive(NamedVariant)] 12 | //! enum TestEnum { 13 | //! A, 14 | //! B(), 15 | //! C(i32, i32), 16 | //! D { _name: String, _age: i32 }, 17 | //! VariantTest, 18 | //! } 19 | //! 20 | //! let x = TestEnum::C(1, 2); 21 | //! assert_eq!(x.variant_name(), "C"); 22 | //! 23 | //! let x = TestEnum::A; 24 | //! assert_eq!(x.variant_name(), "A"); 25 | //! 26 | //! let x = TestEnum::B(); 27 | //! assert_eq!(x.variant_name(), "B"); 28 | //! 29 | //! let x = TestEnum::D {_name: "Jane Doe".into(), _age: 30 }; 30 | //! assert_eq!(x.variant_name(), "D"); 31 | //! 32 | //! let x = TestEnum::VariantTest; 33 | //! assert_eq!(x.variant_name(), "VariantTest"); 34 | //! 35 | //! # }}; 36 | //! ``` 37 | 38 | extern crate proc_macro; 39 | 40 | use proc_macro::TokenStream; 41 | use proc_macro2::{Ident, Span, TokenStream as TokenStream2}; 42 | 43 | use quote::{quote, quote_spanned}; 44 | use syn::spanned::Spanned; 45 | use syn::{parse_macro_input, Data, DataEnum, DeriveInput, Error, Fields}; 46 | 47 | macro_rules! derive_error { 48 | ($string: tt) => { 49 | Error::new(Span::call_site(), $string) 50 | .to_compile_error() 51 | .into() 52 | }; 53 | } 54 | 55 | fn match_enum_to_string(name: &Ident, variants: &DataEnum) -> proc_macro2::TokenStream { 56 | // the variant dispatch proper 57 | let mut match_arms = quote! {}; 58 | for variant in variants.variants.iter() { 59 | let variant_ident = &variant.ident; 60 | let fields_in_variant = match &variant.fields { 61 | Fields::Unnamed(_) => quote_spanned! {variant.span() => (..) }, 62 | Fields::Unit => quote_spanned! { variant.span() => }, 63 | Fields::Named(_) => quote_spanned! {variant.span() => {..} }, 64 | }; 65 | let variant_string = variant_ident.to_string(); 66 | 67 | match_arms.extend(quote! { 68 | #name::#variant_ident #fields_in_variant => #variant_string, 69 | }); 70 | } 71 | match_arms 72 | } 73 | 74 | #[proc_macro_derive(NamedVariant)] 75 | pub fn derive_named_variant(input: TokenStream) -> TokenStream { 76 | let input = parse_macro_input!(input as DeriveInput); 77 | 78 | let name = &input.ident; 79 | let data = &input.data; 80 | 81 | let mut variant_checker_functions; 82 | 83 | match data { 84 | Data::Enum(data_enum) => { 85 | variant_checker_functions = TokenStream2::new(); 86 | 87 | let variant_arms = match_enum_to_string(name, data_enum); 88 | 89 | variant_checker_functions.extend(quote_spanned! { name.span() => 90 | const fn variant_name(&self) -> &'static str { 91 | match self { 92 | #variant_arms 93 | } 94 | } 95 | }); 96 | } 97 | _ => return derive_error!("NamedVariant is only implemented for enums"), 98 | }; 99 | 100 | let (impl_generics, ty_generics, where_clause) = input.generics.split_for_impl(); 101 | 102 | let expanded = quote! { 103 | impl #impl_generics #name #ty_generics #where_clause { 104 | #variant_checker_functions 105 | } 106 | }; 107 | 108 | TokenStream::from(expanded) 109 | } 110 | -------------------------------------------------------------------------------- /crates/name-variant/tests/test.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2022, Mysten Labs, Inc. 2 | // SPDX-License-Identifier: Apache-2.0 3 | use name_variant::NamedVariant; 4 | 5 | #[derive(NamedVariant)] 6 | enum TestEnum { 7 | A, 8 | B(), 9 | C(i32, i32), 10 | D { _name: String, _age: i32 }, 11 | VariantTest, 12 | } 13 | 14 | #[test] 15 | fn smoke_test() { 16 | let x = TestEnum::C(1, 2); 17 | assert_eq!(x.variant_name(), "C"); 18 | 19 | let x = TestEnum::A; 20 | assert_eq!(x.variant_name(), "A"); 21 | 22 | let x = TestEnum::B(); 23 | assert_eq!(x.variant_name(), "B"); 24 | 25 | let x = TestEnum::D { 26 | _name: "Jane Doe".into(), 27 | _age: 30, 28 | }; 29 | assert_eq!(x.variant_name(), "D"); 30 | 31 | let x = TestEnum::VariantTest; 32 | assert_eq!(x.variant_name(), "VariantTest"); 33 | } 34 | -------------------------------------------------------------------------------- /crates/prometheus-closure-metric/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "prometheus-closure-metric" 3 | version = "0.1.0" 4 | authors = ["Andrew Schran "] 5 | repository = "https://github.com/mystenlabs/mysten-infra" 6 | description = "Library for a Prometheus metric that computes its value by given closure at collection time" 7 | license = "Apache-2.0" 8 | edition = "2021" 9 | publish = false 10 | 11 | [dependencies] 12 | anyhow = "1.0.58" 13 | prometheus = "0.13.1" 14 | protobuf = "^2.0" 15 | -------------------------------------------------------------------------------- /crates/prometheus-closure-metric/src/lib.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2022, Mysten Labs, Inc. 2 | // SPDX-License-Identifier: Apache-2.0 3 | 4 | // Copyright 2014 The Prometheus Authors 5 | // Copyright 2019 TiKV Project Authors. Licensed under Apache-2.0. 6 | 7 | //! This library implements a `ClosureMetric` for crate `prometheus` whose value is computed at 8 | //! the time of collection by a provided closure. 9 | 10 | // TODO: add example usage once constructor macros are implemented. 11 | // (For now, look at tests for an example.) 12 | 13 | use anyhow::anyhow; 14 | use anyhow::Result; 15 | use prometheus::core; 16 | use prometheus::proto; 17 | 18 | /// A Prometheus metric whose value is computed at collection time by the provided closure. 19 | /// 20 | /// WARNING: The provided closure must be fast (~milliseconds or faster), since it blocks 21 | /// metric collection. 22 | #[derive(Debug)] 23 | pub struct ClosureMetric { 24 | desc: core::Desc, 25 | f: F, 26 | value_type: ValueType, 27 | label_pairs: Vec, 28 | } 29 | 30 | impl ClosureMetric 31 | where 32 | F: Fn() -> T + Sync + Send, 33 | T: core::Number, 34 | { 35 | pub fn new( 36 | describer: D, 37 | value_type: ValueType, 38 | f: F, 39 | label_values: &[&str], 40 | ) -> Result { 41 | let desc = describer.describe()?; 42 | let label_pairs = make_label_pairs(&desc, label_values)?; 43 | 44 | Ok(Self { 45 | desc, 46 | f, 47 | value_type, 48 | label_pairs, 49 | }) 50 | } 51 | 52 | pub fn metric(&self) -> proto::Metric { 53 | let mut m = proto::Metric::default(); 54 | m.set_label(protobuf::RepeatedField::from_vec(self.label_pairs.clone())); 55 | 56 | let val = (self.f)().into_f64(); 57 | match self.value_type { 58 | ValueType::Counter => { 59 | let mut counter = proto::Counter::default(); 60 | counter.set_value(val); 61 | m.set_counter(counter); 62 | } 63 | ValueType::Gauge => { 64 | let mut gauge = proto::Gauge::default(); 65 | gauge.set_value(val); 66 | m.set_gauge(gauge); 67 | } 68 | } 69 | 70 | m 71 | } 72 | } 73 | 74 | impl prometheus::core::Collector for ClosureMetric 75 | where 76 | F: Fn() -> T + Sync + Send, 77 | T: core::Number, 78 | { 79 | fn desc(&self) -> Vec<&prometheus::core::Desc> { 80 | vec![&self.desc] 81 | } 82 | 83 | fn collect(&self) -> Vec { 84 | let mut m = proto::MetricFamily::default(); 85 | m.set_name(self.desc.fq_name.clone()); 86 | m.set_help(self.desc.help.clone()); 87 | m.set_field_type(self.value_type.metric_type()); 88 | m.set_metric(protobuf::RepeatedField::from_vec(vec![self.metric()])); 89 | vec![m] 90 | } 91 | } 92 | 93 | #[derive(Debug, Clone, Copy)] 94 | pub enum ValueType { 95 | Counter, 96 | Gauge, 97 | } 98 | 99 | impl ValueType { 100 | /// `metric_type` returns the corresponding proto metric type. 101 | pub fn metric_type(self) -> proto::MetricType { 102 | match self { 103 | ValueType::Counter => proto::MetricType::COUNTER, 104 | ValueType::Gauge => proto::MetricType::GAUGE, 105 | } 106 | } 107 | } 108 | 109 | pub fn make_label_pairs(desc: &core::Desc, label_values: &[&str]) -> Result> { 110 | if desc.variable_labels.len() != label_values.len() { 111 | return Err(anyhow!("inconsistent cardinality")); 112 | } 113 | 114 | let total_len = desc.variable_labels.len() + desc.const_label_pairs.len(); 115 | if total_len == 0 { 116 | return Ok(vec![]); 117 | } 118 | 119 | if desc.variable_labels.is_empty() { 120 | return Ok(desc.const_label_pairs.clone()); 121 | } 122 | 123 | let mut label_pairs = Vec::with_capacity(total_len); 124 | for (i, n) in desc.variable_labels.iter().enumerate() { 125 | let mut label_pair = proto::LabelPair::default(); 126 | label_pair.set_name(n.clone()); 127 | label_pair.set_value(label_values[i].to_owned()); 128 | label_pairs.push(label_pair); 129 | } 130 | 131 | for label_pair in &desc.const_label_pairs { 132 | label_pairs.push(label_pair.clone()); 133 | } 134 | label_pairs.sort(); 135 | Ok(label_pairs) 136 | } 137 | 138 | // TODO: add and test macros for easier ClosureMetric construction. 139 | -------------------------------------------------------------------------------- /crates/prometheus-closure-metric/tests/closure_metric.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2022, Mysten Labs, Inc. 2 | // SPDX-License-Identifier: Apache-2.0 3 | 4 | use prometheus_closure_metric::ClosureMetric; 5 | 6 | #[test] 7 | fn closure_metric_basic() { 8 | let opts = 9 | prometheus::opts!("my_closure_metric", "A test closure metric",).variable_label("my_label"); 10 | 11 | let fn_42 = || 42_u64; 12 | let metric0 = ClosureMetric::new( 13 | opts, 14 | prometheus_closure_metric::ValueType::Gauge, 15 | fn_42, 16 | &["forty_two"], 17 | ) 18 | .unwrap(); 19 | 20 | assert!(prometheus::default_registry() 21 | .register(Box::new(metric0)) 22 | .is_ok()); 23 | 24 | // Gather the metrics. 25 | let metric_families = prometheus::default_registry().gather(); 26 | assert_eq!(1, metric_families.len()); 27 | let metric_family = &metric_families[0]; 28 | assert_eq!("my_closure_metric", metric_family.get_name()); 29 | let metric = metric_family.get_metric(); 30 | assert_eq!(1, metric.len()); 31 | assert_eq!(42.0, metric[0].get_gauge().get_value()); 32 | let labels = metric[0].get_label(); 33 | assert_eq!(1, labels.len()); 34 | assert_eq!("my_label", labels[0].get_name()); 35 | assert_eq!("forty_two", labels[0].get_value()); 36 | } 37 | -------------------------------------------------------------------------------- /crates/rccheck/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "rccheck" 3 | version = "0.1.0" 4 | license = "Apache-2.0" 5 | authors = ["Francois Garillot "] 6 | description = "tools for rustls-based certificate verification using pre-shared public keys" 7 | repository = "https://github.com/mystenlabs/mysten-infra" 8 | edition = "2021" 9 | publish = false 10 | 11 | [dependencies] 12 | ed25519 = { version = "1.5.2", features = ["pkcs8", "alloc", "zeroize"] } 13 | ed25519-dalek = "1.0.1" 14 | eyre = "0.6.8" 15 | ouroboros = "0.15.1" 16 | pkcs8 = { version = "0.9.0", features = ["std"] } 17 | rcgen = { version = "0.10.0", features = ["x509-parser"] } 18 | rustls = { version = "0.20.6", default-features = false, features = ["logging", "dangerous_configuration"] } 19 | serde = { version = "1.0.140", features = ["derive"] } 20 | tracing = "0.1.37" 21 | webpki = { version = "0.22.0", features = ["alloc", "std"] } 22 | x509-parser = "0.14.0" 23 | 24 | [dev-dependencies] 25 | bincode = "1.3.3" 26 | proptest = "1.0.0" 27 | rand = "0.7.3" 28 | -------------------------------------------------------------------------------- /crates/rccheck/proptest-regressions/tests/ed25519_certgen_tests.txt: -------------------------------------------------------------------------------- 1 | # Seeds for failure cases proptest has generated in the past. It is 2 | # automatically read and these particular cases re-run before any 3 | # novel cases are generated. 4 | # 5 | # It is recommended to check this file in to source control so that 6 | # everyone who runs the test benefits from these saved cases. 7 | cc 2314fa35d5986111fdc8a2e76ea8c71740afee8009f5a5562334ec93f9b9f3c3 # shrinks to kp = Keypair { secret: SecretKey: [242, 33, 74, 135, 132, 113, 26, 59, 115, 111, 186, 109, 99, 50, 165, 102, 95, 247, 82, 254, 166, 81, 194, 89, 227, 90, 236, 179, 199, 229, 9, 176], public: PublicKey(CompressedEdwardsY: [36, 252, 248, 160, 223, 187, 68, 119, 111, 224, 216, 76, 235, 141, 167, 246, 199, 188, 186, 164, 160, 100, 176, 30, 145, 135, 53, 99, 212, 141, 235, 105]), EdwardsPoint{ X: FieldElement51([636188592870209, 2122666908395076, 1759756592963115, 407366879547462, 1514920451640683]), Y: FieldElement51([1934028786357739, 917481144122446, 1075599933454187, 431199849450843, 1369755919471043]), Z: FieldElement51([506105058121111, 2016703970022612, 206462402701108, 1275383251367447, 1679329397810154]), T: FieldElement51([380064152557241, 1407294455312354, 1352227124520218, 1140276849245621, 1008811480537287]) }) } 8 | -------------------------------------------------------------------------------- /crates/rccheck/src/ed25519_certgen.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2022, Mysten Labs, Inc. 2 | // SPDX-License-Identifier: Apache-2.0 3 | 4 | use ed25519::pkcs8::EncodePrivateKey; 5 | 6 | use pkcs8::der::Encode; 7 | use rcgen::{CertificateParams, KeyPair, SignatureAlgorithm}; 8 | 9 | use crate::Certifiable; 10 | 11 | #[cfg(test)] 12 | #[path = "tests/ed25519_certgen_tests.rs"] 13 | mod ed25519_certgen_tests; 14 | 15 | #[cfg(test)] 16 | #[path = "tests/ed25519_external_trust_anchor.rs"] 17 | mod ed25519_external_trust_anchor; 18 | 19 | fn dalek_to_keypair_bytes(dalek_kp: ed25519_dalek::Keypair) -> ed25519::KeypairBytes { 20 | let private = dalek_kp.secret; 21 | let _public = dalek_kp.public; 22 | 23 | ed25519::KeypairBytes { 24 | secret_key: private.to_bytes(), 25 | // ring cannot handle the optional public key that would be legal der here 26 | // that is, ring expects PKCS#8 v.1 27 | public_key: None, // Some(_public.to_bytes()), 28 | } 29 | } 30 | 31 | fn keypair_bytes_to_pkcs8_n_algo( 32 | kpb: ed25519::KeypairBytes, 33 | ) -> Result<(pkcs8::der::SecretDocument, &'static SignatureAlgorithm), pkcs8::Error> { 34 | // PKCS#8 v2 as described in [RFC 5958]. 35 | // PKCS#8 v2 keys include an additional public key field. 36 | let pkcs8 = kpb.to_pkcs8_der()?; 37 | 38 | Ok((pkcs8, &rcgen::PKCS_ED25519)) 39 | } 40 | 41 | fn gen_certificate( 42 | subject_names: impl Into>, 43 | key_pair: (&[u8], &'static SignatureAlgorithm), 44 | ) -> Result { 45 | let kp = KeyPair::from_der_and_sign_algo(key_pair.0, key_pair.1)?; 46 | 47 | let mut cert_params = CertificateParams::new(subject_names); 48 | cert_params.key_pair = Some(kp); 49 | cert_params.distinguished_name = rcgen::DistinguishedName::new(); 50 | cert_params.alg = key_pair.1; 51 | 52 | let cert = rcgen::Certificate::from_params(cert_params).expect( 53 | "unreachable! from_params should only fail if the key is incompatible with params.algo", 54 | ); 55 | let cert_bytes = cert.serialize_der()?; 56 | Ok(rustls::Certificate(cert_bytes)) 57 | } 58 | 59 | /// Signs a CertificateSigningRequest (CSR) and produces a X.509 Certificate. 60 | fn sign_certificate_signing_request( 61 | csr_der: &[u8], // DER-serialized CSR 62 | key_pair: (&[u8], &'static SignatureAlgorithm), 63 | ) -> Result { 64 | let kp = KeyPair::from_der_and_sign_algo(key_pair.0, key_pair.1)?; 65 | 66 | let csr = rcgen::CertificateSigningRequest::from_der(csr_der).map_err(eyre::Report::new)?; 67 | 68 | let mut cert_params = CertificateParams::new(vec!["localhost".to_string()]); 69 | cert_params.key_pair = Some(kp); 70 | cert_params.distinguished_name = rcgen::DistinguishedName::new(); 71 | cert_params.alg = key_pair.1; 72 | 73 | let cert = rcgen::Certificate::from_params(cert_params).expect( 74 | "unreachable! from_params should only fail if the key is incompatible with params.algo", 75 | ); 76 | 77 | let signed_cert = csr 78 | .serialize_der_with_signer(&cert) 79 | .map_err(eyre::Report::new)?; 80 | Ok(rustls::Certificate(signed_cert)) 81 | } 82 | 83 | // Token struct to peg this purely functional impl on 84 | pub struct Ed25519 {} 85 | impl Certifiable for Ed25519 { 86 | type PublicKey = ed25519_dalek::PublicKey; 87 | 88 | type KeyPair = ed25519_dalek::Keypair; 89 | 90 | /// KISS function to generate a self signed certificate from a dalek keypair 91 | /// Given a set of domain names you want your certificate to be valid for, this function fills in the other generation parameters with 92 | /// reasonable defaults and generates a self signed certificate using the keypair passed as argument as output. 93 | /// 94 | /// ## Example 95 | /// ``` 96 | /// extern crate rccheck; 97 | /// use rccheck::ed25519_certgen::Ed25519; 98 | /// use rccheck::Certifiable; 99 | /// # let mut rng = rand::thread_rng(); 100 | /// let subject_alt_names = vec!["localhost".to_string()]; 101 | /// let kp = ed25519_dalek::Keypair::generate(&mut rng); 102 | /// 103 | /// let cert = Ed25519::keypair_to_certificate(subject_alt_names, kp).unwrap(); 104 | /// // The certificate is now valid for localhost 105 | /// ``` 106 | /// 107 | fn keypair_to_certificate( 108 | subject_names: impl Into>, 109 | kp: Self::KeyPair, 110 | ) -> Result { 111 | let keypair_bytes = dalek_to_keypair_bytes(kp); 112 | let (pkcs_bytes, alg) = 113 | keypair_bytes_to_pkcs8_n_algo(keypair_bytes).map_err(eyre::Report::new)?; 114 | 115 | let certificate = gen_certificate(subject_names, (pkcs_bytes.as_bytes(), alg))?; 116 | Ok(certificate) 117 | } 118 | 119 | /// This produces X.509 `SubjectPublicKeyInfo` (SPKI) as defined in [RFC 5280 Section 4.1.2.7](https://datatracker.ietf.org/doc/html/rfc5280). 120 | /// in DER-encoded format, serialized to a byte string. 121 | /// Example 122 | /// ``` 123 | /// use rccheck::*; 124 | /// let mut rng = rand::thread_rng(); 125 | /// let keypair = ed25519_dalek::Keypair::generate(&mut rng); 126 | /// let spki = ed25519_certgen::Ed25519::public_key_to_spki(&keypair.public); // readable by Psk::from_der 127 | /// ``` 128 | fn public_key_to_spki(public_key: &Self::PublicKey) -> Vec { 129 | let subject_public_key = public_key.as_bytes(); 130 | 131 | let key_info = pkcs8::spki::SubjectPublicKeyInfo { 132 | algorithm: pkcs8::spki::AlgorithmIdentifier { 133 | // ed25519 OID 134 | oid: ed25519::pkcs8::ALGORITHM_OID, 135 | // some environments require a type ASN.1 NULL, use the commented alternative if so 136 | // this instead matches our rcgen-produced certificates for compatibiltiy 137 | // use pkcs8::spki::der::asn1; 138 | parameters: None, // Some(asn1::Any::from(asn1::Null)), 139 | }, 140 | subject_public_key, 141 | }; 142 | 143 | // Infallible because we know the public key is valid. 144 | key_info 145 | .to_vec() 146 | .expect("Dalek public key should be valid!") 147 | } 148 | 149 | fn sign_certificate_request( 150 | certificate_request_der: &[u8], 151 | kp: Self::KeyPair, 152 | ) -> Result { 153 | let keypair_bytes = dalek_to_keypair_bytes(kp); 154 | let (pkcs_bytes, alg) = 155 | keypair_bytes_to_pkcs8_n_algo(keypair_bytes).map_err(eyre::Report::new)?; 156 | 157 | sign_certificate_signing_request(certificate_request_der, (pkcs_bytes.as_bytes(), alg)) 158 | } 159 | 160 | fn keypair_to_der_certificate_request( 161 | subject_names: impl Into>, 162 | kp: Self::KeyPair, 163 | ) -> Result, eyre::Report> { 164 | let keypair_bytes = dalek_to_keypair_bytes(kp); 165 | let (pkcs_bytes, alg) = 166 | keypair_bytes_to_pkcs8_n_algo(keypair_bytes).map_err(eyre::Report::new)?; 167 | let kp = KeyPair::from_der_and_sign_algo(pkcs_bytes.as_bytes(), alg)?; 168 | 169 | let mut cert_params = CertificateParams::new(subject_names.into()); 170 | cert_params.key_pair = Some(kp); 171 | cert_params.distinguished_name = rcgen::DistinguishedName::new(); 172 | cert_params.alg = alg; 173 | 174 | let cert = rcgen::Certificate::from_params(cert_params).expect( 175 | "unreachable! from_params should only fail if the key is incompatible with params.algo", 176 | ); 177 | Ok(cert.serialize_request_der()?) 178 | } 179 | } 180 | -------------------------------------------------------------------------------- /crates/rccheck/src/tests/ed25519_certgen_tests.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2022, Mysten Labs, Inc. 2 | // SPDX-License-Identifier: Apache-2.0 3 | 4 | use std::time::SystemTime; 5 | 6 | use crate::{ 7 | test_utils::{dalek_keypair_strategy, dalek_pubkey_strategy}, 8 | Certifiable, Psk, 9 | }; 10 | 11 | use super::*; 12 | 13 | use proptest::prelude::*; 14 | use rustls::{client::ServerCertVerifier, server::ClientCertVerifier}; 15 | 16 | proptest! { 17 | #[test] 18 | fn ed25519_keys_to_spki( 19 | pub_key in dalek_pubkey_strategy(), 20 | ){ 21 | let spki = Ed25519::public_key_to_spki(&pub_key); 22 | let psk = Psk::from_der(&spki); 23 | psk.unwrap(); 24 | } 25 | 26 | #[test] 27 | fn rc_gen_self_signed_dalek( 28 | kp in dalek_keypair_strategy(), 29 | ) { 30 | let subject_alt_names = vec!["localhost".to_string()]; 31 | let public_key = kp.public; 32 | 33 | let cert = Ed25519::keypair_to_certificate(subject_alt_names, kp).unwrap(); 34 | 35 | let spki = Ed25519::public_key_to_spki(&public_key); 36 | let psk = Psk::from_der(&spki).unwrap(); 37 | let now = SystemTime::now(); 38 | 39 | // this passes client verification 40 | psk.verify_client_cert(&cert, &[], now).unwrap(); 41 | 42 | // this passes server verification 43 | let mut empty = std::iter::empty(); 44 | psk.verify_server_cert( 45 | &cert, 46 | &[], 47 | &rustls::ServerName::try_from("localhost").unwrap(), 48 | &mut empty, 49 | &[], 50 | now, 51 | ) 52 | .unwrap(); 53 | } 54 | 55 | #[test] 56 | fn rc_gen_not_self_signed_dalek( 57 | // this time the pubkey does not match the creation keypair 58 | kp in dalek_keypair_strategy(), 59 | public_key in dalek_pubkey_strategy(), 60 | ) { 61 | let subject_alt_names = vec!["localhost".to_string()]; 62 | 63 | let cert = Ed25519::keypair_to_certificate(subject_alt_names, kp).unwrap(); 64 | 65 | let spki = Ed25519::public_key_to_spki(&public_key); 66 | let psk = Psk::from_der(&spki).unwrap(); 67 | let now = SystemTime::now(); 68 | 69 | // this does not pass client verification 70 | assert!(psk.verify_client_cert(&cert, &[], now).err() 71 | .unwrap() 72 | .to_string() 73 | .contains("invalid peer certificate")); 74 | 75 | // this does not pass server verification 76 | let mut empty = std::iter::empty(); 77 | assert!(psk.verify_server_cert( 78 | &cert, 79 | &[], 80 | &rustls::ServerName::try_from("localhost").unwrap(), 81 | &mut empty, 82 | &[], 83 | now, 84 | ) 85 | .err() 86 | .unwrap() 87 | .to_string() 88 | .contains("invalid peer certificate")); 89 | } 90 | } 91 | -------------------------------------------------------------------------------- /crates/rccheck/src/tests/ed25519_external_trust_anchor.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2022, Mysten Labs, Inc. 2 | // SPDX-License-Identifier: Apache-2.0 3 | 4 | use std::time::SystemTime; 5 | 6 | use super::*; 7 | use crate::{ed25519_certgen::Ed25519, test_utils::dalek_keypair_strategy, PskSet}; 8 | 9 | use proptest::prelude::*; 10 | 11 | proptest! { 12 | #[test] 13 | fn rc_gen_ca_client_split( 14 | client_kp in dalek_keypair_strategy(), 15 | ca_kp in dalek_keypair_strategy(), 16 | ) { 17 | let subject_alt_names = vec!["localhost".to_string()]; 18 | 19 | let ca_public_key = ca_kp.public; 20 | 21 | // 1. Generate CSR on client 22 | let cert_request = 23 | Ed25519::keypair_to_der_certificate_request(subject_alt_names, client_kp).unwrap(); 24 | 25 | // (2) Client sends CSR to CA 26 | 27 | // CA signs CSR and produces Certificate 28 | let cert = Ed25519::sign_certificate_request(&cert_request[..], ca_kp).unwrap(); 29 | 30 | let ca_spki = Ed25519::public_key_to_spki(&ca_public_key); 31 | let now = SystemTime::now(); 32 | 33 | let psk_set = PskSet::from_der(&[&ca_spki[..]]).unwrap(); 34 | let mut empty = std::iter::empty(); 35 | 36 | assert!(psk_set 37 | .verify_client_cert(&ca_spki, &cert, &[], now) 38 | .is_ok()); 39 | assert!(psk_set 40 | .verify_server_cert( 41 | &ca_spki, 42 | &cert, 43 | &[], 44 | &rustls::ServerName::try_from("localhost").unwrap(), 45 | &mut empty, 46 | &[], 47 | now, 48 | ) 49 | .is_ok()); 50 | } 51 | 52 | #[test] 53 | fn rc_gen_ca_client_split_err( 54 | client_kp in dalek_keypair_strategy(), 55 | ca_kp in dalek_keypair_strategy() 56 | ) { 57 | let client_kp_copy = ed25519_dalek::Keypair::from_bytes(&client_kp.to_bytes()).unwrap(); 58 | 59 | let subject_alt_names = vec!["localhost".to_string()]; 60 | 61 | let ca_public_key = ca_kp.public; 62 | 63 | // 1. Generate CSR on client 64 | let cert_request = 65 | Ed25519::keypair_to_der_certificate_request(subject_alt_names, client_kp).unwrap(); 66 | 67 | // Client signs CSR and produces Certificate 68 | let cert = Ed25519::sign_certificate_request(&cert_request[..], client_kp_copy).unwrap(); 69 | 70 | let ca_spki = Ed25519::public_key_to_spki(&ca_public_key); 71 | let now = SystemTime::now(); 72 | 73 | let psk_set = PskSet::from_der(&[&ca_spki[..]]).unwrap(); 74 | let mut empty = std::iter::empty(); 75 | 76 | assert!(psk_set 77 | .verify_client_cert(&ca_spki, &cert, &[], now) 78 | .is_err()); 79 | assert!(psk_set 80 | .verify_server_cert( 81 | &ca_spki, 82 | &cert, 83 | &[], 84 | &rustls::ServerName::try_from("localhost").unwrap(), 85 | &mut empty, 86 | &[], 87 | now, 88 | ) 89 | .is_err()); 90 | } 91 | } 92 | -------------------------------------------------------------------------------- /crates/rccheck/src/tests/psk_set_tests.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2022, Mysten Labs, Inc. 2 | // SPDX-License-Identifier: Apache-2.0 3 | 4 | use crate::{ed25519_certgen::Ed25519, test_utils::cert_bytes_to_spki_bytes, *}; 5 | use rcgen::generate_simple_self_signed; 6 | 7 | #[test] 8 | fn serde_round_trip_psk_set() { 9 | let subject_alt_names = vec!["localhost".to_string()]; 10 | 11 | let cert = generate_simple_self_signed(subject_alt_names).unwrap(); 12 | let cert_bytes: Vec = cert.serialize_der().unwrap(); 13 | 14 | let spki = cert_bytes_to_spki_bytes(&cert_bytes); 15 | let psk = PskSet::from_der(&[&spki[..]]).unwrap(); 16 | let psk_bytes = bincode::serialize(&psk).unwrap(); 17 | let psk_roundtripped = bincode::deserialize::(&psk_bytes).unwrap(); 18 | assert_eq!(psk, psk_roundtripped); 19 | } 20 | 21 | #[test] 22 | fn rc_gen_self_signed_dalek() { 23 | let mut rng = ::from_seed([0; 32]); 24 | let kp = ed25519_dalek::Keypair::generate(&mut rng); 25 | let kp2 = ed25519_dalek::Keypair::generate(&mut rng); 26 | 27 | let subject_alt_names = vec!["localhost".to_string()]; 28 | 29 | let public_key = kp.public; 30 | let public_key_2 = kp2.public; 31 | 32 | let spki = Ed25519::public_key_to_spki(&public_key); 33 | let spki2 = Ed25519::public_key_to_spki(&public_key_2); 34 | 35 | let psk_set = PskSet::from_der(&[&spki[..], &spki2[..]]).unwrap(); 36 | let now = SystemTime::now(); 37 | 38 | let cert = Ed25519::keypair_to_certificate(subject_alt_names, kp).unwrap(); 39 | 40 | // this passes client verification 41 | psk_set.verify_client_cert(&spki, &cert, &[], now).unwrap(); 42 | 43 | // this passes server verification 44 | let mut empty = std::iter::empty(); 45 | psk_set 46 | .verify_server_cert( 47 | &spki, 48 | &cert, 49 | &[], 50 | &rustls::ServerName::try_from("localhost").unwrap(), 51 | &mut empty, 52 | &[], 53 | now, 54 | ) 55 | .unwrap(); 56 | } 57 | 58 | #[test] 59 | fn rc_gen_not_self_signed_dalek() { 60 | let mut rng = ::from_seed([0; 32]); 61 | let kp = ed25519_dalek::Keypair::generate(&mut rng); 62 | let invalid_kp = ed25519_dalek::Keypair::generate(&mut rng); 63 | 64 | let subject_alt_names = vec!["localhost".to_string()]; 65 | 66 | let public_key = kp.public; 67 | 68 | let spki = Ed25519::public_key_to_spki(&public_key); 69 | 70 | let psk_set = PskSet::from_der(&[&spki[..]]).unwrap(); 71 | let now = SystemTime::now(); 72 | 73 | let invalid_cert = Ed25519::keypair_to_certificate(subject_alt_names, invalid_kp).unwrap(); 74 | 75 | // this does not pass client verification 76 | assert!(psk_set 77 | .verify_client_cert(&spki, &invalid_cert, &[], now) 78 | .err() 79 | .unwrap() 80 | .to_string() 81 | .contains("invalid peer certificate")); 82 | 83 | // this passes server verification 84 | let mut empty = std::iter::empty(); 85 | assert!(psk_set 86 | .verify_server_cert( 87 | &spki, 88 | &invalid_cert, 89 | &[], 90 | &rustls::ServerName::try_from("localhost").unwrap(), 91 | &mut empty, 92 | &[], 93 | now, 94 | ) 95 | .err() 96 | .unwrap() 97 | .to_string() 98 | .contains("invalid peer certificate")); 99 | } 100 | -------------------------------------------------------------------------------- /crates/rccheck/src/tests/psk_tests.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2022, Mysten Labs, Inc. 2 | // SPDX-License-Identifier: Apache-2.0 3 | 4 | use crate::{test_utils::cert_bytes_to_spki_bytes, *}; 5 | use rcgen::generate_simple_self_signed; 6 | use rustls::{client::ServerCertVerifier, server::ClientCertVerifier}; 7 | 8 | #[test] 9 | fn serde_round_trip() { 10 | let subject_alt_names = vec!["localhost".to_string()]; 11 | 12 | let cert = generate_simple_self_signed(subject_alt_names).unwrap(); 13 | let cert_bytes: Vec = cert.serialize_der().unwrap(); 14 | 15 | let spki = cert_bytes_to_spki_bytes(&cert_bytes); 16 | let psk = Psk::from_der(&spki).unwrap(); 17 | let psk_bytes = bincode::serialize(&psk).unwrap(); 18 | let psk_roundtripped = bincode::deserialize::(&psk_bytes).unwrap(); 19 | assert_eq!(psk, psk_roundtripped); 20 | } 21 | 22 | #[test] 23 | fn rc_gen_self_client() { 24 | let subject_alt_names = vec!["localhost".to_string()]; 25 | 26 | let cert = generate_simple_self_signed(subject_alt_names).unwrap(); 27 | let cert_bytes: Vec = cert.serialize_der().unwrap(); 28 | let spki = cert_bytes_to_spki_bytes(&cert_bytes); 29 | let psk = Psk::from_der(&spki).unwrap(); 30 | 31 | let now = SystemTime::now(); 32 | let rstls_cert = rustls::Certificate(cert_bytes); 33 | 34 | assert!(psk.verify_client_cert(&rstls_cert, &[], now).is_ok()); 35 | } 36 | 37 | #[test] 38 | fn rc_gen_not_self_client() { 39 | let subject_alt_names = vec!["localhost".to_string()]; 40 | 41 | let cert = generate_simple_self_signed(subject_alt_names.clone()).unwrap(); 42 | let cert_bytes: Vec = cert.serialize_der().unwrap(); 43 | 44 | let other_cert = generate_simple_self_signed(subject_alt_names).unwrap(); 45 | let other_bytes: Vec = other_cert.serialize_der().unwrap(); 46 | let spki = cert_bytes_to_spki_bytes(&other_bytes); 47 | let psk = Psk::from_der(&spki).unwrap(); 48 | 49 | let now = SystemTime::now(); 50 | let rstls_cert = rustls::Certificate(cert_bytes); 51 | 52 | assert!(psk 53 | .verify_client_cert(&rstls_cert, &[], now) 54 | .err() 55 | .unwrap() 56 | .to_string() 57 | .contains("invalid peer certificate")); 58 | } 59 | 60 | #[test] 61 | fn rc_gen_self_server() { 62 | let subject_alt_names = vec!["localhost".to_string()]; 63 | 64 | let cert = generate_simple_self_signed(subject_alt_names).unwrap(); 65 | let cert_bytes: Vec = cert.serialize_der().unwrap(); 66 | let spki = cert_bytes_to_spki_bytes(&cert_bytes); 67 | let psk = Psk::from_der(&spki).unwrap(); 68 | 69 | let now = SystemTime::now(); 70 | let rstls_cert = rustls::Certificate(cert_bytes); 71 | 72 | let mut empty = std::iter::empty(); 73 | 74 | assert!(psk 75 | .verify_server_cert( 76 | &rstls_cert, 77 | &[], 78 | &rustls::ServerName::try_from("localhost").unwrap(), 79 | &mut empty, 80 | &[], 81 | now 82 | ) 83 | .is_ok()); 84 | } 85 | 86 | #[test] 87 | fn rc_gen_not_self_server() { 88 | let subject_alt_names = vec!["localhost".to_string()]; 89 | 90 | let cert = generate_simple_self_signed(subject_alt_names.clone()).unwrap(); 91 | let cert_bytes: Vec = cert.serialize_der().unwrap(); 92 | 93 | let other_cert = generate_simple_self_signed(subject_alt_names).unwrap(); 94 | let other_bytes: Vec = other_cert.serialize_der().unwrap(); 95 | let spki = cert_bytes_to_spki_bytes(&other_bytes); 96 | let psk = Psk::from_der(&spki).unwrap(); 97 | let now = SystemTime::now(); 98 | let rstls_cert = rustls::Certificate(cert_bytes); 99 | 100 | let mut empty = std::iter::empty(); 101 | 102 | assert!(psk 103 | .verify_server_cert( 104 | &rstls_cert, 105 | &[], 106 | &rustls::ServerName::try_from("localhost").unwrap(), 107 | &mut empty, 108 | &[], 109 | now 110 | ) 111 | .err() 112 | .unwrap() 113 | .to_string() 114 | .contains("invalid peer certificate")); 115 | } 116 | -------------------------------------------------------------------------------- /crates/rccheck/src/tests/test_utils.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2022, Mysten Labs, Inc. 2 | // SPDX-License-Identifier: Apache-2.0 3 | 4 | use proptest::prelude::*; 5 | use proptest::strategy::Strategy; 6 | use x509_parser::prelude::FromDer; 7 | use x509_parser::prelude::X509Certificate; 8 | 9 | /// 10 | /// Proptest Helpers 11 | /// 12 | 13 | pub fn dalek_keypair_strategy() -> impl Strategy { 14 | any::<[u8; 32]>() 15 | .prop_map(|seed| { 16 | let mut rng = ::from_seed(seed); 17 | ed25519_dalek::Keypair::generate(&mut rng) 18 | }) 19 | .no_shrink() 20 | } 21 | 22 | pub fn dalek_pubkey_strategy() -> impl Strategy { 23 | dalek_keypair_strategy().prop_map(|v| v.public) 24 | } 25 | 26 | /// 27 | /// Misc Helpers 28 | /// 29 | 30 | pub fn cert_bytes_to_spki_bytes(cert_bytes: &[u8]) -> Vec { 31 | let cert_parsed = X509Certificate::from_der(cert_bytes) 32 | .map_err(|_| rustls::Error::InvalidCertificateEncoding) 33 | .unwrap(); 34 | let spki = cert_parsed.1.public_key().clone(); 35 | spki.raw.to_vec() 36 | } 37 | -------------------------------------------------------------------------------- /crates/telemetry-subscribers/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "telemetry-subscribers" 3 | version = "0.2.0" 4 | license = "Apache-2.0" 5 | authors = ["Evan Chan "] 6 | description = "Library for common telemetry and observability functionality" 7 | repository = "https://github.com/mystenlabs/mysten-infra" 8 | edition = "2021" 9 | publish = ["crates-io"] 10 | 11 | [dependencies] 12 | console-subscriber = { version = "0.1.6", optional = true } 13 | crossterm = "0.25.0" 14 | once_cell = "1.13.0" 15 | opentelemetry = { version = "0.18.0", features = ["rt-tokio"], optional = true } 16 | opentelemetry-jaeger = { version = "0.17.0", features = ["rt-tokio"], optional = true } 17 | prometheus = "0.13.1" 18 | tokio = { version = "1.21.2", features = ["sync", "macros", "rt", "rt-multi-thread"] } 19 | tracing = "0.1.37" 20 | tracing-appender = "0.2.2" 21 | tracing-bunyan-formatter = "0.3.3" 22 | tracing-chrome = { version = "0.7.0", optional = true } 23 | tracing-opentelemetry = { version = "0.18.0", optional = true } 24 | tracing-subscriber = { version = "0.3.15", features = ["std", "time", "registry", "env-filter"] } 25 | 26 | [features] 27 | default = ["jaeger", "chrome"] 28 | tokio-console = ["console-subscriber"] 29 | jaeger = ["tracing-opentelemetry", "opentelemetry", "opentelemetry-jaeger"] 30 | chrome = ["tracing-chrome"] 31 | 32 | [dev-dependencies] 33 | camino = "1.0.9" 34 | -------------------------------------------------------------------------------- /crates/telemetry-subscribers/README.md: -------------------------------------------------------------------------------- 1 | # Telemetry library 2 | 3 | This is a library for common telemetry functionality, especially subscribers for [Tokio tracing](https://github.com/tokio-rs/tracing) 4 | libraries. Here we simply package many common subscribers, such as writing trace data to Jaeger, distributed tracing, 5 | common logs and metrics destinations, etc. into a easy to configure common package. There are also 6 | some unique layers such as one to automatically create Prometheus latency histograms for spans. 7 | 8 | We also purposely separate out logging levels from span creation. This is often needed by production apps 9 | as normally it is not desired to log at very high levels, but still desirable to gather sampled span data 10 | all the way down to TRACE level spans. 11 | 12 | Getting started is easy. In your app: 13 | 14 | ```rust 15 | let config = telemetry::TelemetryConfig::new("my_app"); 16 | let guard = telemetry::init(config); 17 | ``` 18 | 19 | It is important to retain the guard until the end of the program. Assign it in the main fn and keep it, 20 | for once it drops then log output will stop. 21 | 22 | There is a builder API available: just do `TelemetryConfig::new()...` Another convenient initialization method 23 | is `TelemetryConfig::new().with_env()` to populate the config from environment vars. 24 | 25 | You can also run the example and see output in ANSI color: 26 | 27 | cargo run --example easy-init 28 | 29 | ## Features 30 | - `jaeger` - this feature is enabled by default as it enables jaeger tracing 31 | - `json` - Bunyan formatter - JSON log output, optional 32 | - `tokio-console` - [Tokio-console](https://github.com/tokio-rs/console) subscriber, optional 33 | 34 | ### Stdout vs file output 35 | 36 | By default, logs (but not spans) are formatted for human readability and output to stdout, with key-value tags at the end of every line. 37 | `RUST_LOG` can be configured for custom logging output, including filtering. 38 | 39 | By setting `log_file` in the config, one can write log output to a daily-rotated file. 40 | 41 | ### Tracing and span output 42 | 43 | Detailed span start and end logs can be generated by defining the `json_log_output` config variable. Note that this causes all output to be in JSON format, which is not as human-readable, so it is not enabled by default. 44 | This output can easily be fed to backends such as ElasticSearch for indexing, alerts, aggregation, and analysis. 45 | 46 | NOTE: JSON output requires the `json` crate feature to be enabled. 47 | 48 | ### Jaeger (seeing distributed traces) 49 | 50 | To see nested spans visualized with [Jaeger](https://www.jaegertracing.io), do the following: 51 | 52 | 1. Run this to get a local Jaeger container: `docker run -d -p6831:6831/udp -p6832:6832/udp -p16686:16686 jaegertracing/all-in-one:latest` 53 | 2. Set `enable_jaeger` config setting to true or set `TOKIO_JAEGER` env var 54 | 3. Run your app 55 | 4. Browse to `http://localhost:16686/` and select the service you configured using `service_name` 56 | 57 | NOTE: separate spans (which are not nested) are not connected as a single trace for now. 58 | 59 | Jaeger subscriber is enabled by default but is protected by the jaeger feature flag. If you'd like to leave 60 | out the Jaeger dependencies, you can turn off the default-features in your dependency: 61 | 62 | telemetry = { url = "...", default-features = false } 63 | 64 | ### Automatic Prometheus span latencies 65 | 66 | Included in this library is a tracing-subscriber layer named `PrometheusSpanLatencyLayer`. It will create 67 | a Prometheus histogram to track latencies for every span in your app, which is super convenient for tracking 68 | span performance in production apps. 69 | 70 | Enabling this layer can only be done programmatically, by passing in a Prometheus registry to `TelemetryConfig`. 71 | 72 | ### Span levels vs log levels 73 | 74 | What spans are included for Jaeger output, automatic span latencies, etc.? These are controlled by 75 | the `span_level` config attribute, or the `TS_SPAN_LEVEL` environment variable. Note that this is 76 | separate from `RUST_LOG`, so that you can separately control the logging verbosity from the level of 77 | spans that are to be recorded and traced. 78 | 79 | ### Live async inspection / Tokio Console 80 | 81 | [Tokio-console](https://github.com/tokio-rs/console) is an awesome CLI tool designed to analyze and help debug Rust apps using Tokio, in real time! It relies on a special subscriber. 82 | 83 | 1. Build your app using a special flag: `RUSTFLAGS="--cfg tokio_unstable" cargo build` 84 | 2. Enable the `tokio-console` feature for this crate. 85 | 2. Set the `tokio_console` config setting when running your app (or set TOKIO_CONSOLE env var if using config `with_env()` method) 86 | 3. Clone the console repo and `cargo run` to launch the console 87 | 88 | NOTE: setting tokio TRACE logs is NOT necessary. It says that in the docs but there's no need to change Tokio logging levels at all. The console subscriber has a special filter enabled taking care of that. 89 | 90 | By default, Tokio console listens on port 6669. To change this setting as well as other setting such as 91 | the retention policy, please see the [configuration](https://docs.rs/console-subscriber/latest/console_subscriber/struct.Builder.html#configuration) guide. 92 | 93 | ### Custom panic hook 94 | 95 | This library installs a custom panic hook which records a log (event) at ERROR level using the tracing 96 | crate. This allows span information from the panic to be properly recorded as well. 97 | 98 | To exit the process on panic, set the `CRASH_ON_PANIC` environment variable. 99 | -------------------------------------------------------------------------------- /crates/telemetry-subscribers/examples/easy-init.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2022, Mysten Labs, Inc. 2 | // SPDX-License-Identifier: Apache-2.0 3 | 4 | use tracing::{debug, info, warn}; 5 | 6 | fn main() { 7 | let _guard = telemetry_subscribers::TelemetryConfig::new("my_app") 8 | .with_env() 9 | .init(); 10 | 11 | info!(a = 1, "This will be INFO."); 12 | debug!(a = 2, "This will be DEBUG."); 13 | warn!(a = 3, "This will be WARNING."); 14 | panic!("This should cause error logs to be printed out!"); 15 | } 16 | -------------------------------------------------------------------------------- /crates/telemetry-subscribers/src/span_latency_prom.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2022, Mysten Labs, Inc. 2 | // SPDX-License-Identifier: Apache-2.0 3 | 4 | //! This is a module that records Tokio-tracing [span](https://docs.rs/tracing/latest/tracing/span/index.html) 5 | //! latencies into Prometheus histograms directly. 6 | //! The name of the Prometheus histogram is "tracing_span_latencies[_sum/count/bucket]" 7 | //! 8 | //! There is also the tracing-timing crate, from which this differs significantly: 9 | //! - tracing-timing records latencies between events (logs). We just want to record the latencies of spans. 10 | //! - tracing-timing does not output to Prometheus, and extracting data from its histograms takes extra CPU 11 | //! - tracing-timing records latencies using HDRHistogram, which is great, but uses extra memory when one 12 | //! is already using Prometheus 13 | //! Thus this is a much smaller and more focused module. 14 | //! 15 | //! ## Making spans visible 16 | //! This module can only record latencies for spans that get created. By default, this is controlled by 17 | //! env_filter and logging levels. 18 | 19 | use std::time::Instant; 20 | 21 | use prometheus::{exponential_buckets, register_histogram_vec_with_registry, Registry}; 22 | use tracing::{span, Subscriber}; 23 | 24 | /// A tokio_tracing Layer that records span latencies into Prometheus histograms 25 | pub struct PrometheusSpanLatencyLayer { 26 | span_latencies: prometheus::HistogramVec, 27 | } 28 | 29 | #[derive(Debug)] 30 | pub enum PrometheusSpanError { 31 | /// num_buckets must be positive >= 1 32 | ZeroOrNegativeNumBuckets, 33 | PromError(prometheus::Error), 34 | } 35 | 36 | impl From for PrometheusSpanError { 37 | fn from(err: prometheus::Error) -> Self { 38 | Self::PromError(err) 39 | } 40 | } 41 | 42 | const TOP_LATENCY_IN_NS: f64 = 300.0 * 1.0e9; 43 | const LOWEST_LATENCY_IN_NS: f64 = 500.0; 44 | 45 | impl PrometheusSpanLatencyLayer { 46 | /// Create a new layer, injecting latencies into the given registry. 47 | /// The num_buckets controls how many buckets thus how much memory and time series one 48 | /// uses up in Prometheus (and in the application). 10 is probably a minimum. 49 | pub fn try_new(registry: &Registry, num_buckets: usize) -> Result { 50 | if num_buckets < 1 { 51 | return Err(PrometheusSpanError::ZeroOrNegativeNumBuckets); 52 | } 53 | 54 | // Histogram for span latencies must accommodate a wide range of possible latencies, so 55 | // don't use the default Prometheus buckets. Latencies in NS. Calculate the multiplier 56 | // to go from LOWEST to TOP in num_bucket steps, step n+1 = step n * factor. 57 | let factor = (TOP_LATENCY_IN_NS / LOWEST_LATENCY_IN_NS).powf(1.0 / (num_buckets as f64)); 58 | let buckets = exponential_buckets(LOWEST_LATENCY_IN_NS, factor, num_buckets)?; 59 | let span_latencies = register_histogram_vec_with_registry!( 60 | "tracing_span_latencies", 61 | "Latencies from tokio-tracing spans", 62 | &["span_name"], 63 | buckets, 64 | registry 65 | )?; 66 | Ok(Self { span_latencies }) 67 | } 68 | } 69 | 70 | struct PromSpanTimestamp(Instant); 71 | 72 | impl tracing_subscriber::Layer for PrometheusSpanLatencyLayer 73 | where 74 | S: Subscriber + for<'span> tracing_subscriber::registry::LookupSpan<'span>, 75 | { 76 | fn on_new_span( 77 | &self, 78 | _attrs: &span::Attributes, 79 | id: &span::Id, 80 | ctx: tracing_subscriber::layer::Context, 81 | ) { 82 | let span = ctx.span(id).unwrap(); 83 | // NOTE: there are other extensions that insert timings. For example, 84 | // tracing_subscriber's with_span_events() inserts events at open and close that contain timings. 85 | // However, we cannot be guaranteed that those events would be turned on. 86 | span.extensions_mut() 87 | .insert(PromSpanTimestamp(Instant::now())); 88 | } 89 | 90 | fn on_close(&self, id: span::Id, ctx: tracing_subscriber::layer::Context<'_, S>) { 91 | let span = ctx.span(&id).unwrap(); 92 | let start_time = span 93 | .extensions() 94 | .get::() 95 | .expect("Could not find saved timestamp on span") 96 | .0; 97 | let elapsed_ns = start_time.elapsed().as_nanos() as u64; 98 | self.span_latencies 99 | .with_label_values(&[span.name()]) 100 | .observe(elapsed_ns as f64); 101 | } 102 | } 103 | 104 | #[cfg(test)] 105 | mod tests { 106 | use super::*; 107 | 108 | #[test] 109 | fn test_prom_span_latency_init() { 110 | let registry = prometheus::Registry::new(); 111 | 112 | let res = PrometheusSpanLatencyLayer::try_new(®istry, 0); 113 | assert!(matches!( 114 | res, 115 | Err(PrometheusSpanError::ZeroOrNegativeNumBuckets) 116 | )); 117 | } 118 | } 119 | -------------------------------------------------------------------------------- /crates/telemetry-subscribers/tests/reload.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2022, Mysten Labs, Inc. 2 | // SPDX-License-Identifier: Apache-2.0 3 | 4 | use camino::Utf8PathBuf; 5 | use std::env; 6 | use std::fs; 7 | use telemetry_subscribers::TelemetryConfig; 8 | use tracing::{debug, info}; 9 | 10 | #[test] 11 | fn reload() { 12 | let log_file_prefix = "out.log"; 13 | let mut config = TelemetryConfig::new("test"); 14 | config.log_file = Some(log_file_prefix.to_owned()); 15 | config.panic_hook = false; 16 | 17 | let (guard, reload_handle) = config.init(); 18 | 19 | info!("Should be able to see this"); 20 | debug!("This won't be captured"); 21 | reload_handle.update("debug").unwrap(); 22 | debug!("Now you can see this!"); 23 | 24 | debug!("{}", reload_handle.get().unwrap()); 25 | 26 | drop(guard); 27 | 28 | let current_dir = Utf8PathBuf::from_path_buf(env::current_dir().unwrap()).unwrap(); 29 | 30 | for entry in current_dir.read_dir_utf8().unwrap() { 31 | let entry = entry.unwrap(); 32 | 33 | if entry.file_name().starts_with(log_file_prefix) { 34 | let logs = fs::read_to_string(entry.path()).unwrap(); 35 | 36 | assert!(logs.contains("Should be able to see this")); 37 | assert!(!logs.contains("This won't be captured")); 38 | assert!(logs.contains("Now you can see this!")); 39 | 40 | fs::remove_file(entry.path()).unwrap(); 41 | return; 42 | } 43 | } 44 | 45 | panic!("could not find log file"); 46 | } 47 | -------------------------------------------------------------------------------- /crates/typed-store-derive/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "typed-store-derive" 3 | version = "0.3.0" 4 | license = "Apache-2.0" 5 | authors = ["Ade A. "] 6 | description = "macros for the typed-store crate" 7 | repository = "https://github.com/mystenlabs/mysten-infra" 8 | edition = "2021" 9 | publish = ["crates-io"] 10 | 11 | [lib] 12 | proc-macro = true 13 | 14 | [dependencies] 15 | proc-macro2 = "1.0.47" 16 | quote = "1.0.9" 17 | syn = { version = "1.0.102", features = ["full"] } 18 | 19 | [dev-dependencies] 20 | eyre = "0.6.8" 21 | rocksdb = { version = "0.19.0", features = ["snappy", "lz4", "zstd", "zlib", "multi-threaded-cf"], default-features = false } 22 | tempfile = "3.3.0" 23 | typed-store = { path = "../typed-store" } 24 | tokio = { version = "1.21.2", features = ["test-util"] } 25 | -------------------------------------------------------------------------------- /crates/typed-store/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "typed-store" 3 | version = "0.4.0" 4 | license = "Apache-2.0" 5 | authors = ["Francois Garillot "] 6 | description = "a typed database interface" 7 | repository = "https://github.com/mystenlabs/mysten-infra" 8 | edition = "2021" 9 | publish = ["crates-io"] 10 | 11 | [dependencies] 12 | bincode = "1.3.3" 13 | collectable = "0.0.2" 14 | eyre = "0.6.8" 15 | fdlimit = "0.2.1" 16 | once_cell = "1.15.0" 17 | tap = "1.0.1" 18 | prometheus = "0.13.2" 19 | # deactivation of bzip2 due to https://github.com/rust-rocksdb/rust-rocksdb/issues/609 20 | rocksdb = { version = "0.19.0", features = ["snappy", "lz4", "zstd", "zlib", "multi-threaded-cf"], default-features = false } 21 | serde = { version = "1.0.140", features = ["derive"] } 22 | thiserror = "1.0.37" 23 | tokio = { version = "1.21.2", features = ["full", "test-util"] } 24 | tracing = "0.1.37" 25 | 26 | [dev-dependencies] 27 | tempfile = "3.3.0" 28 | once_cell = "1.13.0" 29 | proc-macro2 = "1.0.47" 30 | quote = "1.0.9" 31 | syn = { version = "1.0.102", features = ["derive"] } 32 | typed-store-derive = {path = "../typed-store-derive"} 33 | -------------------------------------------------------------------------------- /crates/typed-store/src/lib.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2021, Facebook, Inc. and its affiliates 2 | // Copyright (c) 2022, Mysten Labs, Inc. 3 | // SPDX-License-Identifier: Apache-2.0 4 | #![warn( 5 | future_incompatible, 6 | nonstandard_style, 7 | rust_2018_idioms, 8 | rust_2021_compatibility 9 | )] 10 | 11 | use eyre::Result; 12 | use rocksdb::MultiThreaded; 13 | use serde::{de::DeserializeOwned, Serialize}; 14 | use std::{ 15 | cmp::Eq, 16 | collections::{HashMap, VecDeque}, 17 | hash::Hash, 18 | sync::Arc, 19 | }; 20 | use tokio::sync::{ 21 | mpsc::{channel, Sender}, 22 | oneshot, 23 | }; 24 | 25 | pub mod traits; 26 | pub use traits::Map; 27 | pub mod metrics; 28 | pub mod rocks; 29 | pub use metrics::DBMetrics; 30 | 31 | #[cfg(test)] 32 | #[path = "tests/store_tests.rs"] 33 | pub mod store_tests; 34 | 35 | pub type StoreError = rocks::TypedStoreError; 36 | 37 | type StoreResult = Result; 38 | 39 | pub enum StoreCommand { 40 | Write(Key, Value, Option>>), 41 | WriteAll(Vec<(Key, Value)>, oneshot::Sender>), 42 | Delete(Key), 43 | DeleteAll(Vec, oneshot::Sender>), 44 | Read(Key, oneshot::Sender>>), 45 | ReadRawBytes(Key, oneshot::Sender>>>), 46 | ReadAll(Vec, oneshot::Sender>>>), 47 | NotifyRead(Key, oneshot::Sender>>), 48 | Iter( 49 | Option bool + Send>>, 50 | oneshot::Sender>, 51 | ), 52 | } 53 | 54 | #[derive(Clone)] 55 | pub struct Store { 56 | channel: Sender>, 57 | pub rocksdb: Arc>, 58 | } 59 | 60 | impl Store 61 | where 62 | Key: Hash + Eq + Serialize + DeserializeOwned + Send + 'static, 63 | Value: Serialize + DeserializeOwned + Send + Clone + 'static, 64 | { 65 | pub fn new(keyed_db: rocks::DBMap) -> Self { 66 | let mut obligations = HashMap::>>::new(); 67 | let clone_db = keyed_db.rocksdb.clone(); 68 | let (tx, mut rx) = channel(100); 69 | tokio::spawn(async move { 70 | while let Some(command) = rx.recv().await { 71 | match command { 72 | StoreCommand::Write(key, value, sender) => { 73 | let response = keyed_db.insert(&key, &value); 74 | if response.is_ok() { 75 | if let Some(mut senders) = obligations.remove(&key) { 76 | while let Some(s) = senders.pop_front() { 77 | let _ = s.send(Ok(Some(value.clone()))); 78 | } 79 | } 80 | } 81 | if let Some(replier) = sender { 82 | let _ = replier.send(response); 83 | } 84 | } 85 | StoreCommand::WriteAll(key_values, sender) => { 86 | let response = 87 | keyed_db.multi_insert(key_values.iter().map(|(k, v)| (k, v))); 88 | 89 | if response.is_ok() { 90 | for (key, _) in key_values { 91 | if let Some(mut senders) = obligations.remove(&key) { 92 | while let Some(s) = senders.pop_front() { 93 | let _ = s.send(Ok(None)); 94 | } 95 | } 96 | } 97 | } 98 | let _ = sender.send(response); 99 | } 100 | StoreCommand::Delete(key) => { 101 | let _ = keyed_db.remove(&key); 102 | if let Some(mut senders) = obligations.remove(&key) { 103 | while let Some(s) = senders.pop_front() { 104 | let _ = s.send(Ok(None)); 105 | } 106 | } 107 | } 108 | StoreCommand::DeleteAll(keys, sender) => { 109 | let response = keyed_db.multi_remove(keys.iter()); 110 | // notify the obligations only when the delete was successful 111 | if response.is_ok() { 112 | for key in keys { 113 | if let Some(mut senders) = obligations.remove(&key) { 114 | while let Some(s) = senders.pop_front() { 115 | let _ = s.send(Ok(None)); 116 | } 117 | } 118 | } 119 | } 120 | let _ = sender.send(response); 121 | } 122 | StoreCommand::Read(key, sender) => { 123 | let response = keyed_db.get(&key); 124 | let _ = sender.send(response); 125 | } 126 | StoreCommand::ReadAll(keys, sender) => { 127 | let response = keyed_db.multi_get(keys.as_slice()); 128 | let _ = sender.send(response); 129 | } 130 | StoreCommand::NotifyRead(key, sender) => { 131 | let response = keyed_db.get(&key); 132 | if let Ok(Some(_)) = response { 133 | let _ = sender.send(response); 134 | } else { 135 | obligations 136 | .entry(key) 137 | .or_insert_with(VecDeque::new) 138 | .push_back(sender) 139 | } 140 | } 141 | StoreCommand::Iter(predicate, sender) => { 142 | let response = if let Some(func) = predicate { 143 | keyed_db.iter().filter(func).collect() 144 | } else { 145 | // Beware, we may overload the memory with a large table! 146 | keyed_db.iter().collect() 147 | }; 148 | 149 | let _ = sender.send(response); 150 | } 151 | StoreCommand::ReadRawBytes(key, sender) => { 152 | let response = keyed_db.get_raw_bytes(&key); 153 | let _ = sender.send(response); 154 | } 155 | } 156 | } 157 | }); 158 | Self { 159 | channel: tx, 160 | rocksdb: clone_db, 161 | } 162 | } 163 | } 164 | 165 | impl Store 166 | where 167 | Key: Serialize + DeserializeOwned + Send, 168 | Value: Serialize + DeserializeOwned + Send, 169 | { 170 | pub async fn async_write(&self, key: Key, value: Value) { 171 | if let Err(e) = self 172 | .channel 173 | .send(StoreCommand::Write(key, value, None)) 174 | .await 175 | { 176 | panic!("Failed to send Write command to store: {e}"); 177 | } 178 | } 179 | 180 | pub async fn sync_write(&self, key: Key, value: Value) -> StoreResult<()> { 181 | let (sender, receiver) = oneshot::channel(); 182 | if let Err(e) = self 183 | .channel 184 | .send(StoreCommand::Write(key, value, Some(sender))) 185 | .await 186 | { 187 | panic!("Failed to send Write command to store: {e}"); 188 | } 189 | receiver 190 | .await 191 | .expect("Failed to receive reply to Write command from store") 192 | } 193 | 194 | /// Atomically writes all the key-value pairs in storage. 195 | /// If the operation is successful, then the result will be a non 196 | /// error empty result. Otherwise the error is returned. 197 | pub async fn sync_write_all( 198 | &self, 199 | key_value_pairs: impl IntoIterator, 200 | ) -> StoreResult<()> { 201 | let (sender, receiver) = oneshot::channel(); 202 | if let Err(e) = self 203 | .channel 204 | .send(StoreCommand::WriteAll( 205 | key_value_pairs.into_iter().collect(), 206 | sender, 207 | )) 208 | .await 209 | { 210 | panic!("Failed to send WriteAll command to store: {e}"); 211 | } 212 | receiver 213 | .await 214 | .expect("Failed to receive reply to WriteAll command from store") 215 | } 216 | 217 | pub async fn remove(&self, key: Key) { 218 | if let Err(e) = self.channel.send(StoreCommand::Delete(key)).await { 219 | panic!("Failed to send Delete command to store: {e}"); 220 | } 221 | } 222 | 223 | /// Atomically removes all the data referenced by the provided keys. 224 | /// If the operation is successful, then the result will be a non 225 | /// error empty result. Otherwise the error is returned. 226 | pub async fn remove_all(&self, keys: impl IntoIterator) -> StoreResult<()> { 227 | let (sender, receiver) = oneshot::channel(); 228 | if let Err(e) = self 229 | .channel 230 | .send(StoreCommand::DeleteAll(keys.into_iter().collect(), sender)) 231 | .await 232 | { 233 | panic!("Failed to send DeleteAll command to store: {e}"); 234 | } 235 | receiver 236 | .await 237 | .expect("Failed to receive reply to RemoveAll command from store") 238 | } 239 | 240 | /// Returns the read value in raw bincode bytes 241 | pub async fn read_raw_bytes(&self, key: Key) -> StoreResult>> { 242 | let (sender, receiver) = oneshot::channel(); 243 | if let Err(e) = self 244 | .channel 245 | .send(StoreCommand::ReadRawBytes(key, sender)) 246 | .await 247 | { 248 | panic!("Failed to send ReadRawBytes command to store: {e}"); 249 | } 250 | receiver 251 | .await 252 | .expect("Failed to receive reply to ReadRawBytes command from store") 253 | } 254 | 255 | pub async fn read(&self, key: Key) -> StoreResult> { 256 | let (sender, receiver) = oneshot::channel(); 257 | if let Err(e) = self.channel.send(StoreCommand::Read(key, sender)).await { 258 | panic!("Failed to send Read command to store: {e}"); 259 | } 260 | receiver 261 | .await 262 | .expect("Failed to receive reply to Read command from store") 263 | } 264 | 265 | /// Fetches all the values for the provided keys. 266 | pub async fn read_all( 267 | &self, 268 | keys: impl IntoIterator, 269 | ) -> StoreResult>> { 270 | let (sender, receiver) = oneshot::channel(); 271 | if let Err(e) = self 272 | .channel 273 | .send(StoreCommand::ReadAll(keys.into_iter().collect(), sender)) 274 | .await 275 | { 276 | panic!("Failed to send ReadAll command to store: {e}"); 277 | } 278 | receiver 279 | .await 280 | .expect("Failed to receive reply to ReadAll command from store") 281 | } 282 | 283 | pub async fn notify_read(&self, key: Key) -> StoreResult> { 284 | let (sender, receiver) = oneshot::channel(); 285 | if let Err(e) = self 286 | .channel 287 | .send(StoreCommand::NotifyRead(key, sender)) 288 | .await 289 | { 290 | panic!("Failed to send NotifyRead command to store: {e}"); 291 | } 292 | receiver 293 | .await 294 | .expect("Failed to receive reply to NotifyRead command from store") 295 | } 296 | 297 | pub async fn iter( 298 | &self, 299 | predicate: Option bool + Send>>, 300 | ) -> HashMap { 301 | let (sender, receiver) = oneshot::channel(); 302 | if let Err(e) = self 303 | .channel 304 | .send(StoreCommand::Iter(predicate, sender)) 305 | .await 306 | { 307 | panic!("Failed to send Iter command to store: {e}"); 308 | } 309 | receiver 310 | .await 311 | .expect("Failed to receive reply to Iter command from store") 312 | } 313 | } 314 | -------------------------------------------------------------------------------- /crates/typed-store/src/rocks/errors.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2022, Mysten Labs, Inc. 2 | // SPDX-License-Identifier: Apache-2.0 3 | 4 | use bincode::ErrorKind as BincodeErrorKind; 5 | 6 | use rocksdb::Error as RocksError; 7 | use serde::{Deserialize, Serialize}; 8 | use std::{fmt, fmt::Display}; 9 | use thiserror::Error; 10 | 11 | #[non_exhaustive] 12 | #[derive(Error, Debug, Serialize, Deserialize, PartialEq, Eq, Hash, Clone)] 13 | pub enum TypedStoreError { 14 | #[error("rocksdb error: {0}")] 15 | RocksDBError(String), 16 | #[error("(de)serialization error: {0}")] 17 | SerializationError(String), 18 | #[error("the column family {0} was not registered with the database")] 19 | UnregisteredColumn(String), 20 | #[error("a batch operation can't operate across databases")] 21 | CrossDBBatch, 22 | #[error("Metric reporting thread failed with error")] 23 | MetricsReporting, 24 | } 25 | 26 | #[derive(Serialize, Deserialize, Clone, Eq, PartialEq, Hash, Debug, Error)] 27 | pub(crate) struct RocksErrorDef { 28 | message: String, 29 | } 30 | 31 | impl From for RocksErrorDef { 32 | fn from(err: RocksError) -> Self { 33 | RocksErrorDef { 34 | message: err.as_ref().to_string(), 35 | } 36 | } 37 | } 38 | 39 | impl From for TypedStoreError { 40 | fn from(err: RocksError) -> Self { 41 | TypedStoreError::RocksDBError(format!("{err}")) 42 | } 43 | } 44 | 45 | impl Display for RocksErrorDef { 46 | fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { 47 | self.message.fmt(formatter) 48 | } 49 | } 50 | 51 | #[derive(Serialize, Deserialize, Clone, Hash, Eq, PartialEq, Debug, Error)] 52 | pub(crate) enum BincodeErrorDef { 53 | Io(String), 54 | InvalidUtf8Encoding(String), 55 | InvalidBoolEncoding(u8), 56 | InvalidCharEncoding, 57 | InvalidTagEncoding(usize), 58 | DeserializeAnyNotSupported, 59 | SizeLimit, 60 | SequenceMustHaveLength, 61 | Custom(String), 62 | } 63 | 64 | impl fmt::Display for BincodeErrorDef { 65 | fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { 66 | match *self { 67 | BincodeErrorDef::Io(ref ioerr) => write!(fmt, "io error: {}", ioerr), 68 | BincodeErrorDef::InvalidUtf8Encoding(ref e) => { 69 | write!(fmt, "{}", e) 70 | } 71 | BincodeErrorDef::InvalidBoolEncoding(b) => { 72 | write!(fmt, "expected 0 or 1, found {}", b) 73 | } 74 | BincodeErrorDef::InvalidCharEncoding => write!(fmt, "{:?}", self), 75 | BincodeErrorDef::InvalidTagEncoding(tag) => { 76 | write!(fmt, "found {}", tag) 77 | } 78 | BincodeErrorDef::SequenceMustHaveLength => write!(fmt, "{:?}", self), 79 | BincodeErrorDef::SizeLimit => write!(fmt, "{:?}", self), 80 | BincodeErrorDef::DeserializeAnyNotSupported => write!( 81 | fmt, 82 | "Bincode does not support the serde::Deserializer::deserialize_any method" 83 | ), 84 | BincodeErrorDef::Custom(ref s) => s.fmt(fmt), 85 | } 86 | } 87 | } 88 | 89 | impl From for BincodeErrorDef { 90 | fn from(err: bincode::Error) -> Self { 91 | match err.as_ref() { 92 | BincodeErrorKind::Io(ioerr) => BincodeErrorDef::Io(ioerr.to_string()), 93 | BincodeErrorKind::InvalidUtf8Encoding(utf8err) => { 94 | BincodeErrorDef::InvalidUtf8Encoding(utf8err.to_string()) 95 | } 96 | BincodeErrorKind::InvalidBoolEncoding(byte) => { 97 | BincodeErrorDef::InvalidBoolEncoding(*byte) 98 | } 99 | BincodeErrorKind::InvalidCharEncoding => BincodeErrorDef::InvalidCharEncoding, 100 | BincodeErrorKind::InvalidTagEncoding(tag) => BincodeErrorDef::InvalidTagEncoding(*tag), 101 | BincodeErrorKind::DeserializeAnyNotSupported => { 102 | BincodeErrorDef::DeserializeAnyNotSupported 103 | } 104 | BincodeErrorKind::SizeLimit => BincodeErrorDef::SizeLimit, 105 | BincodeErrorKind::SequenceMustHaveLength => BincodeErrorDef::SequenceMustHaveLength, 106 | BincodeErrorKind::Custom(str) => BincodeErrorDef::Custom(str.to_owned()), 107 | } 108 | } 109 | } 110 | 111 | impl From for TypedStoreError { 112 | fn from(err: bincode::Error) -> Self { 113 | TypedStoreError::SerializationError(format!("{err}")) 114 | } 115 | } 116 | -------------------------------------------------------------------------------- /crates/typed-store/src/rocks/iter.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2022, Mysten Labs, Inc. 2 | // SPDX-License-Identifier: Apache-2.0 3 | use std::{marker::PhantomData, sync::Arc}; 4 | 5 | use bincode::Options; 6 | use rocksdb::Direction; 7 | 8 | use crate::metrics::{DBMetrics, SamplingInterval}; 9 | 10 | use super::{be_fix_int_ser, errors::TypedStoreError}; 11 | use serde::{de::DeserializeOwned, Serialize}; 12 | 13 | use super::DBRawIteratorMultiThreaded; 14 | 15 | /// An iterator over all key-value pairs in a data map. 16 | pub struct Iter<'a, K, V> { 17 | db_iter: DBRawIteratorMultiThreaded<'a>, 18 | _phantom: PhantomData<(K, V)>, 19 | direction: Direction, 20 | cf: String, 21 | db_metrics: Arc, 22 | iter_bytes_sample_interval: SamplingInterval, 23 | } 24 | 25 | impl<'a, K: DeserializeOwned, V: DeserializeOwned> Iter<'a, K, V> { 26 | pub(super) fn new( 27 | db_iter: DBRawIteratorMultiThreaded<'a>, 28 | cf: String, 29 | db_metrics: &Arc, 30 | iter_bytes_sample_interval: &SamplingInterval, 31 | ) -> Self { 32 | Self { 33 | db_iter, 34 | _phantom: PhantomData, 35 | direction: Direction::Forward, 36 | cf, 37 | db_metrics: db_metrics.clone(), 38 | iter_bytes_sample_interval: iter_bytes_sample_interval.clone(), 39 | } 40 | } 41 | } 42 | 43 | impl<'a, K: DeserializeOwned, V: DeserializeOwned> Iterator for Iter<'a, K, V> { 44 | type Item = (K, V); 45 | 46 | fn next(&mut self) -> Option { 47 | if self.db_iter.valid() { 48 | let config = bincode::DefaultOptions::new() 49 | .with_big_endian() 50 | .with_fixint_encoding(); 51 | let raw_key = self 52 | .db_iter 53 | .key() 54 | .expect("Valid iterator failed to get key"); 55 | let raw_value = self 56 | .db_iter 57 | .value() 58 | .expect("Valid iterator failed to get value"); 59 | let key = config.deserialize(raw_key).ok(); 60 | let value = bincode::deserialize(raw_value).ok(); 61 | if self.iter_bytes_sample_interval.sample() { 62 | let total_bytes_read = (raw_key.len() + raw_value.len()) as f64; 63 | self.db_metrics 64 | .op_metrics 65 | .rocksdb_iter_bytes 66 | .with_label_values(&[&self.cf]) 67 | .observe(total_bytes_read); 68 | } 69 | match self.direction { 70 | Direction::Forward => self.db_iter.next(), 71 | Direction::Reverse => self.db_iter.prev(), 72 | } 73 | 74 | key.and_then(|k| value.map(|v| (k, v))) 75 | } else { 76 | None 77 | } 78 | } 79 | } 80 | 81 | impl<'a, K: Serialize, V> Iter<'a, K, V> { 82 | /// Skips all the elements that are smaller than the given key, 83 | /// and either lands on the key or the first one greater than 84 | /// the key. 85 | pub fn skip_to(mut self, key: &K) -> Result { 86 | self.db_iter.seek(be_fix_int_ser(key)?); 87 | Ok(self) 88 | } 89 | 90 | /// Moves the iterator the element given or 91 | /// the one prior to it if it does not exist. If there is 92 | /// no element prior to it, it returns an empty iterator. 93 | pub fn skip_prior_to(mut self, key: &K) -> Result { 94 | self.db_iter.seek_for_prev(be_fix_int_ser(key)?); 95 | Ok(self) 96 | } 97 | 98 | /// Seeks to the last key in the database (at this column family). 99 | pub fn skip_to_last(mut self) -> Self { 100 | self.db_iter.seek_to_last(); 101 | self 102 | } 103 | 104 | /// Will make the direction of the iteration reverse and will 105 | /// create a new `RevIter` to consume. Every call to `next` method 106 | /// will give the next element from the end. 107 | pub fn reverse(mut self) -> RevIter<'a, K, V> { 108 | self.direction = Direction::Reverse; 109 | RevIter::new(self) 110 | } 111 | } 112 | 113 | /// An iterator with a reverted direction to the original. The `RevIter` 114 | /// is hosting an iteration which is consuming in the opposing direction. 115 | /// It's not possible to do further manipulation (ex re-reverse) to the 116 | /// iterator. 117 | pub struct RevIter<'a, K, V> { 118 | iter: Iter<'a, K, V>, 119 | } 120 | 121 | impl<'a, K, V> RevIter<'a, K, V> { 122 | fn new(iter: Iter<'a, K, V>) -> Self { 123 | Self { iter } 124 | } 125 | } 126 | 127 | impl<'a, K: DeserializeOwned, V: DeserializeOwned> Iterator for RevIter<'a, K, V> { 128 | type Item = (K, V); 129 | 130 | /// Will give the next item backwards 131 | fn next(&mut self) -> Option { 132 | self.iter.next() 133 | } 134 | } 135 | -------------------------------------------------------------------------------- /crates/typed-store/src/rocks/keys.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2022, Mysten Labs, Inc. 2 | // SPDX-License-Identifier: Apache-2.0 3 | use bincode::Options; 4 | 5 | use serde::{de::DeserializeOwned, Serialize}; 6 | use std::marker::PhantomData; 7 | 8 | use super::{be_fix_int_ser, DBRawIteratorMultiThreaded, TypedStoreError}; 9 | 10 | /// An iterator over the keys of a prefix. 11 | pub struct Keys<'a, K> { 12 | db_iter: DBRawIteratorMultiThreaded<'a>, 13 | _phantom: PhantomData, 14 | } 15 | 16 | impl<'a, K: DeserializeOwned> Keys<'a, K> { 17 | pub(crate) fn new(db_iter: DBRawIteratorMultiThreaded<'a>) -> Self { 18 | Self { 19 | db_iter, 20 | _phantom: PhantomData, 21 | } 22 | } 23 | } 24 | 25 | impl<'a, K: DeserializeOwned> Iterator for Keys<'a, K> { 26 | type Item = K; 27 | 28 | fn next(&mut self) -> Option { 29 | if self.db_iter.valid() { 30 | let config = bincode::DefaultOptions::new() 31 | .with_big_endian() 32 | .with_fixint_encoding(); 33 | let key = self.db_iter.key().and_then(|k| config.deserialize(k).ok()); 34 | 35 | self.db_iter.next(); 36 | key 37 | } else { 38 | None 39 | } 40 | } 41 | } 42 | 43 | impl<'a, K: Serialize> Keys<'a, K> { 44 | /// Skips all the elements that are smaller than the given key, 45 | /// and either lands on the key or the first one greater than 46 | /// the key. 47 | pub fn skip_to(mut self, key: &K) -> Result { 48 | self.db_iter.seek(be_fix_int_ser(key)?); 49 | Ok(self) 50 | } 51 | 52 | /// Moves the iterator the element given or 53 | /// the one prior to it if it does not exist. If there is 54 | /// no element prior to it, it returns an empty iterator. 55 | pub fn skip_prior_to(mut self, key: &K) -> Result { 56 | self.db_iter.seek_for_prev(be_fix_int_ser(key)?); 57 | Ok(self) 58 | } 59 | 60 | /// Seeks to the last key in the database (at this column family). 61 | /// 62 | pub fn skip_to_last(mut self) -> Self { 63 | self.db_iter.seek_to_last(); 64 | self 65 | } 66 | } 67 | -------------------------------------------------------------------------------- /crates/typed-store/src/rocks/values.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2022, Mysten Labs, Inc. 2 | // SPDX-License-Identifier: Apache-2.0 3 | use std::marker::PhantomData; 4 | 5 | use serde::de::DeserializeOwned; 6 | 7 | use super::DBRawIteratorMultiThreaded; 8 | 9 | /// An iterator over the values of a prefix. 10 | pub struct Values<'a, V> { 11 | db_iter: DBRawIteratorMultiThreaded<'a>, 12 | _phantom: PhantomData, 13 | } 14 | 15 | impl<'a, V: DeserializeOwned> Values<'a, V> { 16 | pub(crate) fn new(db_iter: DBRawIteratorMultiThreaded<'a>) -> Self { 17 | Self { 18 | db_iter, 19 | _phantom: PhantomData, 20 | } 21 | } 22 | } 23 | 24 | impl<'a, V: DeserializeOwned> Iterator for Values<'a, V> { 25 | type Item = V; 26 | 27 | fn next(&mut self) -> Option { 28 | if self.db_iter.valid() { 29 | let value = self.db_iter.key().and_then(|_| { 30 | self.db_iter 31 | .value() 32 | .and_then(|v| bincode::deserialize(v).ok()) 33 | }); 34 | 35 | self.db_iter.next(); 36 | value 37 | } else { 38 | None 39 | } 40 | } 41 | } 42 | -------------------------------------------------------------------------------- /crates/typed-store/src/tests/store_tests.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2021, Facebook, Inc. and its affiliates 2 | // Copyright (c) 2022, Mysten Labs, Inc. 3 | // SPDX-License-Identifier: Apache-2.0 4 | use super::*; 5 | 6 | fn temp_dir() -> std::path::PathBuf { 7 | tempfile::tempdir() 8 | .expect("Failed to open temporary directory") 9 | .into_path() 10 | } 11 | 12 | #[tokio::test] 13 | async fn create_store() { 14 | // Create new store. 15 | let db = rocks::DBMap::::open(temp_dir(), None, None).unwrap(); 16 | let _ = Store::::new(db); 17 | } 18 | 19 | #[tokio::test] 20 | async fn read_async_write_value() { 21 | // Create new store. 22 | let db = rocks::DBMap::, Vec>::open(temp_dir(), None, None).unwrap(); 23 | let store = Store::new(db); 24 | 25 | // Write value to the store. 26 | let key = vec![0u8, 1u8, 2u8, 3u8]; 27 | let value = vec![4u8, 5u8, 6u8, 7u8]; 28 | store.async_write(key.clone(), value.clone()).await; 29 | 30 | // Read value. 31 | let result = store.read(key).await; 32 | assert!(result.is_ok()); 33 | let read_value = result.unwrap(); 34 | assert!(read_value.is_some()); 35 | assert_eq!(read_value.unwrap(), value); 36 | } 37 | 38 | #[tokio::test] 39 | async fn read_sync_write_value() { 40 | // Create new store. 41 | let db = rocks::DBMap::, Vec>::open(temp_dir(), None, None).unwrap(); 42 | let store = Store::new(db); 43 | 44 | // Write value to the store. 45 | let key = vec![0u8, 1u8, 2u8, 3u8]; 46 | let value = vec![4u8, 5u8, 6u8, 7u8]; 47 | store.sync_write(key.clone(), value.clone()).await.unwrap(); 48 | 49 | // Read value. 50 | let result = store.read(key).await; 51 | assert!(result.is_ok()); 52 | let read_value = result.unwrap(); 53 | assert!(read_value.is_some()); 54 | assert_eq!(read_value.unwrap(), value); 55 | } 56 | 57 | #[tokio::test] 58 | async fn read_raw_write_value() { 59 | // Create new store. 60 | let db = rocks::DBMap::, String>::open(temp_dir(), None, None).unwrap(); 61 | let store = Store::new(db); 62 | 63 | // Write value to the store. 64 | let key = vec![0u8, 1u8, 2u8, 3u8]; 65 | let value = "123456".to_string(); 66 | store.async_write(key.clone(), value.clone()).await; 67 | 68 | // Read value. 69 | let result = store.read_raw_bytes(key).await; 70 | assert!(result.is_ok()); 71 | let read_value = result.unwrap(); 72 | assert!(read_value.is_some()); 73 | assert_eq!(read_value, Some(bincode::serialize(&value).unwrap())); 74 | } 75 | 76 | #[tokio::test] 77 | async fn read_unknown_key() { 78 | // Create new store. 79 | let db = rocks::DBMap::, Vec>::open(temp_dir(), None, None).unwrap(); 80 | let store = Store::new(db); 81 | 82 | // Try to read unknown key. 83 | let key = vec![0u8, 1u8, 2u8, 3u8]; 84 | let result = store.read(key).await; 85 | assert!(result.is_ok()); 86 | assert!(result.unwrap().is_none()); 87 | } 88 | 89 | #[tokio::test] 90 | async fn read_notify() { 91 | // Create new store. 92 | let db = rocks::DBMap::, Vec>::open(temp_dir(), None, None).unwrap(); 93 | let store = Store::new(db); 94 | 95 | // Try to read a kew that does not yet exist. Then write a value 96 | // for that key and check that notify read returns the result. 97 | let key = vec![0u8, 1u8, 2u8, 3u8]; 98 | let value = vec![4u8, 5u8, 6u8, 7u8]; 99 | 100 | // Try to read a missing value. 101 | let store_copy = store.clone(); 102 | let key_copy = key.clone(); 103 | let value_copy = value.clone(); 104 | let handle = tokio::spawn(async move { 105 | match store_copy.notify_read(key_copy).await { 106 | Ok(Some(v)) => assert_eq!(v, value_copy), 107 | _ => panic!("Failed to read from store"), 108 | } 109 | }); 110 | 111 | // Write the missing value and ensure the handle terminates correctly. 112 | store.async_write(key, value).await; 113 | assert!(handle.await.is_ok()); 114 | } 115 | 116 | #[tokio::test] 117 | async fn remove_all_successfully() { 118 | // GIVEN Create new store. 119 | let db = rocks::DBMap::, Vec>::open(temp_dir(), None, None).unwrap(); 120 | let store = Store::new(db); 121 | 122 | // AND Write values to the store. 123 | let keys = vec![ 124 | vec![0u8, 1u8, 2u8, 1u8], 125 | vec![0u8, 1u8, 2u8, 2u8], 126 | vec![0u8, 1u8, 2u8, 3u8], 127 | ]; 128 | let value = vec![4u8, 5u8, 6u8, 7u8]; 129 | 130 | for key in keys.clone() { 131 | store.async_write(key.clone(), value.clone()).await; 132 | } 133 | 134 | // WHEN multi remove values 135 | let result = store.remove_all(keys.clone().into_iter()).await; 136 | 137 | // THEN 138 | assert!(result.is_ok()); 139 | 140 | // AND values doesn't exist any more 141 | for key in keys { 142 | let result = store.read(key).await; 143 | assert!(result.is_ok()); 144 | assert!(result.unwrap().is_none()); 145 | } 146 | } 147 | 148 | #[tokio::test] 149 | async fn write_and_read_all_successfully() { 150 | // GIVEN Create new store. 151 | let db = rocks::DBMap::, Vec>::open(temp_dir(), None, None).unwrap(); 152 | let store = Store::new(db); 153 | 154 | // AND key-values to store. 155 | let key_values = vec![ 156 | (vec![0u8, 1u8, 2u8, 1u8], vec![4u8, 5u8, 6u8, 7u8]), 157 | (vec![0u8, 1u8, 2u8, 2u8], vec![4u8, 5u8, 6u8, 7u8]), 158 | (vec![0u8, 1u8, 2u8, 3u8], vec![4u8, 5u8, 6u8, 7u8]), 159 | ]; 160 | 161 | // WHEN 162 | let result = store.sync_write_all(key_values.clone()).await; 163 | 164 | // THEN 165 | assert!(result.is_ok()); 166 | 167 | // AND read_all to ensure that values have been written 168 | let keys: Vec> = key_values.clone().into_iter().map(|(key, _)| key).collect(); 169 | let result = store.read_all(keys).await; 170 | 171 | assert!(result.is_ok()); 172 | assert_eq!(result.as_ref().unwrap().len(), 3); 173 | 174 | for (i, value) in result.unwrap().into_iter().enumerate() { 175 | assert!(value.is_some()); 176 | assert_eq!(value.unwrap(), key_values[i].1); 177 | } 178 | } 179 | 180 | #[tokio::test] 181 | async fn iter_successfully() { 182 | // GIVEN Create new store. 183 | let db = rocks::DBMap::, Vec>::open(temp_dir(), None, None).unwrap(); 184 | let store = Store::new(db); 185 | 186 | // AND key-values to store. 187 | let key_values = vec![ 188 | (vec![0u8, 1u8], vec![4u8, 4u8]), 189 | (vec![0u8, 2u8], vec![4u8, 5u8]), 190 | (vec![0u8, 3u8], vec![4u8, 6u8]), 191 | ]; 192 | 193 | let result = store.sync_write_all(key_values.clone()).await; 194 | assert!(result.is_ok()); 195 | 196 | // Iter through the keys 197 | let output = store.iter(None).await; 198 | for (k, v) in &key_values { 199 | let v1 = output.get(k).unwrap(); 200 | assert_eq!(v1.first(), v.first()); 201 | assert_eq!(v1.last(), v.last()); 202 | } 203 | assert_eq!(output.len(), key_values.len()); 204 | } 205 | 206 | #[tokio::test] 207 | async fn iter_and_filter_successfully() { 208 | // GIVEN Create new store. 209 | let db = rocks::DBMap::, Vec>::open(temp_dir(), None, None).unwrap(); 210 | let store = Store::new(db); 211 | 212 | // AND key-values to store. 213 | let key_values = vec![ 214 | (vec![0u8, 1u8], vec![4u8, 4u8]), 215 | (vec![0u8, 2u8], vec![4u8, 5u8]), 216 | (vec![0u8, 3u8], vec![4u8, 6u8]), 217 | (vec![0u8, 4u8], vec![4u8, 7u8]), 218 | (vec![0u8, 5u8], vec![4u8, 0u8]), 219 | (vec![0u8, 6u8], vec![4u8, 1u8]), 220 | ]; 221 | 222 | let result = store.sync_write_all(key_values.clone()).await; 223 | assert!(result.is_ok()); 224 | 225 | // Iter through the keys 226 | let output = store 227 | .iter(Some(Box::new(|(k, _v)| { 228 | u16::from_le_bytes(k[..2].try_into().unwrap()) % 2 == 0 229 | }))) 230 | .await; 231 | for (k, v) in &key_values { 232 | let int = u16::from_le_bytes(k[..2].try_into().unwrap()); 233 | if int % 2 == 0 { 234 | let v1 = output.get(k).unwrap(); 235 | assert_eq!(v1.first(), v.first()); 236 | assert_eq!(v1.last(), v.last()); 237 | } else { 238 | assert!(output.get(k).is_none()); 239 | } 240 | } 241 | assert_eq!(output.len(), key_values.len()); 242 | } 243 | -------------------------------------------------------------------------------- /crates/typed-store/src/traits.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2022, Mysten Labs, Inc. 2 | // SPDX-License-Identifier: Apache-2.0 3 | use serde::{de::DeserializeOwned, Serialize}; 4 | use std::{borrow::Borrow, collections::BTreeMap, error::Error}; 5 | 6 | pub trait Map<'a, K, V> 7 | where 8 | K: Serialize + DeserializeOwned + ?Sized, 9 | V: Serialize + DeserializeOwned, 10 | { 11 | type Error: Error; 12 | type Iterator: Iterator; 13 | type Keys: Iterator; 14 | type Values: Iterator; 15 | 16 | /// Returns true if the map contains a value for the specified key. 17 | fn contains_key(&self, key: &K) -> Result; 18 | 19 | /// Returns the value for the given key from the map, if it exists. 20 | fn get(&self, key: &K) -> Result, Self::Error>; 21 | 22 | /// Returns the raw value (bincode serialized bytes) for the given key from the map, if it exists. 23 | fn get_raw_bytes(&self, key: &K) -> Result>, Self::Error>; 24 | 25 | /// Returns the value for the given key from the map, if it exists 26 | /// or the given default value if it does not. 27 | /// This method is not thread safe 28 | fn get_or_insert_unsafe V>( 29 | &self, 30 | key: &K, 31 | default: F, 32 | ) -> Result { 33 | self.get(key).and_then(|optv| match optv { 34 | Some(v) => Ok(v), 35 | None => { 36 | self.insert(key, &default())?; 37 | self.get(key).transpose().expect("default just inserted") 38 | } 39 | }) 40 | } 41 | 42 | /// Inserts the given key-value pair into the map. 43 | fn insert(&self, key: &K, value: &V) -> Result<(), Self::Error>; 44 | 45 | /// Removes the entry for the given key from the map. 46 | fn remove(&self, key: &K) -> Result<(), Self::Error>; 47 | 48 | /// Removes every key-value pair from the map. 49 | fn clear(&self) -> Result<(), Self::Error>; 50 | 51 | /// Returns true if the map is empty, otherwise false. 52 | fn is_empty(&self) -> bool; 53 | 54 | /// Returns an iterator visiting each key-value pair in the map. 55 | fn iter(&'a self) -> Self::Iterator; 56 | 57 | /// Returns an iterator over each key in the map. 58 | fn keys(&'a self) -> Self::Keys; 59 | 60 | /// Returns an iterator over each value in the map. 61 | fn values(&'a self) -> Self::Values; 62 | 63 | /// Returns a vector of values corresponding to the keys provided, non-atomically. 64 | fn multi_get(&self, keys: impl IntoIterator) -> Result>, Self::Error> 65 | where 66 | J: Borrow, 67 | { 68 | keys.into_iter().map(|key| self.get(key.borrow())).collect() 69 | } 70 | 71 | /// Inserts key-value pairs, non-atomically. 72 | fn multi_insert( 73 | &self, 74 | key_val_pairs: impl IntoIterator, 75 | ) -> Result<(), Self::Error> 76 | where 77 | J: Borrow, 78 | U: Borrow, 79 | { 80 | key_val_pairs 81 | .into_iter() 82 | .try_for_each(|(key, value)| self.insert(key.borrow(), value.borrow())) 83 | } 84 | 85 | /// Removes keys, non-atomically. 86 | fn multi_remove(&self, keys: impl IntoIterator) -> Result<(), Self::Error> 87 | where 88 | J: Borrow, 89 | { 90 | keys.into_iter() 91 | .try_for_each(|key| self.remove(key.borrow())) 92 | } 93 | 94 | /// Try to catch up with primary when running as secondary 95 | fn try_catch_up_with_primary(&self) -> Result<(), Self::Error>; 96 | } 97 | 98 | pub trait TypedStoreDebug { 99 | /// Dump a DB table with pagination 100 | fn dump_table( 101 | &self, 102 | table_name: String, 103 | page_size: u16, 104 | page_number: usize, 105 | ) -> eyre::Result>; 106 | 107 | /// Get the name of the DB. This is simply the name of the struct 108 | fn primary_db_name(&self) -> String; 109 | 110 | /// Get a map of table names to key-value types 111 | fn describe_all_tables(&self) -> BTreeMap; 112 | 113 | /// Count the entries in the table 114 | fn count_table_keys(&self, table_name: String) -> eyre::Result; 115 | } 116 | -------------------------------------------------------------------------------- /crates/typed-store/tests/macro_tests.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2022, Mysten Labs, Inc. 2 | // SPDX-License-Identifier: Apache-2.0 3 | #![allow(dead_code)] 4 | 5 | use once_cell::sync::Lazy; 6 | use serde::Deserialize; 7 | use serde::Serialize; 8 | use std::collections::HashSet; 9 | use std::fmt::Debug; 10 | use std::sync::Mutex; 11 | use std::time::Duration; 12 | use typed_store::metrics::SamplingInterval; 13 | use typed_store::rocks::list_tables; 14 | use typed_store::rocks::DBMap; 15 | use typed_store::traits::Map; 16 | use typed_store::traits::TypedStoreDebug; 17 | use typed_store::Store; 18 | use typed_store_derive::DBMapUtils; 19 | 20 | fn temp_dir() -> std::path::PathBuf { 21 | tempfile::tempdir() 22 | .expect("Failed to open temporary directory") 23 | .into_path() 24 | } 25 | /// This struct is used to illustrate how the utility works 26 | #[derive(DBMapUtils)] 27 | struct Tables { 28 | table1: DBMap, 29 | table2: DBMap, 30 | } 31 | 32 | // Check that generics work 33 | #[derive(DBMapUtils)] 34 | struct TablesGenerics { 35 | table1: DBMap, 36 | table2: DBMap>, 37 | } 38 | 39 | #[derive(Debug, Serialize, Deserialize)] 40 | pub struct Generic { 41 | field1: T, 42 | field2: V, 43 | } 44 | 45 | impl< 46 | T: Eq + Debug + Serialize + for<'de> Deserialize<'de>, 47 | V: Eq + Debug + Serialize + for<'de> Deserialize<'de>, 48 | > Generic 49 | { 50 | } 51 | 52 | /// This struct shows that single elem structs work 53 | #[derive(DBMapUtils)] 54 | struct TablesSingle { 55 | table1: DBMap, 56 | } 57 | 58 | #[tokio::test] 59 | async fn macro_test() { 60 | let primary_path = temp_dir(); 61 | let tbls_primary = Tables::open_tables_read_write(primary_path.clone(), None, None); 62 | 63 | // Write to both tables 64 | let keys_vals_1 = (1..10).map(|i| (i.to_string(), i.to_string())); 65 | tbls_primary 66 | .table1 67 | .multi_insert(keys_vals_1.clone()) 68 | .expect("Failed to multi-insert"); 69 | 70 | let keys_vals_2 = (3..10).map(|i| (i, i.to_string())); 71 | tbls_primary 72 | .table2 73 | .multi_insert(keys_vals_2.clone()) 74 | .expect("Failed to multi-insert"); 75 | 76 | // Open in secondary mode 77 | let tbls_secondary = Tables::get_read_only_handle(primary_path.clone(), None, None); 78 | 79 | // Check all the tables can be listed 80 | let actual_table_names: HashSet<_> = list_tables(primary_path).unwrap().into_iter().collect(); 81 | let observed_table_names: HashSet<_> = Tables::describe_tables() 82 | .iter() 83 | .map(|q| q.0.clone()) 84 | .collect(); 85 | 86 | let exp: HashSet = 87 | HashSet::from_iter(vec!["table1", "table2"].into_iter().map(|s| s.to_owned())); 88 | assert_eq!(HashSet::from_iter(actual_table_names), exp); 89 | assert_eq!(HashSet::from_iter(observed_table_names), exp); 90 | 91 | // Check the counts 92 | assert_eq!(9, tbls_secondary.count_keys("table1").unwrap()); 93 | assert_eq!(7, tbls_secondary.count_keys("table2").unwrap()); 94 | 95 | // Test all entries 96 | let m = tbls_secondary.dump("table1", 100, 0).unwrap(); 97 | for (k, v) in keys_vals_1 { 98 | assert_eq!(format!("\"{v}\""), *m.get(&format!("\"{k}\"")).unwrap()); 99 | } 100 | 101 | let m = tbls_secondary.dump("table2", 100, 0).unwrap(); 102 | for (k, v) in keys_vals_2 { 103 | assert_eq!(format!("\"{v}\""), *m.get(&k.to_string()).unwrap()); 104 | } 105 | 106 | // Check that catchup logic works 107 | let keys_vals_1 = (100..110).map(|i| (i.to_string(), i.to_string())); 108 | tbls_primary 109 | .table1 110 | .multi_insert(keys_vals_1) 111 | .expect("Failed to multi-insert"); 112 | // New entries should be present in secondary 113 | assert_eq!(19, tbls_secondary.count_keys("table1").unwrap()); 114 | 115 | // Test pagination 116 | let m = tbls_secondary.dump("table1", 2, 0).unwrap(); 117 | assert_eq!(2, m.len()); 118 | assert_eq!(format!("\"1\""), *m.get(&"\"1\"".to_string()).unwrap()); 119 | assert_eq!(format!("\"2\""), *m.get(&"\"2\"".to_string()).unwrap()); 120 | 121 | let m = tbls_secondary.dump("table1", 3, 2).unwrap(); 122 | assert_eq!(3, m.len()); 123 | assert_eq!(format!("\"7\""), *m.get(&"\"7\"".to_string()).unwrap()); 124 | assert_eq!(format!("\"8\""), *m.get(&"\"8\"".to_string()).unwrap()); 125 | } 126 | 127 | /// We show that custom functions can be applied 128 | #[derive(DBMapUtils)] 129 | struct TablesCustomOptions { 130 | #[default_options_override_fn = "another_custom_fn_name"] 131 | table1: DBMap, 132 | table2: DBMap, 133 | #[default_options_override_fn = "custom_fn_name"] 134 | table3: DBMap, 135 | #[default_options_override_fn = "another_custom_fn_name"] 136 | table4: DBMap, 137 | } 138 | 139 | static TABLE1_OPTIONS_SET_FLAG: Lazy>> = Lazy::new(|| Mutex::new(vec![])); 140 | static TABLE2_OPTIONS_SET_FLAG: Lazy>> = Lazy::new(|| Mutex::new(vec![])); 141 | 142 | fn custom_fn_name() -> typed_store::rocks::DBOptions { 143 | TABLE1_OPTIONS_SET_FLAG.lock().unwrap().push(false); 144 | typed_store::rocks::DBOptions::default() 145 | } 146 | 147 | fn another_custom_fn_name() -> typed_store::rocks::DBOptions { 148 | TABLE2_OPTIONS_SET_FLAG.lock().unwrap().push(false); 149 | TABLE2_OPTIONS_SET_FLAG.lock().unwrap().push(false); 150 | TABLE2_OPTIONS_SET_FLAG.lock().unwrap().push(false); 151 | typed_store::rocks::DBOptions::default() 152 | } 153 | 154 | #[tokio::test] 155 | async fn macro_test_configure() { 156 | let primary_path = temp_dir(); 157 | 158 | // Get a configurator for this table 159 | let mut config = Tables::configurator(); 160 | // Config table 1 161 | config.table1 = typed_store::rocks::DBOptions::default(); 162 | config.table1.options.create_if_missing(true); 163 | config.table1.options.set_write_buffer_size(123456); 164 | 165 | // Config table 2 166 | config.table2 = config.table1.clone(); 167 | 168 | config.table2.options.create_if_missing(false); 169 | 170 | // Build and open with new config 171 | let _ = Tables::open_tables_read_write(primary_path, None, Some(config.build())); 172 | 173 | // Test the static config options 174 | let primary_path = temp_dir(); 175 | 176 | assert_eq!(TABLE1_OPTIONS_SET_FLAG.lock().unwrap().len(), 0); 177 | 178 | let _ = TablesCustomOptions::open_tables_read_write(primary_path, None, None); 179 | 180 | // Ensures that the function to set options was called 181 | assert_eq!(TABLE1_OPTIONS_SET_FLAG.lock().unwrap().len(), 1); 182 | 183 | // `another_custom_fn_name` is called twice, so 6 items in vec 184 | assert_eq!(TABLE2_OPTIONS_SET_FLAG.lock().unwrap().len(), 6); 185 | } 186 | 187 | /// We show that custom functions can be applied 188 | #[derive(DBMapUtils)] 189 | struct TablesMemUsage { 190 | table1: DBMap, 191 | table2: DBMap, 192 | table3: DBMap, 193 | table4: DBMap, 194 | } 195 | 196 | #[tokio::test] 197 | async fn macro_test_get_memory_usage() { 198 | let primary_path = temp_dir(); 199 | let tables = TablesMemUsage::open_tables_read_write(primary_path, None, None); 200 | 201 | let keys_vals_1 = (1..1000).map(|i| (i.to_string(), i.to_string())); 202 | tables 203 | .table1 204 | .multi_insert(keys_vals_1) 205 | .expect("Failed to multi-insert"); 206 | 207 | let (mem_table, _) = tables.get_memory_usage().unwrap(); 208 | assert!(mem_table > 0); 209 | } 210 | 211 | #[derive(DBMapUtils)] 212 | struct StoreTables { 213 | table1: Store, Vec>, 214 | table2: Store, 215 | } 216 | #[tokio::test] 217 | async fn store_iter_and_filter_successfully() { 218 | // Use constom configurator 219 | let mut config = StoreTables::configurator(); 220 | // Config table 1 221 | config.table1 = typed_store::rocks::DBOptions::default(); 222 | config.table1.options.create_if_missing(true); 223 | config.table1.options.set_write_buffer_size(123456); 224 | 225 | // Config table 2 226 | config.table2 = config.table1.clone(); 227 | 228 | config.table2.options.create_if_missing(false); 229 | let path = temp_dir(); 230 | let str = StoreTables::open_tables_read_write(path.clone(), None, Some(config.build())); 231 | 232 | // AND key-values to store. 233 | let key_values = vec![ 234 | (vec![0u8, 1u8], vec![4u8, 4u8]), 235 | (vec![0u8, 2u8], vec![4u8, 5u8]), 236 | (vec![0u8, 3u8], vec![4u8, 6u8]), 237 | (vec![0u8, 4u8], vec![4u8, 7u8]), 238 | (vec![0u8, 5u8], vec![4u8, 0u8]), 239 | (vec![0u8, 6u8], vec![4u8, 1u8]), 240 | ]; 241 | 242 | let result = str.table1.sync_write_all(key_values.clone()).await; 243 | assert!(result.is_ok()); 244 | 245 | // Iter through the keys 246 | let output = str 247 | .table1 248 | .iter(Some(Box::new(|(k, _v)| { 249 | u16::from_le_bytes(k[..2].try_into().unwrap()) % 2 == 0 250 | }))) 251 | .await; 252 | for (k, v) in &key_values { 253 | let int = u16::from_le_bytes(k[..2].try_into().unwrap()); 254 | if int % 2 == 0 { 255 | let v1 = output.get(k).unwrap(); 256 | assert_eq!(v1.first(), v.first()); 257 | assert_eq!(v1.last(), v.last()); 258 | } else { 259 | assert!(output.get(k).is_none()); 260 | } 261 | } 262 | assert_eq!(output.len(), key_values.len()); 263 | } 264 | 265 | #[tokio::test] 266 | async fn test_sampling() { 267 | let sampling_interval = SamplingInterval::new(Duration::ZERO, 10); 268 | for _i in 0..10 { 269 | assert!(!sampling_interval.sample()); 270 | } 271 | assert!(sampling_interval.sample()); 272 | for _i in 0..10 { 273 | assert!(!sampling_interval.sample()); 274 | } 275 | assert!(sampling_interval.sample()); 276 | } 277 | 278 | #[tokio::test(flavor = "current_thread", start_paused = true)] 279 | async fn test_sampling_time() { 280 | let sampling_interval = SamplingInterval::new(Duration::from_secs(1), 10); 281 | for _i in 0..10 { 282 | assert!(!sampling_interval.sample()); 283 | } 284 | assert!(!sampling_interval.sample()); 285 | tokio::time::advance(Duration::from_secs(1)).await; 286 | tokio::task::yield_now().await; 287 | assert!(sampling_interval.sample()); 288 | for _i in 0..10 { 289 | assert!(!sampling_interval.sample()); 290 | } 291 | assert!(!sampling_interval.sample()); 292 | tokio::time::advance(Duration::from_secs(1)).await; 293 | tokio::task::yield_now().await; 294 | assert!(sampling_interval.sample()); 295 | } 296 | -------------------------------------------------------------------------------- /crates/x/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "x" 3 | version = "0.1.0" 4 | license = "Apache-2.0" 5 | publish = false 6 | edition = "2021" 7 | 8 | [dependencies] 9 | anyhow = "1.0.58" 10 | clap = { version = "4.0.8", features = ["derive"] } 11 | nexlint = { git = "https://github.com/nextest-rs/nexlint.git", rev = "5926141c20414814290bb1b04bd3b2238bbbc90e" } 12 | nexlint-lints = { git = "https://github.com/nextest-rs/nexlint.git", rev = "5926141c20414814290bb1b04bd3b2238bbbc90e" } 13 | -------------------------------------------------------------------------------- /crates/x/src/lint.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2022, Mysten Labs, Inc. 2 | // SPDX-License-Identifier: Apache-2.0 3 | 4 | use clap::Parser; 5 | use nexlint::{prelude::*, NexLintContext}; 6 | use nexlint_lints::{ 7 | content::*, 8 | file_path::{AllowedPaths, DEFAULT_ALLOWED_PATHS_REGEX}, 9 | handle_lint_results, 10 | package::*, 11 | project::{DirectDepDups, DirectDepDupsConfig}, 12 | }; 13 | 14 | static LICENSE_HEADER: &str = "Copyright (c) 2022, Mysten Labs, Inc.\n\ 15 | SPDX-License-Identifier: Apache-2.0\n\ 16 | "; 17 | #[derive(Debug, Parser)] 18 | pub struct Args { 19 | #[clap(long)] 20 | fail_fast: bool, 21 | } 22 | 23 | pub fn run(args: Args) -> crate::Result<()> { 24 | let direct_dups_config = DirectDepDupsConfig { allow: vec![] }; 25 | let project_linters: &[&dyn ProjectLinter] = &[&DirectDepDups::new(&direct_dups_config)]; 26 | 27 | let package_linters: &[&dyn PackageLinter] = &[ 28 | &CrateNamesPaths, 29 | &IrrelevantBuildDeps, 30 | // This one seems to be broken 31 | //&UnpublishedPackagesOnlyUsePathDependencies::new(), 32 | &PublishedPackagesDontDependOnUnpublishedPackages, 33 | &OnlyPublishToCratesIo, 34 | &CratesInCratesDirectory, 35 | &CratesOnlyInCratesDirectory, 36 | ]; 37 | 38 | let file_path_linters: &[&dyn FilePathLinter] = 39 | &[&AllowedPaths::new(DEFAULT_ALLOWED_PATHS_REGEX)?]; 40 | 41 | let whitespace_exceptions = build_exceptions(&[])?; 42 | let content_linters: &[&dyn ContentLinter] = &[ 43 | &LicenseHeader::new(LICENSE_HEADER), 44 | &RootToml, 45 | &EofNewline::new(&whitespace_exceptions), 46 | &TrailingWhitespace::new(&whitespace_exceptions), 47 | ]; 48 | 49 | let nexlint_context = NexLintContext::from_current_dir()?; 50 | let engine = LintEngineConfig::new(&nexlint_context) 51 | .with_project_linters(project_linters) 52 | .with_package_linters(package_linters) 53 | .with_file_path_linters(file_path_linters) 54 | .with_content_linters(content_linters) 55 | .fail_fast(args.fail_fast) 56 | .build(); 57 | 58 | let results = engine.run()?; 59 | 60 | handle_lint_results(results) 61 | } 62 | -------------------------------------------------------------------------------- /crates/x/src/main.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2022, Mysten Labs, Inc. 2 | // SPDX-License-Identifier: Apache-2.0 3 | 4 | use anyhow::Result; 5 | use clap::Parser; 6 | 7 | mod lint; 8 | 9 | /// Simple program to greet a person 10 | #[derive(Parser, Debug)] 11 | #[clap(author, version, about, long_about = None)] 12 | struct Args { 13 | #[clap(subcommand)] 14 | cmd: Command, 15 | } 16 | 17 | #[derive(Debug, Parser)] 18 | enum Command { 19 | #[clap(name = "lint")] 20 | /// Run lints 21 | Lint(lint::Args), 22 | } 23 | 24 | fn main() -> Result<()> { 25 | let args = Args::parse(); 26 | 27 | match args.cmd { 28 | Command::Lint(args) => lint::run(args), 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /deny.toml: -------------------------------------------------------------------------------- 1 | # This template contains all of the possible sections and their default values 2 | 3 | # Note that all fields that take a lint level have these possible values: 4 | # * deny - An error will be produced and the check will fail 5 | # * warn - A warning will be produced, but the check will not fail 6 | # * allow - No warning or error will be produced, though in some cases a note 7 | # will be 8 | 9 | # The values provided in this template are the default values that will be used 10 | # when any section or field is not specified in your own configuration 11 | 12 | # If 1 or more target triples (and optionally, target_features) are specified, 13 | # only the specified targets will be checked when running `cargo deny check`. 14 | # This means, if a particular package is only ever used as a target specific 15 | # dependency, such as, for example, the `nix` crate only being used via the 16 | # `target_family = "unix"` configuration, that only having windows targets in 17 | # this list would mean the nix crate, as well as any of its exclusive 18 | # dependencies not shared by any other crates, would be ignored, as the target 19 | # list here is effectively saying which targets you are building for. 20 | targets = [ 21 | # The triple can be any string, but only the target triples built in to 22 | # rustc (as of 1.40) can be checked against actual config expressions 23 | #{ triple = "x86_64-unknown-linux-musl" }, 24 | # You can also specify which target_features you promise are enabled for a 25 | # particular target. target_features are currently not validated against 26 | # the actual valid features supported by the target architecture. 27 | #{ triple = "wasm32-unknown-unknown", features = ["atomics"] }, 28 | ] 29 | 30 | # This section is considered when running `cargo deny check advisories` 31 | # More documentation for the advisories section can be found here: 32 | # https://embarkstudios.github.io/cargo-deny/checks/advisories/cfg.html 33 | [advisories] 34 | # The path where the advisory database is cloned/fetched into 35 | db-path = "~/.cargo/advisory-db" 36 | # The url(s) of the advisory databases to use 37 | db-urls = ["https://github.com/rustsec/advisory-db"] 38 | # The lint level for security vulnerabilities 39 | vulnerability = "deny" 40 | # The lint level for unmaintained crates 41 | unmaintained = "warn" 42 | # The lint level for crates that have been yanked from their source registry 43 | yanked = "warn" 44 | # The lint level for crates with security notices. Note that as of 45 | # 2019-12-17 there are no security notice advisories in 46 | # https://github.com/rustsec/advisory-db 47 | notice = "warn" 48 | # A list of advisory IDs to ignore. Note that ignored advisories will still 49 | # output a note when they are encountered. 50 | ignore = [ 51 | "RUSTSEC-2020-0159", 52 | #"RUSTSEC-0000-0000", 53 | ] 54 | # Threshold for security vulnerabilities, any vulnerability with a CVSS score 55 | # lower than the range specified will be ignored. Note that ignored advisories 56 | # will still output a note when they are encountered. 57 | # * None - CVSS Score 0.0 58 | # * Low - CVSS Score 0.1 - 3.9 59 | # * Medium - CVSS Score 4.0 - 6.9 60 | # * High - CVSS Score 7.0 - 8.9 61 | # * Critical - CVSS Score 9.0 - 10.0 62 | #severity-threshold = 63 | 64 | # This section is considered when running `cargo deny check licenses` 65 | # More documentation for the licenses section can be found here: 66 | # https://embarkstudios.github.io/cargo-deny/checks/licenses/cfg.html 67 | [licenses] 68 | # The lint level for crates which do not have a detectable license 69 | unlicensed = "deny" 70 | # List of explictly allowed licenses 71 | # See https://spdx.org/licenses/ for list of possible licenses 72 | # [possible values: any SPDX 3.11 short identifier (+ optional exception)]. 73 | allow = [ 74 | "MIT", 75 | "BSD-2-Clause", 76 | "BSD-3-Clause", 77 | "CC0-1.0", 78 | "Apache-2.0", 79 | "ISC", 80 | "LicenseRef-ring", 81 | "Unicode-DFS-2016", 82 | #"Apache-2.0 WITH LLVM-exception", 83 | ] 84 | # List of explictly disallowed licenses 85 | # See https://spdx.org/licenses/ for list of possible licenses 86 | # [possible values: any SPDX 3.11 short identifier (+ optional exception)]. 87 | deny = [ 88 | #"Nokia", 89 | ] 90 | # Lint level for licenses considered copyleft 91 | copyleft = "warn" 92 | # Blanket approval or denial for OSI-approved or FSF Free/Libre licenses 93 | # * both - The license will be approved if it is both OSI-approved *AND* FSF 94 | # * either - The license will be approved if it is either OSI-approved *OR* FSF 95 | # * osi-only - The license will be approved if is OSI-approved *AND NOT* FSF 96 | # * fsf-only - The license will be approved if is FSF *AND NOT* OSI-approved 97 | # * neither - This predicate is ignored and the default lint level is used 98 | allow-osi-fsf-free = "neither" 99 | # Lint level used when no other predicates are matched 100 | # 1. License isn't in the allow or deny lists 101 | # 2. License isn't copyleft 102 | # 3. License isn't OSI/FSF, or allow-osi-fsf-free = "neither" 103 | default = "deny" 104 | # The confidence threshold for detecting a license from license text. 105 | # The higher the value, the more closely the license text must be to the 106 | # canonical license text of a valid SPDX license file. 107 | # [possible values: any between 0.0 and 1.0]. 108 | confidence-threshold = 0.8 109 | # Allow 1 or more licenses on a per-crate basis, so that particular licenses 110 | # aren't accepted for every possible crate as with the normal allow list 111 | exceptions = [ 112 | # Each entry is the crate and version constraint, and its specific allow 113 | # list 114 | #{ allow = ["Zlib"], name = "adler32", version = "*" }, 115 | ] 116 | 117 | # Some crates don't have (easily) machine readable licensing information, 118 | # adding a clarification entry for it allows you to manually specify the 119 | # licensing information 120 | #[[licenses.clarify]] 121 | # The name of the crate the clarification applies to 122 | #name = "ring" 123 | # The optional version constraint for the crate 124 | #version = "*" 125 | # The SPDX expression for the license requirements of the crate 126 | #expression = "MIT AND ISC AND OpenSSL" 127 | # One or more files in the crate's source used as the "source of truth" for 128 | # the license expression. If the contents match, the clarification will be used 129 | # when running the license check, otherwise the clarification will be ignored 130 | # and the crate will be checked normally, which may produce warnings or errors 131 | # depending on the rest of your configuration 132 | #license-files = [ 133 | # Each entry is a crate relative path, and the (opaque) hash of its contents 134 | #{ path = "LICENSE", hash = 0xbd0eed23 } 135 | #] 136 | [[licenses.clarify]] 137 | name = "bls-crypto" 138 | version = "*" 139 | expression = "MIT AND Apache-2.0" 140 | license-files = [ 141 | ] 142 | [[licenses.clarify]] 143 | name = "ring" 144 | expression = "LicenseRef-ring" 145 | license-files = [ 146 | { path = "LICENSE", hash = 0xbd0eed23 }, 147 | ] 148 | [[licenses.clarify]] 149 | name = "target-lexicon" 150 | version = "*" 151 | expression = "Apache-2.0" 152 | license-files = [ 153 | ] 154 | 155 | [licenses.private] 156 | # If true, ignores workspace crates that aren't published, or are only 157 | # published to private registries 158 | ignore = false 159 | # One or more private registries that you might publish crates to, if a crate 160 | # is only published to private registries, and ignore is true, the crate will 161 | # not have its license(s) checked 162 | registries = [ 163 | #"https://sekretz.com/registry 164 | ] 165 | 166 | # This section is considered when running `cargo deny check bans`. 167 | # More documentation about the 'bans' section can be found here: 168 | # https://embarkstudios.github.io/cargo-deny/checks/bans/cfg.html 169 | [bans] 170 | # Lint level for when multiple versions of the same crate are detected 171 | multiple-versions = "warn" 172 | # Lint level for when a crate version requirement is `*` 173 | wildcards = "allow" 174 | # The graph highlighting used when creating dotgraphs for crates 175 | # with multiple versions 176 | # * lowest-version - The path to the lowest versioned duplicate is highlighted 177 | # * simplest-path - The path to the version with the fewest edges is highlighted 178 | # * all - Both lowest-version and simplest-path are used 179 | highlight = "all" 180 | # List of crates that are allowed. Use with care! 181 | allow = [ 182 | #{ name = "ansi_term", version = "=0.11.0" }, 183 | ] 184 | # List of crates to deny 185 | deny = [ 186 | # Each entry the name of a crate and a version range. If version is 187 | # not specified, all versions will be matched. 188 | #{ name = "ansi_term", version = "=0.11.0" }, 189 | # 190 | # Wrapper crates can optionally be specified to allow the crate when it 191 | # is a direct dependency of the otherwise banned crate 192 | #{ name = "ansi_term", version = "=0.11.0", wrappers = [] }, 193 | ] 194 | # Certain crates/versions that will be skipped when doing duplicate detection. 195 | skip = [ 196 | #{ name = "ansi_term", version = "=0.11.0" }, 197 | ] 198 | # Similarly to `skip` allows you to skip certain crates during duplicate 199 | # detection. Unlike skip, it also includes the entire tree of transitive 200 | # dependencies starting at the specified crate, up to a certain depth, which is 201 | # by default infinite 202 | skip-tree = [ 203 | #{ name = "ansi_term", version = "=0.11.0", depth = 20 }, 204 | ] 205 | 206 | # This section is considered when running `cargo deny check sources`. 207 | # More documentation about the 'sources' section can be found here: 208 | # https://embarkstudios.github.io/cargo-deny/checks/sources/cfg.html 209 | [sources] 210 | # Lint level for what to happen when a crate from a crate registry that is not 211 | # in the allow list is encountered 212 | unknown-registry = "warn" 213 | # Lint level for what to happen when a crate from a git repository that is not 214 | # in the allow list is encountered 215 | unknown-git = "warn" 216 | # List of URLs for allowed crate registries. Defaults to the crates.io index 217 | # if not specified. If it is specified but empty, no registries are allowed. 218 | allow-registry = ["https://github.com/rust-lang/crates.io-index"] 219 | # List of URLs for allowed Git repositories 220 | allow-git = [] 221 | 222 | [sources.allow-org] 223 | -------------------------------------------------------------------------------- /rust-toolchain: -------------------------------------------------------------------------------- 1 | 1.64.0 2 | -------------------------------------------------------------------------------- /scripts/changed-files.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Copyright (c) 2022, Mysten Labs, Inc. 3 | # SPDX-License-Identifier: Apache-2.0 4 | 5 | set -e 6 | 7 | # Check for modified or untracked files after CI has run 8 | diff="$(git diff)" 9 | echo "${diff}" 10 | [[ -z "${diff}" ]] 11 | 12 | changed_files="$(git status --porcelain)" 13 | echo "${changed_files}" 14 | [[ -z "${changed_files}" ]] 15 | -------------------------------------------------------------------------------- /scripts/get_current_version.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # Copyright (c) 2022, Mysten Labs, Inc. 3 | # SPDX-License-Identifier: Apache-2.0 4 | 5 | # Script requirements: 6 | # - curl 7 | # - jq 8 | 9 | # Fail on first error, on undefined variables, and on failures in pipelines. 10 | set -euo pipefail 11 | 12 | # Go to the repo root directory. 13 | cd "$(git rev-parse --show-toplevel)" 14 | 15 | # Check 1 argument is given 16 | if [ $# -lt 1 ] 17 | then 18 | echo "Usage : $0 " 19 | exit 1 20 | fi 21 | 22 | # The first argument should be the name of a crate. 23 | CRATE_NAME="$1" 24 | 25 | cargo metadata --format-version 1 | \ 26 | jq --arg crate_name "$CRATE_NAME" --exit-status -r \ 27 | '.packages[] | select(.name == $crate_name) | .version' 28 | -------------------------------------------------------------------------------- /scripts/is_version_already_uploaded.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # Copyright (c) 2022, Mysten Labs, Inc. 3 | # SPDX-License-Identifier: Apache-2.0 4 | 5 | # Script requirements: 6 | # - curl 7 | # - jq 8 | # - sort with `-V` flag, available in `coreutils-7` 9 | # On macOS this may require `brew install coreutils`. 10 | 11 | # Fail on first error, on undefined variables, and on failures in pipelines. 12 | set -euo pipefail 13 | 14 | # source directory 15 | DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" 16 | 17 | # Go to the repo root directory. 18 | cd "$(git rev-parse --show-toplevel)" 19 | 20 | # Check 1 arguments is given 21 | if [ $# -lt 1 ] 22 | then 23 | echo "Usage : $0 " 24 | exit 1 25 | fi 26 | 27 | 28 | # The first argument should be the name of a crate. 29 | CRATE_NAME="$1" 30 | 31 | CURRENT_VERSION="$("$DIR"/get_current_version.sh "$CRATE_NAME")" || \ 32 | (echo >&2 "No crate named $CRATE_NAME found in workspace."; exit 1) 33 | echo >&2 "Crate $CRATE_NAME current version: $CURRENT_VERSION" 34 | 35 | # The leading whitespace is important! With it, we know that every version is both 36 | # preceded by and followed by whitespace. We use this fact to avoid matching 37 | # on substrings of versions. 38 | EXISTING_VERSIONS=" 39 | $( \ 40 | curl 2>/dev/null "https://crates.io/api/v1/crates/$CRATE_NAME" | \ 41 | jq --exit-status -r 'try .versions[].num' \ 42 | )" 43 | echo >&2 -e "Versions on crates.io:$EXISTING_VERSIONS\n" 44 | 45 | # Use version sort (sort -V) to get all versions in ascending order, then use grep to: 46 | # - grab the first line that matches the current version (--max-count=1) 47 | # - only match full lines (--line-regexp) 48 | OUTPUT="$( \ 49 | echo -e "$EXISTING_VERSIONS" | \ 50 | sort -V | \ 51 | grep --line-regexp --max-count=1 "$CURRENT_VERSION" || true 52 | )" 53 | 54 | if [[ "$OUTPUT" == "$CURRENT_VERSION" ]]; then 55 | echo >&2 "The current version $CURRENT_VERSION is already on crates.io" 56 | exit 7 57 | fi 58 | --------------------------------------------------------------------------------