├── .cargo └── config.toml ├── .codecov.yml ├── .github ├── dependabot.yml └── workflows │ ├── ci.yml │ ├── coverage.yml │ ├── mysql.yml │ ├── publish.yml │ └── wait-for-crate-dependency.sh ├── .gitignore ├── .vscode ├── launch.json └── settings.json ├── CHANGELOG.md ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── Cargo.toml ├── LICENSE-APACHE ├── LICENSE-MIT ├── README.md ├── TESTING.md ├── akd ├── Cargo.toml ├── benches │ ├── azks.rs │ ├── common.rs │ └── directory.rs └── src │ ├── append_only_zks.rs │ ├── auditor.rs │ ├── client.rs │ ├── directory.rs │ ├── errors.rs │ ├── helper_structs.rs │ ├── lib.rs │ ├── local_auditing.rs │ ├── storage │ ├── cache │ │ ├── high_parallelism.rs │ │ ├── mod.rs │ │ └── tests.rs │ ├── manager │ │ ├── mod.rs │ │ └── tests.rs │ ├── memory.rs │ ├── mod.rs │ ├── tests.rs │ ├── transaction.rs │ └── types.rs │ ├── test_utils.rs │ ├── tests │ ├── mod.rs │ ├── test_core_protocol.rs │ ├── test_errors.rs │ └── test_preloads.rs │ ├── tree_node.rs │ └── utils.rs ├── akd_core ├── Cargo.toml ├── benches │ └── parallel_vrfs.rs └── src │ ├── build.rs │ ├── configuration │ ├── experimental.rs │ ├── mod.rs │ ├── traits.rs │ └── whatsapp_v1.rs │ ├── ecvrf │ ├── ecvrf_impl.rs │ ├── mod.rs │ ├── tests.rs │ └── traits.rs │ ├── hash │ ├── mod.rs │ └── tests.rs │ ├── lib.rs │ ├── proto │ ├── mod.rs │ ├── specs │ │ ├── mod.rs │ │ ├── types.proto │ │ └── types.rs │ └── tests.rs │ ├── types │ ├── mod.rs │ └── node_label │ │ ├── mod.rs │ │ └── tests.rs │ ├── utils.rs │ └── verify │ ├── base.rs │ ├── history.rs │ ├── lookup.rs │ └── mod.rs ├── docker-compose.yml ├── examples ├── Cargo.toml ├── README.md └── src │ ├── fixture_generator │ ├── examples │ │ ├── example_tests.rs │ │ ├── experimental.yaml │ │ ├── mod.rs │ │ └── whatsapp_v1.yaml │ ├── generator.rs │ ├── mod.rs │ ├── parser.rs │ ├── reader │ │ ├── mod.rs │ │ ├── tests.rs │ │ └── yaml.rs │ └── writer │ │ ├── mod.rs │ │ └── yaml.rs │ ├── main.rs │ ├── mysql_demo │ ├── commands.rs │ ├── directory_host.rs 
│ ├── logs.rs │ ├── mod.rs │ ├── mysql.rs │ ├── mysql_storables.rs │ └── tests │ │ ├── memory_tests.rs │ │ ├── mod.rs │ │ ├── mysql_db_tests.rs │ │ ├── mysql_tests.rs │ │ └── test_util.rs │ ├── test_vectors │ ├── experimental.yaml │ ├── mod.rs │ └── whatsapp_v1.yaml │ ├── wasm_client │ └── mod.rs │ └── whatsapp_kt_auditor │ ├── auditor.rs │ └── mod.rs └── xtask ├── Cargo.toml ├── README.md └── src └── main.rs /.cargo/config.toml: -------------------------------------------------------------------------------- 1 | [alias] 2 | xtask = "run --package xtask --" 3 | -------------------------------------------------------------------------------- /.codecov.yml: -------------------------------------------------------------------------------- 1 | codecov: 2 | require_ci_to_pass: false 3 | 4 | ignore: 5 | - "examples" 6 | - "xtask" 7 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | 3 | updates: 4 | - package-ecosystem: cargo 5 | directory: / 6 | schedule: 7 | interval: daily 8 | 9 | - package-ecosystem: github-actions 10 | directory: / 11 | schedule: 12 | interval: daily -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | on: 3 | push: 4 | branches: 5 | - main 6 | pull_request: 7 | types: [opened, reopened, synchronize] 8 | 9 | jobs: 10 | test: 11 | name: ${{matrix.name}} 12 | runs-on: ubuntu-latest 13 | strategy: 14 | fail-fast: false 15 | matrix: 16 | include: 17 | - name: Test the core crate (akd_core) 18 | package: akd_core 19 | flags: 20 | 21 | - name: Test the core crate (akd_core) with no standard library 22 | package: akd_core 23 | flags: --features nostd 24 | 25 | - name: Test the core crate (akd_core) with protobuf and serde enabled 26 | package: akd_core 
27 | flags: --features protobuf,serde_serialization 28 | 29 | - name: Test the base library, with default features 30 | package: akd 31 | 32 | - name: Test the base library, enabling runtime metrics processing 33 | package: akd 34 | flags: --features runtime_metrics 35 | 36 | steps: 37 | - uses: actions/checkout@main 38 | 39 | - name: Install rust 40 | uses: actions-rs/toolchain@v1 41 | with: 42 | toolchain: stable 43 | override: true 44 | 45 | - name: Run test 46 | uses: actions-rs/cargo@v1 47 | with: 48 | command: test 49 | args: --package ${{matrix.package}} ${{matrix.flags}} 50 | 51 | clippy: 52 | name: Clippy 53 | runs-on: ubuntu-latest 54 | steps: 55 | - uses: actions/checkout@main 56 | - name: Install minimal stable with clippy 57 | uses: actions-rs/toolchain@v1 58 | with: 59 | profile: minimal 60 | toolchain: stable 61 | components: clippy 62 | override: true 63 | 64 | - name: Run Clippy 65 | uses: actions-rs/cargo@v1 66 | with: 67 | command: clippy 68 | args: --all -- -D clippy::all -D warnings 69 | 70 | rustfmt: 71 | name: rustfmt 72 | runs-on: ubuntu-latest 73 | steps: 74 | - uses: actions/checkout@main 75 | - name: Install minimal stable with rustfmt 76 | uses: actions-rs/toolchain@v1 77 | with: 78 | profile: minimal 79 | toolchain: stable 80 | components: rustfmt 81 | override: true 82 | 83 | - name: Build library (make sure all build.rs files have generated necessary code) 84 | uses: actions-rs/cargo@v1 85 | with: 86 | command: build 87 | args: --package akd_core 88 | 89 | - name: Run rustfmt 90 | uses: actions-rs/cargo@v1 91 | with: 92 | command: fmt 93 | args: --all -- --check 94 | 95 | benches: 96 | name: benches 97 | runs-on: ubuntu-latest 98 | strategy: 99 | matrix: 100 | include: 101 | - name: Build the akd_core benches 102 | package: akd_core 103 | flags: -F bench 104 | - name: Build the akd benches 105 | package: akd 106 | flags: -F bench 107 | steps: 108 | - uses: actions/checkout@main 109 | - name: Install rust 110 | uses: 
actions-rs/toolchain@v1 111 | with: 112 | toolchain: stable 113 | override: true 114 | - name: Run test 115 | uses: actions-rs/cargo@v1 116 | with: 117 | command: bench 118 | args: --package ${{matrix.package}} ${{matrix.flags}} 119 | 120 | docs: 121 | name: docs 122 | runs-on: ubuntu-latest 123 | env: 124 | RUSTDOCFLAGS: -Dwarnings 125 | strategy: 126 | fail-fast: false 127 | matrix: 128 | include: 129 | - package: akd 130 | - package: akd_core 131 | steps: 132 | - uses: actions/checkout@main 133 | - name: Install rust 134 | uses: actions-rs/toolchain@v1 135 | with: 136 | toolchain: stable 137 | override: true 138 | 139 | - name: Run rustdoc for ${{matrix.package}} 140 | uses: actions-rs/cargo@v1 141 | with: 142 | command: doc 143 | args: --package ${{matrix.package}} 144 | -------------------------------------------------------------------------------- /.github/workflows/coverage.yml: -------------------------------------------------------------------------------- 1 | name: Code Coverage 2 | on: 3 | push: 4 | branches: 5 | - main 6 | pull_request: 7 | types: [opened, reopened, synchronize] 8 | 9 | jobs: 10 | coverage: 11 | name: Coverage using xtask 12 | strategy: 13 | matrix: 14 | os: [ubuntu-latest] 15 | rust: [stable] 16 | runs-on: ${{ matrix.os }} 17 | steps: 18 | - name: Checkout sources 19 | uses: actions/checkout@v4 20 | 21 | - name: Install stable toolchain 22 | uses: actions-rs/toolchain@v1 23 | with: 24 | toolchain: ${{ matrix.rust }} 25 | override: true 26 | components: llvm-tools-preview 27 | 28 | - uses: Swatinem/rust-cache@v2 29 | 30 | - name: Download grcov 31 | run: | 32 | mkdir -p "${HOME}/.local/bin" 33 | curl -sL https://github.com/mozilla/grcov/releases/download/v0.8.10/grcov-x86_64-unknown-linux-gnu.tar.bz2 | tar jxf - -C "${HOME}/.local/bin" 34 | echo "$HOME/.local/bin" >> $GITHUB_PATH 35 | - name: Run xtask coverage 36 | uses: actions-rs/cargo@v1 37 | with: 38 | command: xtask 39 | args: coverage 40 | 41 | 42 | - name: Upload to codecov.io 
43 | uses: codecov/codecov-action@v5 44 | with: 45 | files: coverage/*.lcov -------------------------------------------------------------------------------- /.github/workflows/mysql.yml: -------------------------------------------------------------------------------- 1 | name: MySQL and Integration Tests 2 | on: 3 | push: 4 | branches: 5 | - main 6 | pull_request: 7 | types: [opened, reopened, synchronize] 8 | 9 | jobs: 10 | run-tests: 11 | name: Run tests (Rust ${{matrix.toolchain}} on ${{matrix.os}}) 12 | runs-on: ${{matrix.os}}-latest 13 | strategy: 14 | fail-fast: false 15 | matrix: 16 | toolchain: [stable] 17 | os: [ubuntu] 18 | steps: 19 | - uses: actions/checkout@main 20 | 21 | - name: Install rust 22 | uses: actions-rs/toolchain@v1 23 | with: 24 | toolchain: ${{matrix.toolchain}} 25 | override: true 26 | 27 | - name: Set up protoc 28 | uses: arduino/setup-protoc@v3.0.0 29 | with: 30 | repo-token: ${{ secrets.GITHUB_TOKEN }} 31 | 32 | - name: Cargo build 33 | uses: actions-rs/cargo@v1 34 | with: 35 | command: build 36 | 37 | - name: Build the docker-compose stack 38 | run: docker compose -f docker-compose.yml up -d 39 | 40 | - name: Check running containers 41 | run: docker ps -a 42 | 43 | - name: Verify MySQL db connection 44 | run: | 45 | while ! 
docker exec akd-test-db mysql --user=root --password=example -e "SHOW DATABASES" >/dev/null 2>&1; do 46 | sleep 1 47 | done 48 | echo "MySQL container is up" 49 | 50 | - name: Check container akd-test-db logs 51 | run: docker logs akd-test-db 52 | 53 | - name: Run MySQL tests and integration tests in examples package 54 | uses: actions-rs/cargo@v1 55 | with: 56 | command: test 57 | args: --manifest-path Cargo.toml -p examples 58 | 59 | - name: Cleanup docker container 60 | run: docker compose -f docker-compose.yml down -v 61 | 62 | - name: Copy integration test logs for review 63 | run: cat examples/integration_test.log 64 | -------------------------------------------------------------------------------- /.github/workflows/publish.yml: -------------------------------------------------------------------------------- 1 | name: Publish 2 | 3 | on: 4 | release: 5 | types: [published] 6 | 7 | jobs: 8 | publish: 9 | runs-on: ${{ matrix.os }} 10 | strategy: 11 | matrix: 12 | os: [ubuntu-latest] 13 | rust: [stable] 14 | 15 | steps: 16 | - uses: hecrj/setup-rust-action@v2 17 | with: 18 | rust-version: ${{ matrix.rust }} 19 | 20 | - uses: actions/checkout@main 21 | 22 | - name: Login to crates.io 23 | run: cargo login $CRATES_IO_TOKEN 24 | env: 25 | CRATES_IO_TOKEN: ${{ secrets.crates_io_token }} 26 | 27 | - name: Dry run publish akd_core 28 | run: cargo publish --dry-run --manifest-path Cargo.toml -p akd_core 29 | 30 | - name: Publish crate akd_core 31 | run: cargo publish --manifest-path Cargo.toml -p akd_core 32 | env: 33 | CARGO_REGISTRY_TOKEN: ${{ secrets.crates_io_token }} 34 | 35 | - name: Wait for necessary akd_core version to be available 36 | run: bash ./.github/workflows/wait-for-crate-dependency.sh akd akd_core 37 | 38 | - name: Dry run publish AKD 39 | run: cargo publish --dry-run --manifest-path Cargo.toml -p akd 40 | 41 | - name: Publish crate AKD 42 | run: cargo publish --manifest-path Cargo.toml -p akd 43 | env: 44 | CARGO_REGISTRY_TOKEN: ${{ 
secrets.crates_io_token }} 45 | -------------------------------------------------------------------------------- /.github/workflows/wait-for-crate-dependency.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Finds expected version of a crate in another crate's Cargo.toml file 3 | get_crate_expected_version_number() 4 | { 5 | local INDEX_CRATE=$1 6 | local TARGET_CRATE=$2 7 | 8 | local INDEX_TOML_FILE="$INDEX_CRATE/Cargo.toml" 9 | # Issue #174. The script is looking for multiple entries if the dependency is listed multiple times 10 | # Additionally this regex with grep works for both the notations 11 | # 1. crate = { some_other_options ... version = "x.y.z" ... other_options } 12 | # 2. crate = "x.y.z" 13 | # It also supports optional pre-release suffixes in the form of "-pre.x" 14 | # 3. crate = "w.x.y-pre.z" 15 | local EXPECTED_VERSION=$(grep "$TARGET_CRATE" $INDEX_TOML_FILE | grep -o '[0-9]\.[0-9\.]\+\(-pre\.[0-9]\+\)\?'| head -n 1) 16 | echo $EXPECTED_VERSION 17 | } 18 | 19 | # Get published versions of a crate from https://github.com/rust-lang/crates.io-index/ 20 | get_crate_published_versions() 21 | { 22 | local CRATE_INDEX_URL=$1 23 | 24 | local PUBLISHED_VERSIONS=$(curl -sS "$CRATE_INDEX_URL" | jq .vers) 25 | echo "$PUBLISHED_VERSIONS" 26 | } 27 | 28 | # Retrieve the raw github url for a given crate based on the crate name following 29 | # crates.io's strange indexing strategy 30 | get_crate_raw_github_url() { 31 | local CRATE=$1 32 | 33 | local STR_LEN=$(echo "$CRATE" | wc -c) 34 | STR_LEN=$((STR_LEN - 1)) 35 | if (($STR_LEN > 3)); then 36 | local FIRST_TWO=$(echo ${CRATE:0:2}) 37 | local SECOND_TWO=$(echo ${CRATE:2:2}) 38 | echo "https://raw.githubusercontent.com/rust-lang/crates.io-index/master/$FIRST_TWO/$SECOND_TWO/$CRATE" 39 | else 40 | local FIRST_ONE=$(echo ${CRATE:0:1}) 41 | echo "https://raw.githubusercontent.com/rust-lang/crates.io-index/master/$STR_LEN/$FIRST_ONE/$CRATE" 42 | fi 43 | } 44 | 
45 | # Wait for a specific crate version to be published to crates.io. 46 | # See https://github.com/novifinancial/akd/issues/116. 47 | # Must be run in the project root folder. 48 | INDEX_CRATE=$1 49 | TARGET_CRATE=$2 50 | 51 | if [ "$INDEX_CRATE" == "" ] || [ "$TARGET_CRATE" == "" ] 52 | then 53 | echo "Both the target crate and index crate are required arguments." 54 | echo "Usage:" 55 | echo "bash ./.github/workflows/wait-for-crate-dependency.sh INDEX_CRATE TARGET_CRATE" 56 | echo " - INDEX_CRATE : The crate which contains the dependency version specification" 57 | echo " - TARGET_CRATE : The crate which version needs to be published to build the INDEX_CRATE" 58 | exit 1 59 | fi 60 | 61 | EXPECTED_VERSION=$(get_crate_expected_version_number "$INDEX_CRATE" "$TARGET_CRATE" || exit 1) 62 | echo "Expecting $TARGET_CRATE = { version = $EXPECTED_VERSION } for $INDEX_CRATE" 63 | TARGET_URL=$(get_crate_raw_github_url "$TARGET_CRATE" || exit 1) 64 | echo "Target URL for $TARGET_CRATE is $TARGET_URL" 65 | WAIT_TIME=1 66 | while sleep $WAIT_TIME; 67 | do 68 | PUBLISHED_VERSIONS=$(get_crate_published_versions "$TARGET_URL" | tr '\n' " ") 69 | echo "Available $TARGET_CRATE versions: $PUBLISHED_VERSIONS" 70 | EXISTS=$(echo $PUBLISHED_VERSIONS | grep "\"$EXPECTED_VERSION\"") 71 | if [[ $EXISTS != "" ]]; then 72 | echo "Expected version of $TARGET_CRATE ($EXPECTED_VERSION) has been published" 73 | break 74 | fi 75 | echo "Expected version of $TARGET_CRATE ($EXPECTED_VERSION) is not yet published. 
Retrying after a wait" 76 | WAIT_TIME=$((WAIT_TIME+1)) 77 | if [[ $WAIT_TIME == 42 ]]; then 78 | echo "Giving up after 42 wait periods" 79 | exit 1 80 | fi 81 | done 82 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | .vs 3 | 4 | debug/ 5 | coverage/ 6 | 7 | .vscode/** 8 | !.vscode/launch.json 9 | !.vscode/settings.json 10 | 11 | 12 | **/*/.DS_Store 13 | Cargo.lock 14 | **/Cargo.lock 15 | **/*.rs.bk 16 | 17 | **/target 18 | 19 | **/*.log 20 | **/*.profraw 21 | 22 | .idea/** 23 | *.iml 24 | -------------------------------------------------------------------------------- /.vscode/launch.json: -------------------------------------------------------------------------------- 1 | { 2 | // Use IntelliSense to learn about possible attributes. 3 | // Hover to view descriptions of existing attributes. 4 | // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387 5 | "version": "0.2.0", 6 | "configurations": [ 7 | { 8 | "type": "lldb", 9 | "request": "launch", 10 | "name": "Debug AKD Auditor User Interface", 11 | "cargo": { 12 | "args": [ 13 | "build", 14 | "--bin=akd_local_auditor", 15 | "--package=akd_local_auditor" 16 | ], 17 | }, 18 | "args": [ 19 | "ui" 20 | ], 21 | "cwd": "${workspaceFolder}" 22 | }, 23 | { 24 | "type": "lldb", 25 | "request": "launch", 26 | "name": "Debug integration tests in library 'akd_integration_tests'", 27 | "cargo": { 28 | "args": [ 29 | "test", 30 | "--no-run", 31 | "--lib", 32 | "--package=akd_integration_tests" 33 | ], 34 | }, 35 | "args": [], 36 | "cwd": "${workspaceFolder}" 37 | }, 38 | { 39 | "type": "lldb", 40 | "request": "launch", 41 | "name": "Debug unit tests in library 'akd_core'", 42 | "cargo": { 43 | "args": [ 44 | "test", 45 | "--no-run", 46 | "--lib", 47 | "--package=akd_core", 48 | "--features=protobuf,blake3" 49 | ], 50 | }, 51 | "args": 
["test_minimum_encoding_label_bytes"], 52 | "cwd": "${workspaceFolder}" 53 | }, 54 | { 55 | "type": "lldb", 56 | "request": "launch", 57 | "name": "Debug benchmark 'azks'", 58 | "cargo": { 59 | "args": [ 60 | "test", 61 | "--no-run", 62 | "--bench=azks", 63 | "--package=akd" 64 | ], 65 | "filter": { 66 | "name": "azks", 67 | "kind": "bench" 68 | } 69 | }, 70 | "args": [], 71 | "cwd": "${workspaceFolder}" 72 | }, 73 | { 74 | "type": "lldb", 75 | "request": "launch", 76 | "name": "Debug MySQL unit tests", 77 | "cargo": { 78 | "args": [ 79 | "test", 80 | "--no-run", 81 | "--lib", 82 | "--package=akd_mysql" 83 | ], 84 | "filter": { 85 | "name": "akd_mysql", 86 | "kind": "lib" 87 | } 88 | }, 89 | "args": [], 90 | "cwd": "${workspaceFolder}" 91 | }, 92 | { 93 | "type": "lldb", 94 | "request": "launch", 95 | "name": "Debug POC Repl", 96 | "cargo": { 97 | "args": [ 98 | "build", 99 | "--bin=akd_app", 100 | "--package=akd_app" 101 | ], 102 | "filter": { 103 | "name": "akd_app", 104 | "kind": "bin" 105 | } 106 | }, 107 | "args": ["-d", "--memory"], 108 | "cwd": "${workspaceFolder}" 109 | }, 110 | { 111 | "type": "lldb", 112 | "request": "launch", 113 | "name": "Debug POC Publish", 114 | "cargo": { 115 | "args": [ 116 | "build", 117 | "--bin=akd_app", 118 | "--package=akd_app" 119 | ], 120 | "filter": { 121 | "name": "akd_app", 122 | "kind": "bin" 123 | } 124 | }, 125 | "args": ["-d", "--memory", "bench-publish", "100000", "2", "true"], 126 | "cwd": "${workspaceFolder}" 127 | }, 128 | { 129 | "type": "lldb", 130 | "request": "launch", 131 | "name": "Debug POC Db Flush", 132 | "cargo": { 133 | "args": [ 134 | "build", 135 | "--bin=akd_app", 136 | "--package=akd_app" 137 | ], 138 | "filter": { 139 | "name": "akd_app", 140 | "kind": "bin" 141 | } 142 | }, 143 | "args": ["-d", "flush"], 144 | "cwd": "${workspaceFolder}" 145 | } 146 | ] 147 | } 148 | -------------------------------------------------------------------------------- /.vscode/settings.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "rust-analyzer.cargo.unsetTest": [ 3 | "core", 4 | "ed25519-dalek" 5 | ] 6 | } -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Code of Conduct 2 | 3 | ## Our Pledge 4 | 5 | In the interest of fostering an open and welcoming environment, we as 6 | contributors and maintainers pledge to make participation in our project and 7 | our community a harassment-free experience for everyone, regardless of age, body 8 | size, disability, ethnicity, sex characteristics, gender identity and expression, 9 | level of experience, education, socio-economic status, nationality, personal 10 | appearance, race, religion, or sexual identity and orientation. 11 | 12 | ## Our Standards 13 | 14 | Examples of behavior that contributes to creating a positive environment 15 | include: 16 | 17 | * Using welcoming and inclusive language 18 | * Being respectful of differing viewpoints and experiences 19 | * Gracefully accepting constructive criticism 20 | * Focusing on what is best for the community 21 | * Showing empathy towards other community members 22 | 23 | Examples of unacceptable behavior by participants include: 24 | 25 | * The use of sexualized language or imagery and unwelcome sexual attention or 26 | advances 27 | * Trolling, insulting/derogatory comments, and personal or political attacks 28 | * Public or private harassment 29 | * Publishing others' private information, such as a physical or electronic 30 | address, without explicit permission 31 | * Other conduct which could reasonably be considered inappropriate in a 32 | professional setting 33 | 34 | ## Our Responsibilities 35 | 36 | Project maintainers are responsible for clarifying the standards of acceptable 37 | behavior and are expected to take appropriate and fair corrective action in 38 | 
response to any instances of unacceptable behavior. 39 | 40 | Project maintainers have the right and responsibility to remove, edit, or 41 | reject comments, commits, code, wiki edits, issues, and other contributions 42 | that are not aligned to this Code of Conduct, or to ban temporarily or 43 | permanently any contributor for other behaviors that they deem inappropriate, 44 | threatening, offensive, or harmful. 45 | 46 | ## Scope 47 | 48 | This Code of Conduct applies within all project spaces, and it also applies when 49 | an individual is representing the project or its community in public spaces. 50 | Examples of representing a project or community include using an official 51 | project e-mail address, posting via an official social media account, or acting 52 | as an appointed representative at an online or offline event. Representation of 53 | a project may be further defined and clarified by project maintainers. 54 | 55 | ## Enforcement 56 | 57 | Instances of abusive, harassing, or otherwise unacceptable behavior may be 58 | reported by contacting the project team at . All 59 | complaints will be reviewed and investigated and will result in a response that 60 | is deemed necessary and appropriate to the circumstances. The project team is 61 | obligated to maintain confidentiality with regard to the reporter of an incident. 62 | Further details of specific enforcement policies may be posted separately. 63 | 64 | Project maintainers who do not follow or enforce the Code of Conduct in good 65 | faith may face temporary or permanent repercussions as determined by other 66 | members of the project's leadership. 
67 | 68 | ## Attribution 69 | 70 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, 71 | available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html 72 | 73 | [homepage]: https://www.contributor-covenant.org 74 | 75 | For answers to common questions about this code of conduct, see 76 | https://www.contributor-covenant.org/faq 77 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing to this library 2 | We want to make contributing to this project as easy and transparent as 3 | possible. 4 | 5 | ## Pull Requests 6 | We actively welcome your pull requests. 7 | 8 | 1. Fork the repo and create your branch from `main`. 9 | 2. If you've added code that should be tested, add tests. 10 | 3. If you've changed APIs, update the documentation. 11 | 4. Ensure the test suite passes. 12 | 5. If you haven't already, complete the Contributor License Agreement ("CLA"). 13 | 14 | ### Special note regarding MySQL based tests 15 | We support MySQL directly within this repository. In order to utilize a MySQL database you may utilize the supplied [```docker-compose.yml```](docker-compose.yml) specification. It will create a basic database (named ```default```) and configure a container with the appropriate ports opened and mapped to the MySQL port. A valid [docker](https://www.docker.com/products/docker-desktop) instance is a dependency for this tool. 16 | 17 | You can instantiate the container with 18 | ```bash 19 | cd 20 | 21 | docker compose up [-d] 22 | ``` 23 | where the ```-d``` flag indicates to background the process. If you want to run the container interactively, don't add this flag. 24 | 25 | When finished you can terminate the container you can terminate it with ```CTRL-C``` if you ran it interactively and ```docker compose down``` if you ran it with the ```-d``` flag. 
26 | 27 | The MySQL connection info for this test container is 28 | ``` 29 | MySQL port opened on local machine: 8001 30 | User: "root" 31 | Password: "example" 32 | Default database: "default" 33 | ``` 34 | 35 | You can see an example configured connection in the code [here](akd_mysql/src/mysql_db_tests.rs), line 29. 36 | 37 | ## Contributor License Agreement ("CLA") 38 | In order to accept your pull request, we need you to submit a CLA. You only need 39 | to do this once to work on any of Facebook's open source projects. 40 | 41 | Complete your CLA here: 42 | 43 | ## Issues 44 | We use GitHub issues to track public bugs. Please ensure your description is 45 | clear and has sufficient instructions to be able to reproduce the issue. 46 | 47 | Facebook has a [bounty program](https://www.facebook.com/whitehat/) for the safe 48 | disclosure of security bugs. In those cases, please go through the process 49 | outlined on that page and do not file a public issue. 50 | 51 | ## License 52 | 53 | By contributing to akd, you agree that your contributions will be 54 | licensed under both the LICENSE-MIT and LICENSE-APACHE files in the root 55 | directory of this source tree. -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [workspace] 2 | 3 | members = ["akd", "akd_core", "examples", "xtask"] 4 | resolver = "2" 5 | -------------------------------------------------------------------------------- /LICENSE-MIT: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) Meta Platforms, Inc. and affiliates. 
4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ## akd ![Build Status](https://github.com/facebook/akd/workflows/CI/badge.svg) 2 | 3 | An implementation of an auditable key directory (also known as a verifiable registry or authenticated dictionary). 4 | 5 | Auditable key directories can be used to help provide key transparency for end-to-end encrypted 6 | messaging. 7 | 8 | This implementation is based off of the protocols described in 9 | [SEEMless](https://eprint.iacr.org/2018/607), with ideas incorporated from [Parakeet](https://eprint.iacr.org/2023/081). 
10 | 11 | This library provides a stateless API for an auditable key directory, meaning that a consumer of this library must provide their own solution for the storage of the entries of the directory. 12 | 13 | Documentation 14 | ------------- 15 | 16 | The API can be found [here](https://docs.rs/akd/) along with an example for usage. To learn more about the technical details 17 | behind how the directory is constructed, see [here](https://docs.rs/akd_core/). 18 | 19 | Installation 20 | ------------ 21 | 22 | Add the following line to the dependencies of your `Cargo.toml`: 23 | 24 | ``` 25 | akd = "0.12.0-pre.11" 26 | ``` 27 | 28 | ### Minimum Supported Rust Version 29 | 30 | Rust **1.51** or higher. 31 | 32 | Top-Level Directory Organization 33 | -------------------------------- 34 | 35 | | Subfolder | On crates.io? | Description | 36 | | :--- | :---: | :--- | 37 | | `akd` | ✓ | Main implementation of AKD which a service provider that manages the underlying directory would need to run. A good starting point for diving into this implementation. | 38 | | `akd_core` | ✓ | Minimal library consisting of core operations in AKD. | 39 | | `examples` | | Contains various examples for using AKD, along with utilities such as locally verifying audit proofs that are produced by WhatsApp's key transparency deployment. More details are contained [here](examples/README.md). | 40 | | `xtask` | | Used for running the code coverage pipeline. | 41 | 42 | 43 | Audit 44 | ----- 45 | 46 | This library was audited by NCC Group in August of 2023. The audit was sponsored by Meta for its use in [WhatsApp's key transparency deployment](https://engineering.fb.com/2023/04/13/security/whatsapp-key-transparency/). 47 | 48 | The audit found issues in release `v0.9.0`, and the fixes were subsequently incorporated into release `v0.11.0`. See the [full audit report here](https://research.nccgroup.com/2023/11/14/public-report-whatsapp-auditable-key-directory-akd-implementation-review/). 
49 | 50 | Contributors 51 | ------------ 52 | 53 | The original authors of this code are 54 | Evan Au ([@afterdusk](https://github.com/afterdusk)), 55 | Alex Chernyak ([@alexme22](https://github.com/alexme22)), 56 | Dillon George ([@dillonrg](https://github.com/dillonrg)), 57 | Sean Lawlor ([@slawlor](https://github.com/slawlor)), 58 | Kevin Lewi ([@kevinlewi](https://github.com/kevinlewi)), 59 | Jasleen Malvai ([@jasleen1](https://github.com/jasleen1)), and 60 | Ercan Ozturk ([@eozturk1](https://github.com/eozturk1)). 61 | To learn more about contributing to this project, [see this document](https://github.com/facebook/akd/blob/main/CONTRIBUTING.md). 62 | 63 | License 64 | ------- 65 | 66 | This project is dual-licensed under either the [MIT license](https://github.com/facebook/akd/blob/main/LICENSE-MIT) 67 | or the [Apache License, Version 2.0](https://github.com/facebook/akd/blob/main/LICENSE-APACHE). 68 | You may select, at your option, one of the above-listed licenses. 69 | -------------------------------------------------------------------------------- /TESTING.md: -------------------------------------------------------------------------------- 1 | # Testing code changes within `akd` 2 | 3 | We are running a few types of tests within the AKD crate. They can be broken into the following categories 4 | 5 | 1. Unit tests 6 | 2. Integration tests 7 | 3. Manual testing through the proof-of-concept application 8 | 9 | ## Unit tests 10 | 11 | Unit tests are pretty simple Rust tests utilizing [built-in testing best practices](https://doc.rust-lang.org/book/ch11-01-writing-tests.html). A few caveats worth noting however: 12 | 13 | 1. If your test is going to require calling async code you'll need to use ```#[tokio::test]``` instead of ```#[test]``` as the function decorator. 
It'll look something like 14 | ```rust 15 | #[test] 16 | fn my_test() { 17 | panic!("boom"); 18 | } 19 | 20 | #[tokio::test] 21 | async fn my_async_test() { 22 | panic!("async boom!"); 23 | } 24 | ``` 25 | 2. Test organization is generally done by decorating a ```test``` or ```tests``` sub-module to the module under test with the ```#[cfg(test)]``` attribute which only makes the code included in the binary in test compilation. 26 | 3. Test log output is managed centrally with a single global startup function in [test_utils.rs](akd/src/test_utils.rs). If you're adding a new crate, you may want to add this in your crate as well to make sure you benefit from log messages when tests fail 27 | ```rust 28 | /// Global test startup constructor. Only runs in the TEST profile. Each 29 | /// crate which wants logging enabled in tests being run should make this call 30 | /// itself. 31 | #[cfg(test)] 32 | #[ctor::ctor] 33 | fn test_start() { 34 | init_logger(Level::Info); 35 | } 36 | ``` 37 | You'll need to add a dev-dependency on the `ctor` crate for this as well. 38 | 39 | ### `Storage` trait consistency testing 40 | 41 | If you write a new storage layer for the AKD crate, you can run our standard suite of storage tests by adding a dev-dependency on the `akd` crate with the following configuration 42 | 43 | ```toml 44 | [dev-dependencies] 45 | akd = { path = "../akd", version = "^0.5.0", features = ["public_tests", "serde"] } 46 | ``` 47 | 48 | which will expose a common-testing pattern with the `public_tests` feature. The [`akd_mysql`](akd_mysql/src/mysql_db_tests.rs) crate does exactly this. You can simply run ths same test-suite for your new storage implementation that we run against all of them (and you'll benefit from downstream storage testing changes as well). 
Once you've setup your storage layer in your test case you simply invoke the suite 49 | 50 | ```rust 51 | #[tokio::test] 52 | async fn my_new_storage_tests() { 53 | // setup 54 | let storage = ...; 55 | 56 | // Run the test cases (will panic if error occurs so you get a stack trace) 57 | akd::storage::tests::run_test_cases_for_storage_impl(&storage).await; 58 | 59 | // teardown / cleanup (if necessary) 60 | } 61 | ``` 62 | 63 | ## Integration tests 64 | 65 | If you want to add integration tests, they are organized in their own crate (`akd_integration_tests` in the [`integration_tests`](integration_tests/src) folder). We are still using the `#[cfg(test)]` build target and the test cases are still decorated with `#[tokio::test]`, however they run more full end-to-end test cases against real storage implementations. 66 | 67 | The test organization is pretty straightforward. We have a common test structure defined in [`test_util.rs`](integration_tests/src/test_util.rs) as `directory_test_suite` which takes a database, number of users for the test, and VRF signing function. You can add tests in this location, and it is assuming the storage layer has been initialized and is ready for use. This is a common test-flow for all storage implementations we provide to make sure we don't break compatability with new implementations. 68 | 69 | You can additionally add a new data-layer to the integration tests by adding a dev-dependency in the `akd_integration_tests` crate and adding a new `_tests.rs` file along with referencing it in [`lib.rs`](integration_tests/src/lib.rs). 70 | 71 | ## Manual testing 72 | 73 | We additionally have a "proof-of-concept" (POC) application in the [`poc`](poc/src) folder. This application is a small command-line REPL (read-eval-print-loop) application to interact directly with an AKD hosted in a variety of configurations. 
You can see all the command line options and experiment with the app with 74 | 75 | ```bash 76 | > cargo run -- --help 77 | ...truncated... 78 | akd_app 0.0.0 79 | applicationModes 80 | 81 | USAGE: 82 | akd_app [FLAGS] [OPTIONS] [SUBCOMMAND] 83 | 84 | FLAGS: 85 | -d, --debug Activate debuging mode 86 | --memory The database implementation to utilize 87 | -h, --help Prints help information 88 | -V, --version Prints version information 89 | 90 | OPTIONS: 91 | -l, --log_level 92 | [default: Info] [possible values: Error, Warn, Info, Debug, Trace] 93 | 94 | -m, --multirow_size [default: 100] 95 | 96 | SUBCOMMANDS: 97 | bench-db-insert Benchmark database insertion 98 | bench-lookup Benchmark lookup API 99 | bench-publish Benchmark publish API 100 | drop Drop existing database tables (for schema migration etc.) 101 | flush Flush data from database tables 102 | help Prints this message or the help of the given subcommand(s) 103 | ``` 104 | 105 | Note: The actual output of the command may differ if its arguments have been updated since this document was written. 106 | 107 | # Running tests 108 | 109 | Tests are run a few ways for this repository. 110 | 111 | ## CI pipeline 112 | 113 | We have a [CI workflow](.github/ci.yml) which will run on any pull request. If you're adding special compilation flags to crates, you may need to add test coverage here for PRs for future devs. 114 | 115 | ## Local testing 116 | 117 | Local testing is pretty straightforward with the standard Rust practice of 118 | 119 | ```bash 120 | cargo test 121 | ``` 122 | 123 | run at the root of the repository. This will run all of the tests from all of the crates utilizing the default features for all crates. If you're trying to test just a single crate you can run 124 | 125 | ```bash 126 | cargo test --package akd 127 | ``` 128 | 129 | to isolate what runs (some of the integration tests take some time to run and require a live Docker instance for example). 
You can optionally `cd` into a specific crate's root folder and run the tests for that crate there specifically. Example 130 | 131 | ```bash 132 | cd akd 133 | cargo test 134 | ``` 135 | 136 | is equivalent. Otherwise the full Rust suite of testing options with [Cargo Test](https://doc.rust-lang.org/cargo/commands/cargo-test.html) are available as well in this repo. Feel free to run the suite as you see fit. Another common adjustment done in this repository worth nothing is the used of specific features. For example, to test the `akd` crate with no verifiable random function (VRF) implementation, you can use the arguments 137 | 138 | ```bash 139 | cargo test --package akd --no-default-features --features public_tests 140 | ``` 141 | 142 | which will disable the feature `vrf`, effectively running code paths tagged with 143 | 144 | ```rust 145 | #[cfg(not(feature = "vrf"))] 146 | ``` 147 | 148 | See [no_vrf.rs](akd/src/ecvrf/no_vrf.rs) for an example of this in practice. 149 | -------------------------------------------------------------------------------- /akd/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "akd" 3 | version = "0.12.0-pre.12" 4 | authors = ["akd contributors"] 5 | description = "An implementation of an auditable key directory" 6 | license = "MIT OR Apache-2.0" 7 | edition = "2021" 8 | keywords = ["key-transparency", "akd"] 9 | repository = "https://github.com/facebook/akd" 10 | readme = "../README.md" 11 | 12 | [features] 13 | # Supported configurations 14 | whatsapp_v1 = ["akd_core/whatsapp_v1"] 15 | experimental = ["akd_core/experimental"] 16 | 17 | # Default features mix (experimental + audit-proof protobuf mgmt support) 18 | default = [ 19 | "public_auditing", 20 | "parallel_vrf", 21 | "preload_history", 22 | "greedy_lookup_preload", 23 | "experimental", 24 | ] 25 | 26 | bench = ["experimental", "public_tests", "tokio/rt-multi-thread"] 27 | # Greedy loading of lookup proof 
nodes 28 | greedy_lookup_preload = [] 29 | public_auditing = ["dep:protobuf", "akd_core/protobuf"] 30 | # Parallelize VRF calculations during publish 31 | parallel_vrf = ["akd_core/parallel_vrf"] 32 | # Enable pre-loading of the nodes when generating history proofs 33 | preload_history = [] 34 | public_tests = [ 35 | "rand", 36 | "dep:colored", 37 | "dep:once_cell", 38 | "serde_serialization", 39 | "akd_core/public_tests", 40 | "akd_core/rand", 41 | "dep:paste", 42 | ] 43 | rand = ["dep:rand"] 44 | # Collect runtime metrics on db access calls + timing 45 | runtime_metrics = [] 46 | serde_serialization = ["dep:serde", "akd_core/serde_serialization"] 47 | # TESTING ONLY: Artifically slow the in-memory database (for benchmarking) 48 | slow_internal_db = [] 49 | # Tracing instrumentation 50 | tracing = ["dep:tracing"] 51 | # Tracing-based instrumentation 52 | tracing_instrument = ["tracing/attributes"] 53 | 54 | [dependencies] 55 | ## Required dependencies ## 56 | akd_core = { version = "0.12.0-pre.12", path = "../akd_core", default-features = false, features = [ 57 | "vrf", 58 | ] } 59 | async-recursion = "1" 60 | async-trait = "0.1" 61 | dashmap = "5" 62 | hex = "0.4" 63 | log = { version = "0.4", features = ["kv_unstable"] } 64 | tokio = { version = "1", features = ["sync", "time", "rt"] } 65 | 66 | ## Optional dependencies ## 67 | colored = { version = "2", optional = true } 68 | once_cell = { version = "1", optional = true } 69 | paste = { version = "1", optional = true } 70 | protobuf = { version = "3", optional = true } 71 | rand = { version = "0.8", optional = true } 72 | serde = { version = "1", features = ["derive"], optional = true } 73 | tracing = { version = "0.1.40", optional = true } 74 | 75 | [dev-dependencies] 76 | criterion = "0.5" 77 | serial_test = "2" 78 | proptest = "1" 79 | proptest-derive = "0.4" 80 | colored = "2" 81 | once_cell = "1" 82 | ctor = "0.2" 83 | tokio-test = "0.4" 84 | tokio = { version = "1", features = ["rt", "sync", "time", 
"macros"] } 85 | mockall = "0.11" 86 | futures = "0.3" 87 | itertools = "0.11" 88 | 89 | # To enable the public_tests feature in tests 90 | akd = { path = ".", features = [ 91 | "public_tests", 92 | "whatsapp_v1", 93 | "experimental", 94 | ], default-features = false } 95 | 96 | [[bench]] 97 | name = "azks" 98 | harness = false 99 | required-features = ["bench"] 100 | 101 | [[bench]] 102 | name = "directory" 103 | harness = false 104 | required-features = ["bench"] 105 | -------------------------------------------------------------------------------- /akd/benches/azks.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) Meta Platforms, Inc. and affiliates. 2 | // 3 | // This source code is dual-licensed under either the MIT license found in the 4 | // LICENSE-MIT file in the root directory of this source tree or the Apache 5 | // License, Version 2.0 found in the LICENSE-APACHE file in the root directory 6 | // of this source tree. You may select, at your option, one of the above-listed licenses. 
7 | 8 | #[macro_use] 9 | extern crate criterion; 10 | 11 | mod common; 12 | 13 | use akd::append_only_zks::{AzksParallelismConfig, InsertMode}; 14 | use akd::auditor; 15 | use akd::storage::manager::StorageManager; 16 | use akd::storage::memory::AsyncInMemoryDatabase; 17 | use akd::NamedConfiguration; 18 | use akd::{Azks, AzksElement, AzksValue, NodeLabel}; 19 | use criterion::{BatchSize, Criterion}; 20 | use rand::rngs::StdRng; 21 | use rand::{Rng, SeedableRng}; 22 | 23 | bench_config!(batch_insertion); 24 | fn batch_insertion(c: &mut Criterion) { 25 | let num_initial_leaves = 1000; 26 | let num_inserted_leaves = 1000; 27 | 28 | let mut rng = StdRng::seed_from_u64(42); 29 | let runtime = tokio::runtime::Builder::new_multi_thread().build().unwrap(); 30 | 31 | // prepare node set for initial leaves 32 | let initial_node_set = gen_nodes(&mut rng, num_initial_leaves); 33 | 34 | // prepare node set for batch insertion 35 | let node_set = gen_nodes(&mut rng, num_inserted_leaves); 36 | 37 | // benchmark batch insertion 38 | let id = format!( 39 | "Batch insertion ({} initial leaves, {} inserted leaves) ({})", 40 | num_initial_leaves, 41 | num_inserted_leaves, 42 | TC::name(), 43 | ); 44 | c.bench_function(&id, move |b| { 45 | b.iter_batched( 46 | || { 47 | let database = AsyncInMemoryDatabase::new(); 48 | let db = StorageManager::new(database, None, None, None); 49 | let mut azks = runtime.block_on(Azks::new::(&db)).unwrap(); 50 | 51 | // create transaction object 52 | db.begin_transaction(); 53 | 54 | // insert initial leaves as part of setup 55 | runtime 56 | .block_on(azks.batch_insert_nodes::( 57 | &db, 58 | initial_node_set.clone(), 59 | InsertMode::Directory, 60 | AzksParallelismConfig::default(), 61 | )) 62 | .unwrap(); 63 | (azks, db, node_set.clone()) 64 | }, 65 | |(mut azks, db, node_set)| { 66 | runtime 67 | .block_on(azks.batch_insert_nodes::( 68 | &db, 69 | node_set, 70 | InsertMode::Directory, 71 | AzksParallelismConfig::default(), 72 | )) 73 | .unwrap(); 
74 | }, 75 | BatchSize::PerIteration, 76 | ); 77 | }); 78 | } 79 | 80 | bench_config!(audit_verify); 81 | fn audit_verify(c: &mut Criterion) { 82 | let num_initial_leaves = 10000; 83 | let num_inserted_leaves = 10000; 84 | 85 | let mut rng = StdRng::seed_from_u64(42); 86 | let runtime = tokio::runtime::Builder::new_multi_thread().build().unwrap(); 87 | 88 | // prepare node sets for start and end epochs 89 | let initial_node_set = gen_nodes(&mut rng, num_initial_leaves); 90 | let node_set = gen_nodes(&mut rng, num_inserted_leaves); 91 | 92 | // benchmark audit verify 93 | let id = format!( 94 | "Audit verify (epoch 1: {} leaves, epoch 2: {} leaves) ({})", 95 | num_initial_leaves, 96 | num_inserted_leaves, 97 | TC::name(), 98 | ); 99 | c.bench_function(&id, move |b| { 100 | b.iter_batched( 101 | || { 102 | let database = AsyncInMemoryDatabase::new(); 103 | let db = StorageManager::new(database, None, None, None); 104 | let mut azks = runtime.block_on(Azks::new::(&db)).unwrap(); 105 | 106 | // epoch 1 107 | runtime 108 | .block_on(azks.batch_insert_nodes::( 109 | &db, 110 | initial_node_set.clone(), 111 | InsertMode::Directory, 112 | AzksParallelismConfig::default(), 113 | )) 114 | .unwrap(); 115 | 116 | let start_hash = runtime.block_on(azks.get_root_hash::(&db)).unwrap(); 117 | 118 | // epoch 2 119 | runtime 120 | .block_on(azks.batch_insert_nodes::( 121 | &db, 122 | node_set.clone(), 123 | InsertMode::Directory, 124 | AzksParallelismConfig::default(), 125 | )) 126 | .unwrap(); 127 | 128 | let end_hash = runtime.block_on(azks.get_root_hash::(&db)).unwrap(); 129 | let proof = runtime 130 | .block_on(azks.get_append_only_proof::( 131 | &db, 132 | 1, 133 | 2, 134 | AzksParallelismConfig::default(), 135 | )) 136 | .unwrap(); 137 | 138 | (start_hash, end_hash, proof) 139 | }, 140 | |(start_hash, end_hash, proof)| { 141 | runtime 142 | .block_on(auditor::audit_verify::( 143 | vec![start_hash, end_hash], 144 | proof, 145 | )) 146 | .unwrap(); 147 | }, 148 | 
BatchSize::PerIteration, 149 | ); 150 | }); 151 | } 152 | 153 | bench_config!(audit_generate); 154 | fn audit_generate(c: &mut Criterion) { 155 | let num_leaves = 10000; 156 | let num_epochs = 100; 157 | 158 | let mut rng = StdRng::seed_from_u64(42); 159 | let runtime = tokio::runtime::Builder::new_multi_thread().build().unwrap(); 160 | 161 | let database = AsyncInMemoryDatabase::new(); 162 | let db = StorageManager::new(database, None, None, None); 163 | let mut azks = runtime.block_on(Azks::new::(&db)).unwrap(); 164 | 165 | // publish 10 epochs 166 | for _epoch in 0..num_epochs { 167 | let node_set = gen_nodes(&mut rng, num_leaves); 168 | runtime 169 | .block_on(azks.batch_insert_nodes::( 170 | &db, 171 | node_set, 172 | InsertMode::Directory, 173 | AzksParallelismConfig::default(), 174 | )) 175 | .unwrap(); 176 | } 177 | let epoch = azks.get_latest_epoch(); 178 | 179 | // benchmark audit verify 180 | let id = format!( 181 | "Audit proof generation. {num_leaves} leaves over {num_epochs} epochs ({})", 182 | TC::name() 183 | ); 184 | c.bench_function(&id, move |b| { 185 | b.iter_batched( 186 | || {}, 187 | |_| { 188 | let _proof = runtime 189 | .block_on(azks.get_append_only_proof::( 190 | &db, 191 | epoch - 1, 192 | epoch, 193 | AzksParallelismConfig::default(), 194 | )) 195 | .unwrap(); 196 | }, 197 | BatchSize::PerIteration, 198 | ); 199 | }); 200 | } 201 | 202 | fn gen_nodes(rng: &mut impl Rng, num_nodes: usize) -> Vec { 203 | (0..num_nodes) 204 | .map(|_| { 205 | let label = NodeLabel { 206 | label_val: rng.gen::<[u8; 32]>(), 207 | label_len: 256, 208 | }; 209 | let value = AzksValue(rng.gen::<[u8; 32]>()); 210 | AzksElement { label, value } 211 | }) 212 | .collect() 213 | } 214 | 215 | group_config!(azks_benches, batch_insertion, audit_verify, audit_generate); 216 | 217 | fn main() { 218 | // NOTE(new_config): Add a new configuration here 219 | 220 | #[cfg(feature = "whatsapp_v1")] 221 | azks_benches_whatsapp_v1_config(); 222 | #[cfg(feature = 
"experimental")] 223 | azks_benches_experimental_config(); 224 | 225 | Criterion::default().configure_from_args().final_summary(); 226 | } 227 | -------------------------------------------------------------------------------- /akd/benches/common.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) Meta Platforms, Inc. and affiliates. 2 | // 3 | // This source code is dual-licensed under either the MIT license found in the 4 | // LICENSE-MIT file in the root directory of this source tree or the Apache 5 | // License, Version 2.0 found in the LICENSE-APACHE file in the root directory 6 | // of this source tree. You may select, at your option, one of the above-listed licenses. 7 | 8 | #[macro_export] 9 | macro_rules! bench_config { 10 | ( $x:ident ) => { 11 | paste::paste! { 12 | // NOTE(new_config): Add a new configuration here 13 | 14 | #[cfg(feature = "whatsapp_v1")] 15 | fn [<$x _ whatsapp_v1_config>](c: &mut Criterion) { 16 | $x::(c) 17 | } 18 | 19 | #[cfg(feature = "experimental")] 20 | fn [<$x _ experimental_config>](c: &mut Criterion) { 21 | $x::>(c) 22 | } 23 | } 24 | }; 25 | } 26 | 27 | #[macro_export] 28 | macro_rules! group_config { 29 | ( $( $group:path ),+ $(,)* ) => { 30 | paste::paste! { 31 | // NOTE(new_config): Add a new configuration here 32 | 33 | #[cfg(feature = "whatsapp_v1")] 34 | criterion_group!( 35 | $( 36 | [<$group _ whatsapp_v1_config>], 37 | )+ 38 | ); 39 | 40 | #[cfg(feature = "experimental")] 41 | criterion_group!( 42 | $( 43 | [<$group _ experimental_config>], 44 | )+ 45 | ); 46 | } 47 | }; 48 | } 49 | -------------------------------------------------------------------------------- /akd/benches/directory.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) Meta Platforms, Inc. and affiliates. 
2 | // 3 | // This source code is dual-licensed under either the MIT license found in the 4 | // LICENSE-MIT file in the root directory of this source tree or the Apache 5 | // License, Version 2.0 found in the LICENSE-APACHE file in the root directory 6 | // of this source tree. You may select, at your option, one of the above-listed licenses. 7 | 8 | #[macro_use] 9 | extern crate criterion; 10 | 11 | mod common; 12 | 13 | use akd::append_only_zks::AzksParallelismConfig; 14 | use akd::ecvrf::HardCodedAkdVRF; 15 | use akd::storage::manager::StorageManager; 16 | use akd::storage::memory::AsyncInMemoryDatabase; 17 | use akd::NamedConfiguration; 18 | use akd::{AkdLabel, AkdValue, Directory}; 19 | use criterion::{BatchSize, Criterion}; 20 | use rand::distributions::Alphanumeric; 21 | use rand::rngs::StdRng; 22 | use rand::{Rng, SeedableRng}; 23 | 24 | bench_config!(history_generation); 25 | fn history_generation(c: &mut Criterion) { 26 | let num_users = 1000; 27 | let num_updates = 10; 28 | let runtime = tokio::runtime::Builder::new_multi_thread() 29 | .enable_time() 30 | .build() 31 | .unwrap(); 32 | 33 | let idata = (1..num_users) 34 | .into_iter() 35 | .map(|i| { 36 | let user = format!("User {}", i); 37 | AkdLabel::from(&user) 38 | }) 39 | .collect::>(); 40 | 41 | let id = format!( 42 | "Benchmark key history proof generation on a small tree ({})", 43 | TC::name() 44 | ); 45 | 46 | c.bench_function(&id, move |b| { 47 | b.iter_batched( 48 | || { 49 | let mut rng = StdRng::seed_from_u64(42); 50 | let database = AsyncInMemoryDatabase::new(); 51 | let vrf = HardCodedAkdVRF {}; 52 | let db = StorageManager::new( 53 | database, 54 | Some(std::time::Duration::from_secs(60)), 55 | None, 56 | Some(std::time::Duration::from_secs(60)), 57 | ); 58 | let db_clone = db.clone(); 59 | let directory = runtime 60 | .block_on(async move { 61 | Directory::::new(db, vrf, AzksParallelismConfig::default()).await 62 | }) 63 | .unwrap(); 64 | 65 | for _epoch in 1..num_updates { 66 | let 
value: String = (0..rng.gen_range(10..20)) 67 | .map(|_| rng.sample(&Alphanumeric)) 68 | .map(char::from) 69 | .collect(); 70 | let data = idata 71 | .iter() 72 | .map(|k| (k.clone(), AkdValue::from(&value))) 73 | .collect::>(); 74 | runtime.block_on(directory.publish(data)).unwrap(); 75 | } 76 | 77 | (directory, db_clone) 78 | }, 79 | |(directory, db)| { 80 | // flush the cache prior to each generation to get fresh results 81 | runtime.block_on(db.flush_cache()); 82 | 83 | // generate for the most recent 10 updates 84 | let label = AkdLabel::from("User 1"); 85 | let params = akd::HistoryParams::MostRecent(5); 86 | runtime 87 | .block_on(directory.key_history(&label, params)) 88 | .unwrap(); 89 | }, 90 | BatchSize::PerIteration, 91 | ); 92 | }); 93 | } 94 | 95 | group_config!(directory_benches, history_generation); 96 | 97 | fn main() { 98 | // NOTE(new_config): Add a new configuration here 99 | 100 | #[cfg(feature = "whatsapp_v1")] 101 | directory_benches_whatsapp_v1_config(); 102 | #[cfg(feature = "experimental")] 103 | directory_benches_experimental_config(); 104 | 105 | Criterion::default().configure_from_args().final_summary(); 106 | } 107 | -------------------------------------------------------------------------------- /akd/src/auditor.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) Meta Platforms, Inc. and affiliates. 2 | // 3 | // This source code is dual-licensed under either the MIT license found in the 4 | // LICENSE-MIT file in the root directory of this source tree or the Apache 5 | // License, Version 2.0 found in the LICENSE-APACHE file in the root directory 6 | // of this source tree. You may select, at your option, one of the above-listed licenses. 7 | 8 | //! 
Code for an auditor of an authenticated key directory
58 | #[cfg_attr(feature = "tracing_instrument", tracing::instrument(skip_all))] 59 | pub async fn verify_consecutive_append_only( 60 | proof: &SingleAppendOnlyProof, 61 | start_hash: Digest, 62 | end_hash: Digest, 63 | end_epoch: u64, 64 | ) -> Result<(), AkdError> { 65 | let db = AsyncInMemoryDatabase::new(); 66 | let manager = StorageManager::new_no_cache(db); 67 | 68 | let mut azks = Azks::new::(&manager).await?; 69 | azks.batch_insert_nodes::( 70 | &manager, 71 | proof.unchanged_nodes.clone(), 72 | InsertMode::Auditor, 73 | AzksParallelismConfig::default(), 74 | ) 75 | .await?; 76 | let computed_start_root_hash: Digest = azks.get_root_hash::(&manager).await?; 77 | let mut verified = computed_start_root_hash == start_hash; 78 | azks.latest_epoch = end_epoch - 1; 79 | let updated_inserted = proof 80 | .inserted 81 | .iter() 82 | .map(|x| { 83 | let mut y = *x; 84 | y.value = AzksValue(TC::hash_leaf_with_commitment(x.value, end_epoch).0); 85 | y 86 | }) 87 | .collect(); 88 | azks.batch_insert_nodes::( 89 | &manager, 90 | updated_inserted, 91 | InsertMode::Auditor, 92 | AzksParallelismConfig::default(), 93 | ) 94 | .await?; 95 | let computed_end_root_hash: Digest = azks.get_root_hash::(&manager).await?; 96 | verified = verified && (computed_end_root_hash == end_hash); 97 | if !verified { 98 | return Err(AkdError::AzksErr(AzksError::VerifyAppendOnlyProof)); 99 | } 100 | Ok(()) 101 | } 102 | -------------------------------------------------------------------------------- /akd/src/client.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) Meta Platforms, Inc. and affiliates. 2 | // 3 | // This source code is dual-licensed under either the MIT license found in the 4 | // LICENSE-MIT file in the root directory of this source tree or the Apache 5 | // License, Version 2.0 found in the LICENSE-APACHE file in the root directory 6 | // of this source tree. You may select, at your option, one of the above-listed licenses. 
7 | 8 | //! Code for a client of an auditable key directory
2 | // 3 | // This source code is dual-licensed under either the MIT license found in the 4 | // LICENSE-MIT file in the root directory of this source tree or the Apache 5 | // License, Version 2.0 found in the LICENSE-APACHE file in the root directory 6 | // of this source tree. You may select, at your option, one of the above-listed licenses. 7 | 8 | //! This module handles the caching implementation and testing for a time-based cache 9 | //! which supports memory pressure shedding 10 | 11 | use crate::storage::DbRecord; 12 | use std::time::Instant; 13 | 14 | #[cfg(test)] 15 | mod tests; 16 | 17 | /// items live for 30s by default 18 | pub(crate) const DEFAULT_ITEM_LIFETIME_MS: u64 = 30000; 19 | /// clean the cache every 15s by default 20 | pub(crate) const DEFAULT_CACHE_CLEAN_FREQUENCY_MS: u64 = 15000; 21 | 22 | pub(crate) struct CachedItem { 23 | pub(crate) expiration: Instant, 24 | pub(crate) data: DbRecord, 25 | } 26 | 27 | impl akd_core::SizeOf for CachedItem { 28 | fn size_of(&self) -> usize { 29 | // the size of an "Instant" varies based on the underlying implementation, so 30 | // we assume the largest which is 16 bytes on linux 31 | 16 + self.data.size_of() 32 | } 33 | } 34 | 35 | // -------- sub modules -------- // 36 | 37 | pub mod high_parallelism; 38 | 39 | // -------- cache exports -------- // 40 | 41 | pub use high_parallelism::TimedCache; 42 | -------------------------------------------------------------------------------- /akd/src/storage/cache/tests.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) Meta Platforms, Inc. and affiliates. 2 | // 3 | // This source code is dual-licensed under either the MIT license found in the 4 | // LICENSE-MIT file in the root directory of this source tree or the Apache 5 | // License, Version 2.0 found in the LICENSE-APACHE file in the root directory 6 | // of this source tree. You may select, at your option, one of the above-listed licenses. 7 | 8 | //! 
Caching tests 9 | 10 | use super::*; 11 | use std::time::Duration; 12 | 13 | use crate::storage::types::{ValueState, ValueStateKey}; 14 | use crate::storage::DbRecord; 15 | use crate::{AkdLabel, AkdValue, NodeLabel}; 16 | 17 | #[tokio::test] 18 | async fn test_cache_put_and_expires() { 19 | let cache = TimedCache::new( 20 | Some(Duration::from_millis(10)), 21 | None, 22 | Some(Duration::from_millis(50)), 23 | ); 24 | 25 | let value_state = DbRecord::ValueState(ValueState { 26 | epoch: 1, 27 | version: 1, 28 | label: NodeLabel { 29 | label_len: 1, 30 | label_val: [0u8; 32], 31 | }, 32 | value: AkdValue::from("some value"), 33 | username: AkdLabel::from("user"), 34 | }); 35 | let key = ValueStateKey(AkdLabel::from("user").0.to_vec(), 1); 36 | cache.put(&value_state).await; 37 | 38 | let got = cache.hit_test::(&key).await; 39 | assert!(got.is_some()); 40 | assert_eq!(Some(value_state), got); 41 | 42 | tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; 43 | let got = cache.hit_test::(&key).await; 44 | assert_eq!(None, got); 45 | } 46 | 47 | #[tokio::test] 48 | async fn test_cache_overwrite() { 49 | let cache = TimedCache::new(Some(Duration::from_millis(1000)), None, None); 50 | 51 | let value_state = ValueState { 52 | epoch: 1, 53 | version: 1, 54 | label: NodeLabel { 55 | label_len: 1, 56 | label_val: [0u8; 32], 57 | }, 58 | value: AkdValue::from("some value"), 59 | username: AkdLabel::from("user"), 60 | }; 61 | let key = ValueStateKey(AkdLabel::from("user").0.to_vec(), 1); 62 | 63 | let value_state_2 = ValueState { 64 | epoch: 1, 65 | version: 2, 66 | label: NodeLabel { 67 | label_len: 2, 68 | label_val: [0u8; 32], 69 | }, 70 | value: AkdValue::from("some value"), 71 | username: AkdLabel::from("user"), 72 | }; 73 | cache.put(&DbRecord::ValueState(value_state)).await; 74 | cache 75 | .put(&DbRecord::ValueState(value_state_2.clone())) 76 | .await; 77 | 78 | let got = cache.hit_test::(&key).await; 79 | 
assert_eq!(Some(DbRecord::ValueState(value_state_2)), got); 80 | } 81 | 82 | #[tokio::test] 83 | async fn test_cache_memory_pressure() { 84 | let cache = TimedCache::new( 85 | Some(Duration::from_millis(1000)), 86 | Some(10), 87 | Some(Duration::from_millis(50)), 88 | ); 89 | 90 | let value_state = DbRecord::ValueState(ValueState { 91 | epoch: 1, 92 | version: 1, 93 | label: NodeLabel { 94 | label_len: 1, 95 | label_val: [0u8; 32], 96 | }, 97 | value: AkdValue::from("some value"), 98 | username: AkdLabel::from("user"), 99 | }); 100 | let key = ValueStateKey(AkdLabel::from("user").0.to_vec(), 1); 101 | cache.put(&value_state).await; 102 | 103 | // we only do an "automated" clean every 50ms in test, which is when memory pressure is evaluated. 104 | // 100ms will make sure the clean op will run on the next `hit_test` op 105 | tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; 106 | // This get should return none, even though the cache expiration time is 1s. This is because 107 | // we should exceed 10 bytes of storage utilization so the cache should clean the item. 108 | let got = cache.hit_test::(&key).await; 109 | assert_eq!(None, got); 110 | } 111 | 112 | #[tokio::test] 113 | async fn test_many_memory_pressure() { 114 | let cache = TimedCache::new( 115 | Some(Duration::from_millis(1000)), 116 | Some(1024 * 5), 117 | Some(Duration::from_millis(50)), 118 | ); 119 | 120 | let value_states = (1..100) 121 | .map(|i| ValueState { 122 | epoch: i as u64, 123 | version: i as u64, 124 | label: NodeLabel { 125 | label_len: 1, 126 | label_val: [0u8; 32], 127 | }, 128 | value: AkdValue::from("test"), 129 | username: AkdLabel::from("user"), 130 | }) 131 | .map(DbRecord::ValueState) 132 | .collect::>(); 133 | 134 | cache.batch_put(&value_states).await; 135 | 136 | // we only do an "automated" clean every 50ms in test, which is when memory pressure is evaluated. 
137 | // 100ms will make sure the clean op will run on the next `hit_test` op 138 | tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; 139 | 140 | let all = cache.get_all().await; 141 | assert!(all.len() < 99); 142 | } 143 | -------------------------------------------------------------------------------- /akd/src/storage/mod.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) Meta Platforms, Inc. and affiliates. 2 | // 3 | // This source code is dual-licensed under either the MIT license found in the 4 | // LICENSE-MIT file in the root directory of this source tree or the Apache 5 | // License, Version 2.0 found in the LICENSE-APACHE file in the root directory 6 | // of this source tree. You may select, at your option, one of the above-listed licenses. 7 | 8 | //! Storage module for a auditable key directory 9 | 10 | use crate::errors::StorageError; 11 | use crate::storage::types::{DbRecord, StorageType}; 12 | use crate::{AkdLabel, AkdValue}; 13 | 14 | use async_trait::async_trait; 15 | #[cfg(feature = "serde_serialization")] 16 | use serde::{de::DeserializeOwned, Serialize}; 17 | use std::collections::HashMap; 18 | use std::hash::Hash; 19 | use std::marker::{Send, Sync}; 20 | 21 | pub mod cache; 22 | pub mod transaction; 23 | pub mod types; 24 | 25 | /* 26 | Various implementations supported by the library are imported here and usable at various checkpoints 27 | */ 28 | pub mod manager; 29 | pub mod memory; 30 | 31 | pub use manager::StorageManager; 32 | 33 | #[cfg(any(test, feature = "public_tests"))] 34 | pub mod tests; 35 | 36 | /// Denotes the "state" when a batch_set is being called in the data layer 37 | pub enum DbSetState { 38 | /// Being called as part of a transaction commit operation 39 | TransactionCommit, 40 | /// Being called as a general, in-line operation 41 | General, 42 | } 43 | 44 | /// Storable represents an _item_ which can be stored in the storage layer 45 | #[cfg(feature = 
"serde_serialization")] 46 | pub trait Storable: Clone + Serialize + DeserializeOwned + Sync + 'static { 47 | /// This particular storage will have a key type 48 | type StorageKey: Clone + Serialize + Eq + Hash + Send + Sync + std::fmt::Debug; 49 | 50 | /// Must return a valid storage type 51 | fn data_type() -> StorageType; 52 | 53 | /// Retrieve an instance of the id of this storable. The combination of the 54 | /// storable's StorageType and this id are _globally_ unique 55 | fn get_id(&self) -> Self::StorageKey; 56 | 57 | /// Retrieve the full binary version of a key (for comparisons) 58 | fn get_full_binary_id(&self) -> Vec { 59 | Self::get_full_binary_key_id(&self.get_id()) 60 | } 61 | 62 | /// Retrieve the full binary version of a key (for comparisons) 63 | fn get_full_binary_key_id(key: &Self::StorageKey) -> Vec; 64 | 65 | /// Reformat a key from the full-binary specification 66 | fn key_from_full_binary(bin: &[u8]) -> Result; 67 | } 68 | 69 | /// Storable represents an _item_ which can be stored in the storage layer 70 | #[cfg(not(feature = "serde_serialization"))] 71 | pub trait Storable: Clone + Sync + 'static { 72 | /// This particular storage will have a key type 73 | type StorageKey: Clone + Eq + Hash + Send + Sync + std::fmt::Debug; 74 | 75 | /// Must return a valid storage type 76 | fn data_type() -> StorageType; 77 | 78 | /// Retrieve an instance of the id of this storable. 
The combination of the 79 | /// storable's StorageType and this id are _globally_ unique 80 | fn get_id(&self) -> Self::StorageKey; 81 | 82 | /// Retrieve the full binary version of a key (for comparisons) 83 | fn get_full_binary_id(&self) -> Vec { 84 | Self::get_full_binary_key_id(&self.get_id()) 85 | } 86 | 87 | /// Retrieve the full binary version of a key (for comparisons) 88 | fn get_full_binary_key_id(key: &Self::StorageKey) -> Vec; 89 | 90 | /// Reformat a key from the full-binary specification 91 | fn key_from_full_binary(bin: &[u8]) -> Result; 92 | } 93 | 94 | /// A database implementation backing storage for the AKD 95 | #[async_trait] 96 | pub trait Database: Send + Sync { 97 | /// Set a record in the database 98 | async fn set(&self, record: DbRecord) -> Result<(), StorageError>; 99 | 100 | /// Set multiple records in the database with a minimal set of operations 101 | async fn batch_set( 102 | &self, 103 | records: Vec, 104 | state: DbSetState, 105 | ) -> Result<(), StorageError>; 106 | 107 | /// Retrieve a stored record from the database 108 | async fn get(&self, id: &St::StorageKey) -> Result; 109 | 110 | /// Retrieve a batch of records by id from the database 111 | async fn batch_get( 112 | &self, 113 | ids: &[St::StorageKey], 114 | ) -> Result, StorageError>; 115 | 116 | /* User data searching */ 117 | 118 | /// Retrieve the user data for a given user 119 | async fn get_user_data(&self, username: &AkdLabel) -> Result; 120 | 121 | /// Retrieve a specific state for a given user 122 | async fn get_user_state( 123 | &self, 124 | username: &AkdLabel, 125 | flag: types::ValueStateRetrievalFlag, 126 | ) -> Result; 127 | 128 | /// Retrieve the user -> state version mapping in bulk. 
This is the same as get_user_states but with less data retrieved from the storage layer 129 | async fn get_user_state_versions( 130 | &self, 131 | usernames: &[AkdLabel], 132 | flag: types::ValueStateRetrievalFlag, 133 | ) -> Result, StorageError>; 134 | } 135 | 136 | /// Optional storage layer utility functions for debug and test purposes 137 | #[async_trait] 138 | pub trait StorageUtil: Database { 139 | /// Retrieves all stored records of a given type from the data layer, ignoring any caching or transaction pending 140 | async fn batch_get_type_direct(&self) -> Result, StorageError>; 141 | 142 | /// Retrieves all stored records from the data layer, ignoring any caching or transaction pending 143 | async fn batch_get_all_direct(&self) -> Result, StorageError>; 144 | } 145 | -------------------------------------------------------------------------------- /akd/src/test_utils.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) Meta Platforms, Inc. and affiliates. 2 | // 3 | // This source code is dual-licensed under either the MIT license found in the 4 | // LICENSE-MIT file in the root directory of this source tree or the Apache 5 | // License, Version 2.0 found in the LICENSE-APACHE file in the root directory 6 | // of this source tree. You may select, at your option, one of the above-listed licenses. 7 | 8 | //! This module contains common test utilities for crates generating tests utilizing the 9 | //! 
AKD crate 10 | 11 | use colored::*; 12 | use log::{Level, Metadata, Record}; 13 | use once_cell::sync::OnceCell; 14 | use std::sync::Once; 15 | use std::time::{Duration, Instant}; 16 | 17 | static EPOCH: OnceCell = OnceCell::new(); 18 | static LOGGER: TestConsoleLogger = TestConsoleLogger {}; 19 | static INIT_ONCE: Once = Once::new(); 20 | 21 | pub(crate) struct TestConsoleLogger; 22 | 23 | impl TestConsoleLogger { 24 | pub(crate) fn format_log_record(record: &Record) { 25 | let target = { 26 | if let Some(target_str) = record.target().split(':').next_back() { 27 | if let Some(line) = record.line() { 28 | format!(" ({target_str}:{line})") 29 | } else { 30 | format!(" ({target_str})") 31 | } 32 | } else { 33 | "".to_string() 34 | } 35 | }; 36 | 37 | let toc = if let Some(epoch) = EPOCH.get() { 38 | Instant::now() - *epoch 39 | } else { 40 | Duration::from_millis(0) 41 | }; 42 | 43 | let seconds = toc.as_secs(); 44 | let hours = seconds / 3600; 45 | let minutes = (seconds / 60) % 60; 46 | let seconds = seconds % 60; 47 | let miliseconds = toc.subsec_millis(); 48 | 49 | let msg = format!( 50 | "[{:02}:{:02}:{:02}.{:03}] {:6} {}{}", 51 | hours, 52 | minutes, 53 | seconds, 54 | miliseconds, 55 | record.level(), 56 | record.args(), 57 | target 58 | ); 59 | let msg = match record.level() { 60 | Level::Trace | Level::Debug => msg.white(), 61 | Level::Info => msg.blue(), 62 | Level::Warn => msg.yellow(), 63 | Level::Error => msg.red(), 64 | }; 65 | println!("{msg}"); 66 | } 67 | } 68 | 69 | impl log::Log for TestConsoleLogger { 70 | fn enabled(&self, _metadata: &Metadata) -> bool { 71 | true 72 | } 73 | 74 | fn log(&self, record: &Record) { 75 | if !self.enabled(record.metadata()) { 76 | return; 77 | } 78 | TestConsoleLogger::format_log_record(record); 79 | } 80 | 81 | fn flush(&self) {} 82 | } 83 | 84 | /// Initialize the logger for console logging within test environments. 
85 | /// This is safe to call multiple times, but it will only initialize the logger 86 | /// to the log-level _first_ set. If you want a specific log-level (e.g. Debug) 87 | /// for a specific test, make sure to only run that single test after editing that 88 | /// test's log-level. 89 | /// 90 | /// The default level applied everywhere is Info 91 | pub fn init_logger(level: Level) { 92 | EPOCH.get_or_init(Instant::now); 93 | 94 | INIT_ONCE.call_once(|| { 95 | log::set_logger(&LOGGER) 96 | .map(|()| log::set_max_level(level.to_level_filter())) 97 | .unwrap(); 98 | }); 99 | } 100 | 101 | /// Global test startup constructor. Only runs in the TEST profile. Each 102 | /// crate which wants logging enabled in tests being run should make this call 103 | /// itself. 104 | #[cfg(test)] 105 | #[ctor::ctor] 106 | fn test_start() { 107 | init_logger(Level::Info); 108 | } 109 | -------------------------------------------------------------------------------- /akd/src/tests/mod.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) Meta Platforms, Inc. and affiliates. 2 | // 3 | // This source code is dual-licensed under either the MIT license found in the 4 | // LICENSE-MIT file in the root directory of this source tree or the Apache 5 | // License, Version 2.0 found in the LICENSE-APACHE file in the root directory 6 | // of this source tree. You may select, at your option, one of the above-listed licenses. 7 | 8 | //! 
Contains the tests for the high-level API (directory, auditor, client) 9 | 10 | mod test_core_protocol; 11 | mod test_errors; 12 | mod test_preloads; 13 | 14 | use std::collections::HashMap; 15 | 16 | use crate::{ 17 | errors::StorageError, 18 | storage::{ 19 | memory::AsyncInMemoryDatabase, 20 | types::{DbRecord, KeyData, ValueState, ValueStateRetrievalFlag}, 21 | Database, DbSetState, Storable, 22 | }, 23 | tree_node::TreeNodeWithPreviousValue, 24 | AkdLabel, AkdValue, Azks, 25 | }; 26 | 27 | // Below contains the mock code for constructing a `MockLocalDatabase` 28 | 29 | #[allow(dead_code)] 30 | #[derive(Clone)] 31 | pub struct LocalDatabase; 32 | 33 | unsafe impl Send for LocalDatabase {} 34 | 35 | unsafe impl Sync for LocalDatabase {} 36 | 37 | // Note that this macro produces a `MockLocalDatabase` struct 38 | mockall::mock! { 39 | pub LocalDatabase { 40 | 41 | } 42 | impl Clone for LocalDatabase { 43 | fn clone(&self) -> Self; 44 | } 45 | #[async_trait::async_trait] 46 | impl Database for LocalDatabase { 47 | async fn set(&self, record: DbRecord) -> Result<(), StorageError>; 48 | async fn batch_set( 49 | &self, 50 | records: Vec, 51 | state: DbSetState, 52 | ) -> Result<(), StorageError>; 53 | async fn get(&self, id: &St::StorageKey) -> Result; 54 | async fn batch_get( 55 | &self, 56 | ids: &[St::StorageKey], 57 | ) -> Result, StorageError>; 58 | async fn get_user_data(&self, username: &AkdLabel) -> Result; 59 | async fn get_user_state( 60 | &self, 61 | username: &AkdLabel, 62 | flag: ValueStateRetrievalFlag, 63 | ) -> Result; 64 | async fn get_user_state_versions( 65 | &self, 66 | usernames: &[AkdLabel], 67 | flag: ValueStateRetrievalFlag, 68 | ) -> Result, StorageError>; 69 | } 70 | } 71 | 72 | fn setup_mocked_db(db: &mut MockLocalDatabase, test_db: &AsyncInMemoryDatabase) { 73 | // ===== Set ===== // 74 | let tmp_db = test_db.clone(); 75 | db.expect_set() 76 | .returning(move |record| futures::executor::block_on(tmp_db.set(record))); 77 | 78 | // ===== 
Batch Set ===== // 79 | let tmp_db = test_db.clone(); 80 | db.expect_batch_set().returning(move |record, other| { 81 | futures::executor::block_on(tmp_db.batch_set(record, other)) 82 | }); 83 | 84 | // ===== Get ===== // 85 | let tmp_db = test_db.clone(); 86 | db.expect_get::() 87 | .returning(move |key| futures::executor::block_on(tmp_db.get::(key))); 88 | 89 | let tmp_db = test_db.clone(); 90 | db.expect_get::() 91 | .returning(move |key| { 92 | futures::executor::block_on(tmp_db.get::(key)) 93 | }); 94 | 95 | let tmp_db = test_db.clone(); 96 | db.expect_get::() 97 | .returning(move |key| futures::executor::block_on(tmp_db.get::(key))); 98 | 99 | // ===== Batch Get ===== // 100 | let tmp_db = test_db.clone(); 101 | db.expect_batch_get::() 102 | .returning(move |key| futures::executor::block_on(tmp_db.batch_get::(key))); 103 | 104 | let tmp_db = test_db.clone(); 105 | db.expect_batch_get::() 106 | .returning(move |key| { 107 | futures::executor::block_on(tmp_db.batch_get::(key)) 108 | }); 109 | 110 | // ===== Get User Data ===== // 111 | let tmp_db = test_db.clone(); 112 | db.expect_get_user_data() 113 | .returning(move |arg| futures::executor::block_on(tmp_db.get_user_data(arg))); 114 | 115 | // ===== Get User State ===== // 116 | let tmp_db = test_db.clone(); 117 | db.expect_get_user_state() 118 | .returning(move |arg, flag| futures::executor::block_on(tmp_db.get_user_state(arg, flag))); 119 | 120 | // ===== Get User State Versions ===== // 121 | let tmp_db = test_db.clone(); 122 | db.expect_get_user_state_versions() 123 | .returning(move |arg, flag| { 124 | futures::executor::block_on(tmp_db.get_user_state_versions(arg, flag)) 125 | }); 126 | } 127 | -------------------------------------------------------------------------------- /akd/src/tests/test_preloads.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) Meta Platforms, Inc. and affiliates. 
2 | // 3 | // This source code is dual-licensed under either the MIT license found in the 4 | // LICENSE-MIT file in the root directory of this source tree or the Apache 5 | // License, Version 2.0 found in the LICENSE-APACHE file in the root directory 6 | // of this source tree. You may select, at your option, one of the above-listed licenses. 7 | 8 | //! Contains the tests for ensuring that preloading of nodes works as intended 9 | 10 | use akd_core::configuration::Configuration; 11 | 12 | use crate::{ 13 | append_only_zks::AzksParallelismConfig, 14 | directory::Directory, 15 | ecvrf::HardCodedAkdVRF, 16 | errors::{AkdError, StorageError}, 17 | storage::{manager::StorageManager, memory::AsyncInMemoryDatabase}, 18 | test_config, 19 | tests::{setup_mocked_db, MockLocalDatabase}, 20 | tree_node::TreeNodeWithPreviousValue, 21 | AkdLabel, AkdValue, 22 | }; 23 | 24 | test_config!(test_publish_op_makes_no_get_requests); 25 | async fn test_publish_op_makes_no_get_requests() -> Result<(), AkdError> { 26 | let test_db = AsyncInMemoryDatabase::new(); 27 | 28 | let mut db = MockLocalDatabase { 29 | ..Default::default() 30 | }; 31 | setup_mocked_db(&mut db, &test_db); 32 | 33 | let storage = StorageManager::new_no_cache(db); 34 | let vrf = HardCodedAkdVRF {}; 35 | let akd = Directory::::new(storage, vrf, AzksParallelismConfig::default()) 36 | .await 37 | .expect("Failed to create directory"); 38 | 39 | // Create a set with 2 updates, (label, value) pairs 40 | // ("hello10", "hello10") 41 | // ("hello11", "hello11") 42 | let mut updates = vec![]; 43 | for i in 0..2 { 44 | updates.push(( 45 | AkdLabel(format!("hello1{i}").as_bytes().to_vec()), 46 | AkdValue(format!("hello1{i}").as_bytes().to_vec()), 47 | )); 48 | } 49 | // Publish the updates. Now the akd's epoch will be 1. 50 | akd.publish(updates) 51 | .await 52 | .expect("Failed to do initial publish"); 53 | 54 | // create a new mock, this time which explodes on any "get" of tree-nodes (shouldn't happen). 
It is still backed by the same 55 | // async in-mem db so all previous data should be there 56 | let mut db2 = MockLocalDatabase { 57 | ..Default::default() 58 | }; 59 | setup_mocked_db(&mut db2, &test_db); 60 | db2.expect_get::() 61 | .returning(|_| Err(StorageError::Other("Boom!".to_string()))); 62 | 63 | let storage = StorageManager::new_no_cache(db2); 64 | let vrf = HardCodedAkdVRF {}; 65 | let akd = Directory::::new(storage, vrf, AzksParallelismConfig::default()) 66 | .await 67 | .expect("Failed to create directory"); 68 | 69 | // create more updates 70 | let mut updates = vec![]; 71 | for i in 0..2 { 72 | updates.push(( 73 | AkdLabel(format!("hello1{i}").as_bytes().to_vec()), 74 | AkdValue(format!("hello1{}", i + 1).as_bytes().to_vec()), 75 | )); 76 | } 77 | 78 | // try to publish again, this time with the "boom" returning from any mocked get-calls 79 | // on tree nodes 80 | akd.publish(updates) 81 | .await 82 | .expect("Failed to do subsequent publish"); 83 | 84 | Ok(()) 85 | } 86 | -------------------------------------------------------------------------------- /akd/src/utils.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) Meta Platforms, Inc. and affiliates. 2 | // 3 | // This source code is dual-licensed under either the MIT license found in the 4 | // LICENSE-MIT file in the root directory of this source tree or the Apache 5 | // License, Version 2.0 found in the LICENSE-APACHE file in the root directory 6 | // of this source tree. You may select, at your option, one of the above-listed licenses. 7 | 8 | // 1. Create a hashmap of all prefixes of all elements of the node set 9 | // 2. For each node in current_nodes set, check if each child is in prefix hashmap 10 | // 3. If so, add child label to batch set 11 | 12 | // Creates a byte array of 32 bytes from a u64 13 | // Note that this representation is big-endian, and 14 | // places the bits to the front of the output byte_array. 
15 | #[cfg(any(test, feature = "public_tests"))] 16 | pub(crate) fn byte_arr_from_u64(input_int: u64) -> [u8; 32] { 17 | let mut output_arr = [0u8; 32]; 18 | let input_arr = input_int.to_be_bytes(); 19 | output_arr[..8].clone_from_slice(&input_arr[..8]); 20 | output_arr 21 | } 22 | 23 | #[allow(unused)] 24 | #[cfg(any(test, feature = "public_tests"))] 25 | pub(crate) fn random_label(rng: &mut impl rand::Rng) -> crate::NodeLabel { 26 | crate::NodeLabel { 27 | label_val: rng.gen::<[u8; 32]>(), 28 | label_len: 256, 29 | } 30 | } 31 | 32 | // NOTE(new_config): Add a new configuration here 33 | 34 | /// Macro used for running tests with different configurations 35 | #[cfg(any(test, feature = "public_tests"))] 36 | #[macro_export] 37 | macro_rules! test_config { 38 | ( $x:ident ) => { 39 | paste::paste! { 40 | #[cfg(feature = "whatsapp_v1")] 41 | #[tokio::test] 42 | async fn [<$x _ whatsapp_v1_config>]() -> Result<(), AkdError> { 43 | $x::<$crate::WhatsAppV1Configuration>().await 44 | } 45 | 46 | #[cfg(feature = "experimental")] 47 | #[tokio::test] 48 | async fn [<$x _ experimental_config>]() -> Result<(), AkdError> { 49 | $x::<$crate::ExperimentalConfiguration<$crate::ExampleLabel>>().await 50 | } 51 | } 52 | }; 53 | } 54 | -------------------------------------------------------------------------------- /akd_core/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "akd_core" 3 | version = "0.12.0-pre.12" 4 | authors = ["akd contributors"] 5 | description = "Core utilities for the akd crate" 6 | license = "MIT OR Apache-2.0" 7 | edition = "2021" 8 | keywords = ["key-transparency", "akd"] 9 | repository = "https://github.com/facebook/akd" 10 | readme = "../README.md" 11 | # Uncomment for automated building of the protobuf Rust sources. 
Necessary if the .proto specs change 12 | build = "src/build.rs" 13 | 14 | [build-dependencies] 15 | protobuf-codegen = "3" 16 | protobuf-parse = "3" 17 | 18 | [features] 19 | # Disable all STD for the crate 20 | nostd = [] 21 | # Supported configurations 22 | whatsapp_v1 = ["dep:blake3"] 23 | experimental = ["dep:blake3"] 24 | # Include the VRF verification logic 25 | vrf = ["ed25519-dalek", "curve25519-dalek"] 26 | serde_serialization = ["dep:serde", "dep:serde_bytes", "ed25519-dalek/serde"] 27 | # Parallelize VRF calculations during publish 28 | parallel_vrf = ["tokio"] 29 | 30 | bench = ["parallel_vrf", "experimental", "vrf", "tokio/rt-multi-thread"] 31 | public_tests = ["dep:paste"] 32 | protobuf = ["dep:protobuf"] 33 | 34 | # Default features mix 35 | default = ["vrf", "experimental"] 36 | 37 | [dependencies] 38 | ## Required dependencies ## 39 | async-trait = "0.1" 40 | curve25519-dalek = { version = "4", optional = true } 41 | ed25519-dalek = { version = "2", features = [ 42 | "digest", 43 | "legacy_compatibility", 44 | ], optional = true } 45 | hex = "0.4" 46 | zeroize = "1" 47 | 48 | ## Optional dependencies ## 49 | blake3 = { version = "1", optional = true, default-features = false } 50 | protobuf = { version = "3", optional = true } 51 | rand = { version = "0.8", optional = true } 52 | serde = { version = "1", features = ["derive"], optional = true } 53 | serde_bytes = { version = "0.11", optional = true } 54 | tokio = { version = "1", features = ["rt"], optional = true } 55 | paste = { version = "1", optional = true } 56 | 57 | [dev-dependencies] 58 | bincode = "1" 59 | itertools = "0.13" 60 | proptest = "1" 61 | proptest-derive = "0.4" 62 | rand = "0.8" 63 | serde = { version = "1", features = ["derive"] } 64 | criterion = "0.5" 65 | 66 | # To enable the public-tests feature in tests 67 | akd_core = { path = ".", features = ["public_tests"] } 68 | 69 | [[bench]] 70 | name = "parallel_vrfs" 71 | harness = false 72 | required-features = ["bench"] 73 | 
-------------------------------------------------------------------------------- /akd_core/benches/parallel_vrfs.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) Meta Platforms, Inc. and affiliates. 2 | // 3 | // This source code is dual-licensed under either the MIT license found in the 4 | // LICENSE-MIT file in the root directory of this source tree or the Apache 5 | // License, Version 2.0 found in the LICENSE-APACHE file in the root directory 6 | // of this source tree. You may select, at your option, one of the above-listed licenses. 7 | 8 | //! Benchmarks for parallel vs sequential VRF calculations 9 | 10 | extern crate criterion; 11 | use self::criterion::*; 12 | use akd_core::configuration::NamedConfiguration; 13 | use akd_core::ecvrf::{VRFExpandedPrivateKey, VRFPublicKey}; 14 | use akd_core::VersionFreshness; 15 | use akd_core::{ecvrf::VRFKeyStorage, AkdLabel, AkdValue}; 16 | use rand::distributions::Alphanumeric; 17 | use rand::Rng; 18 | 19 | macro_rules! bench_config { 20 | ( $x:ident ) => { 21 | paste::paste! { 22 | // NOTE(new_config): Add a new configuration here 23 | 24 | #[cfg(feature = "whatsapp_v1")] 25 | fn [<$x _ whatsapp_v1_config>](c: &mut Criterion) { 26 | $x::(c) 27 | } 28 | 29 | #[cfg(feature = "experimental")] 30 | fn [<$x _ experimental_config>](c: &mut Criterion) { 31 | $x::>(c) 32 | } 33 | } 34 | }; 35 | } 36 | 37 | macro_rules! group_config { 38 | ( $( $group:path ),+ $(,)* ) => { 39 | paste::paste! 
{ 40 | // NOTE(new_config): Add a new configuration here 41 | 42 | #[cfg(feature = "whatsapp_v1")] 43 | criterion_group!( 44 | $( 45 | [<$group _ whatsapp_v1_config>], 46 | )+ 47 | ); 48 | 49 | #[cfg(feature = "experimental")] 50 | criterion_group!( 51 | $( 52 | [<$group _ experimental_config>], 53 | )+ 54 | ); 55 | } 56 | }; 57 | } 58 | 59 | group_config!(benches, bench_single_vrf, bench_parallel_vrfs); 60 | 61 | fn main() { 62 | // NOTE(new_config): Add a new configuration here 63 | 64 | #[cfg(feature = "whatsapp_v1")] 65 | benches_whatsapp_v1_config(); 66 | #[cfg(feature = "experimental")] 67 | benches_experimental_config(); 68 | 69 | Criterion::default().configure_from_args().final_summary(); 70 | } 71 | 72 | bench_config!(bench_single_vrf); 73 | fn bench_single_vrf(c: &mut Criterion) { 74 | let rng = rand::rngs::OsRng; 75 | 76 | // Generate a random label 77 | let label = AkdLabel::from( 78 | &rng.sample_iter(&Alphanumeric) 79 | .take(32) 80 | .map(char::from) 81 | .collect::(), 82 | ); 83 | 84 | let runtime = tokio::runtime::Builder::new_multi_thread().build().unwrap(); 85 | let key = runtime 86 | .block_on(akd_core::ecvrf::HardCodedAkdVRF.get_vrf_private_key()) 87 | .unwrap(); 88 | let expanded_key = VRFExpandedPrivateKey::from(&key); 89 | let pk = VRFPublicKey::from(&key); 90 | 91 | c.bench_function( 92 | &format!("Single VRF label generation ({})", TC::name()), 93 | |b| { 94 | b.iter(|| { 95 | akd_core::ecvrf::HardCodedAkdVRF::get_node_label_with_expanded_key::( 96 | &expanded_key, 97 | &pk, 98 | &label, 99 | VersionFreshness::Fresh, 100 | 1, 101 | ); 102 | }) 103 | }, 104 | ); 105 | } 106 | 107 | bench_config!(bench_parallel_vrfs); 108 | fn bench_parallel_vrfs(c: &mut Criterion) { 109 | // utilize all cores available 110 | let runtime = tokio::runtime::Builder::new_multi_thread().build().unwrap(); 111 | // A runtime which is capped at 4 worker threads (cores) 112 | let limited_runtime = tokio::runtime::Builder::new_multi_thread() 113 | .worker_threads(4) 
114 | .build() 115 | .unwrap(); 116 | 117 | // generate 1K labels to do VRFs for 118 | let labels = (0..1_000) 119 | .into_iter() 120 | .map(|i| { 121 | let name = format!("user {}", i); 122 | ( 123 | AkdLabel::from(&name), 124 | VersionFreshness::Fresh, 125 | i as u64, 126 | AkdValue::from(&name), 127 | ) 128 | }) 129 | .collect::>(); 130 | let labels_clone = labels.clone(); 131 | 132 | c.bench_function(&format!("Sequential VRFs ({})", TC::name()), |b| { 133 | b.iter(|| { 134 | let key = runtime 135 | .block_on(akd_core::ecvrf::HardCodedAkdVRF.get_vrf_private_key()) 136 | .unwrap(); 137 | let expanded_key = VRFExpandedPrivateKey::from(&key); 138 | let pk = VRFPublicKey::from(&key); 139 | for (label, stale, version, _) in labels.iter() { 140 | akd_core::ecvrf::HardCodedAkdVRF::get_node_label_with_expanded_key::( 141 | &expanded_key, 142 | &pk, 143 | label, 144 | *stale, 145 | *version, 146 | ); 147 | } 148 | }) 149 | }); 150 | 151 | c.bench_function( 152 | &format!("Parallel VRFs (all cores) ({})", TC::name()), 153 | |b| { 154 | b.iter(|| { 155 | runtime.block_on(async { 156 | let vrf = akd_core::ecvrf::HardCodedAkdVRF; 157 | vrf.get_node_labels::(&labels_clone).await.unwrap(); 158 | }) 159 | }) 160 | }, 161 | ); 162 | 163 | c.bench_function(&format!("Parallel VRFs (4 cores) ({})", TC::name()), |b| { 164 | b.iter(|| { 165 | limited_runtime.block_on(async { 166 | let vrf = akd_core::ecvrf::HardCodedAkdVRF; 167 | vrf.get_node_labels::(&labels_clone).await.unwrap(); 168 | }) 169 | }) 170 | }); 171 | } 172 | -------------------------------------------------------------------------------- /akd_core/src/build.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) Meta Platforms, Inc. and affiliates. 
2 | // 3 | // This source code is dual-licensed under either the MIT license found in the 4 | // LICENSE-MIT file in the root directory of this source tree or the Apache 5 | // License, Version 2.0 found in the LICENSE-APACHE file in the root directory 6 | // of this source tree. You may select, at your option, one of the above-listed licenses. 7 | 8 | //! This is the pre-compilation build script for the crate `akd_core`. Mainly it's used to compile 9 | //! protobuf files into rust code prior to compilation. 10 | 11 | // NOTE: build.rs documentation = https://doc.rust-lang.org/cargo/reference/build-scripts.html 12 | 13 | /// The shared-path for all protobuf specifications 14 | const PROTOBUF_BASE_DIRECTORY: &str = "src/proto/specs"; 15 | /// The list of protobuf files to generate inside PROTOBUF_BASE_DIRECTORY 16 | const PROTOBUF_FILES: [&str; 1] = ["types"]; 17 | /// The output directory in the cargo build folder to emit the generated sources to 18 | const PROTOS_OUTPUT_DIR: &str = "protos"; 19 | 20 | fn build_protobufs() { 21 | let mut protobuf_files = Vec::with_capacity(PROTOBUF_FILES.len()); 22 | 23 | for file in PROTOBUF_FILES.iter() { 24 | let proto_file = format!("{PROTOBUF_BASE_DIRECTORY}/{file}.proto"); 25 | println!("cargo:rerun-if-changed={proto_file}"); 26 | protobuf_files.push(proto_file); 27 | } 28 | 29 | // Code generator writes to the output directory 30 | protobuf_codegen::Codegen::new() 31 | .pure() 32 | .includes([PROTOBUF_BASE_DIRECTORY]) 33 | .inputs(&protobuf_files) 34 | .cargo_out_dir(PROTOS_OUTPUT_DIR) 35 | .run_from_script(); 36 | } 37 | 38 | fn main() { 39 | // compile the spec files into Rust code 40 | build_protobufs(); 41 | } 42 | -------------------------------------------------------------------------------- /akd_core/src/configuration/experimental.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) Meta Platforms, Inc. and affiliates.
2 | // 3 | // This source code is dual-licensed under either the MIT license found in the 4 | // LICENSE-MIT file in the root directory of this source tree or the Apache 5 | // License, Version 2.0 found in the LICENSE-APACHE file in the root directory 6 | // of this source tree. You may select, at your option, one of the above-listed licenses. 7 | 8 | //! Defines the current (experimental) configuration 9 | 10 | use core::marker::PhantomData; 11 | 12 | use super::traits::DomainLabel; 13 | use crate::configuration::Configuration; 14 | use crate::hash::{Digest, DIGEST_BYTES}; 15 | use crate::utils::i2osp_array; 16 | use crate::{AkdLabel, AkdValue, AzksValue, AzksValueWithEpoch, NodeLabel, VersionFreshness}; 17 | 18 | #[cfg(feature = "nostd")] 19 | use alloc::vec::Vec; 20 | 21 | /// An experimental configuration 22 | #[derive(Clone)] 23 | pub struct ExperimentalConfiguration(PhantomData); 24 | 25 | unsafe impl Send for ExperimentalConfiguration {} 26 | unsafe impl Sync for ExperimentalConfiguration {} 27 | 28 | impl ExperimentalConfiguration { 29 | /// Used by the client to supply a commitment nonce and value to reconstruct the commitment, via: 30 | /// commitment = H(i2osp_array(value), i2osp_array(nonce)) 31 | fn generate_commitment_from_nonce_client(value: &crate::AkdValue, nonce: &[u8]) -> AzksValue { 32 | AzksValue(::hash( 33 | &[i2osp_array(value), i2osp_array(nonce)].concat(), 34 | )) 35 | } 36 | } 37 | 38 | impl Configuration for ExperimentalConfiguration { 39 | fn hash(item: &[u8]) -> crate::hash::Digest { 40 | // Hash(domain label || item) 41 | let mut hasher = blake3::Hasher::new(); 42 | hasher.update(L::domain_label()); 43 | hasher.update(item); 44 | hasher.finalize().into() 45 | } 46 | 47 | fn empty_root_value() -> AzksValue { 48 | AzksValue([0u8; 32]) 49 | } 50 | 51 | fn empty_node_hash() -> AzksValue { 52 | AzksValue([0u8; 32]) 53 | } 54 | 55 | fn hash_leaf_with_value( 56 | value: &crate::AkdValue, 57 | epoch: u64, 58 | nonce: &[u8], 59 | ) -> 
AzksValueWithEpoch { 60 | let commitment = Self::generate_commitment_from_nonce_client(value, nonce); 61 | Self::hash_leaf_with_commitment(commitment, epoch) 62 | } 63 | 64 | fn hash_leaf_with_commitment(commitment: AzksValue, epoch: u64) -> AzksValueWithEpoch { 65 | let mut data = [0; DIGEST_BYTES + 8]; 66 | data[..DIGEST_BYTES].copy_from_slice(&commitment.0); 67 | data[DIGEST_BYTES..].copy_from_slice(&epoch.to_be_bytes()); 68 | AzksValueWithEpoch(Self::hash(&data)) 69 | } 70 | 71 | /// Used by the server to produce a commitment nonce for an AkdLabel, version, and AkdValue. 72 | /// Computes nonce = H(commitment key || label) 73 | fn get_commitment_nonce( 74 | commitment_key: &[u8], 75 | label: &NodeLabel, 76 | _version: u64, 77 | _value: &AkdValue, 78 | ) -> Digest { 79 | Self::hash(&[commitment_key, &label.to_bytes()].concat()) 80 | } 81 | 82 | /// Used by the server to produce a commitment for an AkdLabel, version, and AkdValue 83 | /// 84 | /// nonce = H(commitment key || label) 85 | /// commitment = H(i2osp_array(value), i2osp_array(nonce)) 86 | /// 87 | /// The nonce value is used to create a hiding and binding commitment using a 88 | /// cryptographic hash function. Note that it is derived from the label, version, and 89 | /// value (even though the binding to value is somewhat optional).
90 | /// 91 | /// Note that this commitment needs to be a hash function (random oracle) output 92 | fn compute_fresh_azks_value( 93 | commitment_key: &[u8], 94 | label: &NodeLabel, 95 | version: u64, 96 | value: &AkdValue, 97 | ) -> AzksValue { 98 | let nonce = Self::get_commitment_nonce(commitment_key, label, version, value); 99 | AzksValue(Self::hash( 100 | &[i2osp_array(value), i2osp_array(&nonce)].concat(), 101 | )) 102 | } 103 | 104 | /// To convert a regular label (arbitrary string of bytes) into a [NodeLabel], we compute the 105 | /// output as: H(label || freshness || version) 106 | /// 107 | /// Specifically, we concatenate the following together: 108 | /// - I2OSP(len(label) as u64, label) 109 | /// - A single byte encoded as 0u8 if "stale", 1u8 if "fresh" 110 | /// - A u64 representing the version 111 | /// 112 | /// These are all interpreted as a single byte array and hashed together, with the output 113 | /// of the hash returned. 114 | fn get_hash_from_label_input( 115 | label: &AkdLabel, 116 | freshness: VersionFreshness, 117 | version: u64, 118 | ) -> Vec { 119 | let freshness_bytes = [freshness as u8]; 120 | let hashed_label = Self::hash( 121 | &[ 122 | &crate::utils::i2osp_array(label)[..], 123 | &freshness_bytes, 124 | &version.to_be_bytes(), 125 | ] 126 | .concat(), 127 | ); 128 | hashed_label.to_vec() 129 | } 130 | 131 | /// Computes the parent hash from the children hashes and labels 132 | fn compute_parent_hash_from_children( 133 | left_val: &AzksValue, 134 | left_label: &[u8], 135 | right_val: &AzksValue, 136 | right_label: &[u8], 137 | ) -> AzksValue { 138 | AzksValue(Self::hash( 139 | &[&left_val.0, left_label, &right_val.0, right_label].concat(), 140 | )) 141 | } 142 | 143 | /// Given the top-level hash, compute the "actual" root hash that is published 144 | /// by the directory maintainer 145 | fn compute_root_hash_from_val(root_val: &AzksValue) -> Digest { 146 | root_val.0 147 | } 148 | 149 | /// Similar to commit_fresh_value, but used 
for stale values. 150 | fn stale_azks_value() -> AzksValue { 151 | AzksValue(crate::hash::EMPTY_DIGEST) 152 | } 153 | 154 | fn compute_node_label_value(bytes: &[u8]) -> Vec { 155 | bytes.to_vec() 156 | } 157 | 158 | fn empty_label() -> NodeLabel { 159 | NodeLabel { 160 | label_val: [ 161 | 1u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 162 | 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 163 | ], 164 | label_len: 0, 165 | } 166 | } 167 | } 168 | 169 | #[cfg(feature = "public_tests")] 170 | impl super::traits::NamedConfiguration for ExperimentalConfiguration { 171 | fn name() -> &'static str { 172 | "experimental" 173 | } 174 | } 175 | -------------------------------------------------------------------------------- /akd_core/src/configuration/mod.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) Meta Platforms, Inc. and affiliates. 2 | // 3 | // This source code is dual-licensed under either the MIT license found in the 4 | // LICENSE-MIT file in the root directory of this source tree or the Apache 5 | // License, Version 2.0 found in the LICENSE-APACHE file in the root directory 6 | // of this source tree. You may select, at your option, one of the above-listed licenses. 7 | 8 | //! 
Defines the configuration trait and implementations for various configurations 9 | 10 | mod traits; 11 | pub use traits::{Configuration, DomainLabel, ExampleLabel}; 12 | 13 | #[cfg(feature = "public_tests")] 14 | pub use traits::NamedConfiguration; 15 | 16 | // Note(new_config): Update this when adding a new configuration 17 | 18 | #[cfg(feature = "whatsapp_v1")] 19 | pub(crate) mod whatsapp_v1; 20 | #[cfg(feature = "whatsapp_v1")] 21 | pub use whatsapp_v1::WhatsAppV1Configuration; 22 | 23 | #[cfg(feature = "experimental")] 24 | pub(crate) mod experimental; 25 | #[cfg(feature = "experimental")] 26 | pub use experimental::ExperimentalConfiguration; 27 | -------------------------------------------------------------------------------- /akd_core/src/configuration/traits.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) Meta Platforms, Inc. and affiliates. 2 | // 3 | // This source code is dual-licensed under either the MIT license found in the 4 | // LICENSE-MIT file in the root directory of this source tree or the Apache 5 | // License, Version 2.0 found in the LICENSE-APACHE file in the root directory 6 | // of this source tree. You may select, at your option, one of the above-listed licenses. 7 | 8 | //! Defines the configuration trait for customizing the directory's cryptographic operations 9 | 10 | use crate::hash::Digest; 11 | use crate::{AkdLabel, AkdValue, AzksValue, AzksValueWithEpoch, NodeLabel, VersionFreshness}; 12 | 13 | #[cfg(feature = "nostd")] 14 | use alloc::vec::Vec; 15 | 16 | /// Trait for specifying a domain separation label that should be specific to the 17 | /// application 18 | pub trait DomainLabel: Clone + 'static { 19 | /// Returns a label, which is used as a domain separator when computing hashes 20 | fn domain_label() -> &'static [u8]; 21 | } 22 | 23 | /// An example domain separation label (this should not be used in a production setting!) 
24 | #[derive(Clone)] 25 | pub struct ExampleLabel; 26 | 27 | impl DomainLabel for ExampleLabel { 28 | fn domain_label() -> &'static [u8] { 29 | "ExampleLabel".as_bytes() 30 | } 31 | } 32 | 33 | /// Trait for customizing the directory's cryptographic operations 34 | pub trait Configuration: Clone + Send + Sync + 'static { 35 | /// Hash a single byte array 36 | fn hash(item: &[u8]) -> crate::hash::Digest; 37 | 38 | /// The value stored in the root node upon initialization, with no children 39 | fn empty_root_value() -> AzksValue; 40 | 41 | /// AZKS value corresponding to an empty node 42 | fn empty_node_hash() -> AzksValue; 43 | 44 | /// Hash a leaf epoch and nonce with a given [AkdValue] 45 | fn hash_leaf_with_value( 46 | value: &crate::AkdValue, 47 | epoch: u64, 48 | nonce: &[u8], 49 | ) -> AzksValueWithEpoch; 50 | 51 | /// Hash a commit and epoch together to get the leaf's hash value 52 | fn hash_leaf_with_commitment(commitment: AzksValue, epoch: u64) -> AzksValueWithEpoch; 53 | 54 | /// Used by the server to produce a commitment nonce for an AkdLabel, version, and AkdValue. 55 | fn get_commitment_nonce( 56 | commitment_key: &[u8], 57 | label: &NodeLabel, 58 | version: u64, 59 | value: &AkdValue, 60 | ) -> Digest; 61 | 62 | /// Used by the server to produce a commitment for an AkdLabel, version, and AkdValue 63 | fn compute_fresh_azks_value( 64 | commitment_key: &[u8], 65 | label: &NodeLabel, 66 | version: u64, 67 | value: &AkdValue, 68 | ) -> AzksValue; 69 | 70 | /// To convert a regular label (arbitrary string of bytes) into a [NodeLabel], we compute the 71 | /// output as: H(label || freshness || version) 72 | /// 73 | /// Specifically, we concatenate the following together: 74 | /// - I2OSP(len(label) as u64, label) 75 | /// - A single byte encoded as 0u8 if "stale", 1u8 if "fresh" 76 | /// - A u64 representing the version 77 | /// 78 | /// These are all interpreted as a single byte array and hashed together, with the output 79 | /// of the hash returned. 
80 | fn get_hash_from_label_input( 81 | label: &AkdLabel, 82 | freshness: VersionFreshness, 83 | version: u64, 84 | ) -> Vec; 85 | 86 | /// Computes the parent hash from the children hashes and labels 87 | fn compute_parent_hash_from_children( 88 | left_val: &AzksValue, 89 | left_label: &[u8], 90 | right_val: &AzksValue, 91 | right_label: &[u8], 92 | ) -> AzksValue; 93 | 94 | /// Given the top-level hash, compute the "actual" root hash that is published 95 | /// by the directory maintainer 96 | fn compute_root_hash_from_val(root_val: &AzksValue) -> Digest; 97 | 98 | /// Similar to commit_fresh_value, but used for stale values. 99 | fn stale_azks_value() -> AzksValue; 100 | 101 | /// Computes the node label value from the bytes of the label 102 | fn compute_node_label_value(bytes: &[u8]) -> Vec; 103 | 104 | /// Returns the representation of the empty label 105 | fn empty_label() -> NodeLabel; 106 | } 107 | 108 | /// For fixture generation / testing purposes only 109 | #[cfg(feature = "public_tests")] 110 | pub trait NamedConfiguration: Configuration { 111 | /// The name of the configuration 112 | fn name() -> &'static str; 113 | } 114 | -------------------------------------------------------------------------------- /akd_core/src/configuration/whatsapp_v1.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) Meta Platforms, Inc. and affiliates. 2 | // 3 | // This source code is dual-licensed under either the MIT license found in the 4 | // LICENSE-MIT file in the root directory of this source tree or the Apache 5 | // License, Version 2.0 found in the LICENSE-APACHE file in the root directory 6 | // of this source tree. You may select, at your option, one of the above-listed licenses. 7 | 8 | //! 
Defines the WhatsApp v1 configuration 9 | 10 | use crate::configuration::Configuration; 11 | use crate::hash::{Digest, DIGEST_BYTES}; 12 | use crate::utils::i2osp_array; 13 | use crate::{ 14 | AkdLabel, AkdValue, AzksValue, AzksValueWithEpoch, NodeLabel, VersionFreshness, EMPTY_VALUE, 15 | }; 16 | 17 | #[cfg(feature = "nostd")] 18 | use alloc::vec::Vec; 19 | 20 | /// The configuration used by WhatsApp 21 | #[derive(Clone)] 22 | pub struct WhatsAppV1Configuration; 23 | 24 | unsafe impl Send for WhatsAppV1Configuration {} 25 | unsafe impl Sync for WhatsAppV1Configuration {} 26 | 27 | impl WhatsAppV1Configuration { 28 | /// Used by the client to supply a commitment nonce and value to reconstruct the commitment, via: 29 | /// commitment = H(i2osp_array(value), i2osp_array(nonce)) 30 | fn generate_commitment_from_nonce_client(value: &crate::AkdValue, nonce: &[u8]) -> AzksValue { 31 | AzksValue(Self::hash( 32 | &[i2osp_array(value), i2osp_array(nonce)].concat(), 33 | )) 34 | } 35 | } 36 | 37 | impl Configuration for WhatsAppV1Configuration { 38 | fn hash(item: &[u8]) -> crate::hash::Digest { 39 | ::blake3::hash(item).into() 40 | } 41 | 42 | fn empty_root_value() -> AzksValue { 43 | AzksValue(Self::hash(&crate::EMPTY_VALUE)) 44 | } 45 | 46 | fn empty_node_hash() -> AzksValue { 47 | AzksValue(Self::hash( 48 | &[ 49 | Self::hash(&EMPTY_VALUE).to_vec(), 50 | Self::empty_label().value::(), 51 | ] 52 | .concat(), 53 | )) 54 | } 55 | 56 | fn hash_leaf_with_value( 57 | value: &crate::AkdValue, 58 | epoch: u64, 59 | nonce: &[u8], 60 | ) -> AzksValueWithEpoch { 61 | let commitment = Self::generate_commitment_from_nonce_client(value, nonce); 62 | Self::hash_leaf_with_commitment(commitment, epoch) 63 | } 64 | 65 | fn hash_leaf_with_commitment(commitment: AzksValue, epoch: u64) -> AzksValueWithEpoch { 66 | let mut data = [0; DIGEST_BYTES + 8]; 67 | data[..DIGEST_BYTES].copy_from_slice(&commitment.0); 68 | data[DIGEST_BYTES..].copy_from_slice(&epoch.to_be_bytes()); 69 | 
AzksValueWithEpoch(Self::hash(&data)) 70 | } 71 | 72 | /// Used by the server to produce a commitment nonce for an AkdLabel, version, and AkdValue. 73 | /// Computes nonce = H(commitment key || label || version || value) 74 | fn get_commitment_nonce( 75 | commitment_key: &[u8], 76 | label: &NodeLabel, 77 | version: u64, 78 | value: &AkdValue, 79 | ) -> Digest { 80 | Self::hash( 81 | &[ 82 | commitment_key, 83 | &label.to_bytes(), 84 | &version.to_be_bytes(), 85 | &i2osp_array(value), 86 | ] 87 | .concat(), 88 | ) 89 | } 90 | 91 | /// Used by the server to produce a commitment for an AkdLabel, version, and AkdValue 92 | /// 93 | /// nonce = H(commitment_key, label, version, i2osp_array(value)) 94 | /// commmitment = H(i2osp_array(value), i2osp_array(nonce)) 95 | /// 96 | /// The nonce value is used to create a hiding and binding commitment using a 97 | /// cryptographic hash function. Note that it is derived from the label, version, and 98 | /// value (even though the binding to value is somewhat optional). 99 | /// 100 | /// Note that this commitment needs to be a hash function (random oracle) output 101 | fn compute_fresh_azks_value( 102 | commitment_key: &[u8], 103 | label: &NodeLabel, 104 | version: u64, 105 | value: &AkdValue, 106 | ) -> AzksValue { 107 | let nonce = Self::get_commitment_nonce(commitment_key, label, version, value); 108 | AzksValue(Self::hash( 109 | &[i2osp_array(value), i2osp_array(&nonce)].concat(), 110 | )) 111 | } 112 | 113 | /// To convert a regular label (arbitrary string of bytes) into a [NodeLabel], we compute the 114 | /// output as: H(label || freshness || version) 115 | /// 116 | /// Specifically, we concatenate the following together: 117 | /// - I2OSP(len(label) as u64, label) 118 | /// - A single byte encoded as 0u8 if "stale", 1u8 if "fresh" 119 | /// - A u64 representing the version 120 | /// 121 | /// These are all interpreted as a single byte array and hashed together, with the output 122 | /// of the hash returned. 
123 | fn get_hash_from_label_input( 124 | label: &AkdLabel, 125 | freshness: VersionFreshness, 126 | version: u64, 127 | ) -> Vec { 128 | let freshness_bytes = [freshness as u8]; 129 | let hashed_label = Self::hash( 130 | &[ 131 | &crate::utils::i2osp_array(label)[..], 132 | &freshness_bytes, 133 | &version.to_be_bytes(), 134 | ] 135 | .concat(), 136 | ); 137 | hashed_label.to_vec() 138 | } 139 | 140 | /// Computes the parent hash from the children hashes and labels 141 | fn compute_parent_hash_from_children( 142 | left_val: &AzksValue, 143 | left_label: &[u8], 144 | right_val: &AzksValue, 145 | right_label: &[u8], 146 | ) -> AzksValue { 147 | AzksValue(Self::hash( 148 | &[ 149 | Self::hash(&[left_val.0.to_vec(), left_label.to_vec()].concat()), 150 | Self::hash(&[right_val.0.to_vec(), right_label.to_vec()].concat()), 151 | ] 152 | .concat(), 153 | )) 154 | } 155 | 156 | /// Given the top-level hash, compute the "actual" root hash that is published 157 | /// by the directory maintainer 158 | fn compute_root_hash_from_val(root_val: &AzksValue) -> Digest { 159 | Self::hash(&[&root_val.0[..], &NodeLabel::root().value::()].concat()) 160 | } 161 | 162 | /// Similar to commit_fresh_value, but used for stale values. 163 | fn stale_azks_value() -> AzksValue { 164 | AzksValue(Self::hash(&EMPTY_VALUE)) 165 | } 166 | 167 | fn compute_node_label_value(bytes: &[u8]) -> Vec { 168 | Self::hash(bytes).to_vec() 169 | } 170 | 171 | fn empty_label() -> NodeLabel { 172 | NodeLabel { 173 | label_val: [1u8; 32], 174 | label_len: 0, 175 | } 176 | } 177 | } 178 | 179 | #[cfg(feature = "public_tests")] 180 | impl super::traits::NamedConfiguration for WhatsAppV1Configuration { 181 | fn name() -> &'static str { 182 | "whatsapp_v1" 183 | } 184 | } 185 | -------------------------------------------------------------------------------- /akd_core/src/ecvrf/mod.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) Meta Platforms, Inc. 
and affiliates. 2 | // 3 | // This source code is dual-licensed under either the MIT license found in the 4 | // LICENSE-MIT file in the root directory of this source tree or the Apache 5 | // License, Version 2.0 found in the LICENSE-APACHE file in the root directory 6 | // of this source tree. You may select, at your option, one of the above-listed licenses. 7 | 8 | //! This module contains implementations of a 9 | //! [verifiable random function](https://en.wikipedia.org/wiki/Verifiable_random_function) 10 | //! (currently only ECVRF). VRFs are used, in the case of this crate, to anonymize the 11 | //! user id <-> node label mapping into a 1-way hash, which is verifiable without being 12 | //! regeneratable without the secret key. 13 | //! 14 | //! VRFs allow us to have the server generate a constant mapping from a user id to a node label 15 | //! but the client cannot themselves generate the mapping, only verify it. They can confirm 16 | //! a user id matches the label, but don't have the ability to determine the labels of other 17 | //! users in the directory. 18 | //! 19 | //! This module implements an instantiation of a verifiable random function known as 20 | //! [ECVRF-EDWARDS25519-SHA512-TAI from RFC9381](https://www.ietf.org/rfc/rfc9381.html). 21 | //! 22 | //! 23 | //! 
Adapted from Diem's NextGen Crypto module available [here](https://github.com/diem/diem/blob/502936fbd59e35276e2cf455532b143796d68a16/crypto/nextgen_crypto/src/vrf/ecvrf.rs) 24 | 25 | mod ecvrf_impl; 26 | mod traits; 27 | // export the functionality we want visible 28 | pub use crate::ecvrf::ecvrf_impl::{ 29 | Output, Proof, VRFExpandedPrivateKey, VRFPrivateKey, VRFPublicKey, 30 | }; 31 | pub use crate::ecvrf::traits::VRFKeyStorage; 32 | #[cfg(feature = "nostd")] 33 | use alloc::boxed::Box; 34 | #[cfg(feature = "nostd")] 35 | use alloc::format; 36 | #[cfg(feature = "nostd")] 37 | use alloc::string::String; 38 | #[cfg(feature = "nostd")] 39 | use alloc::string::ToString; 40 | #[cfg(feature = "nostd")] 41 | use alloc::vec::Vec; 42 | 43 | #[cfg(test)] 44 | mod tests; 45 | 46 | /// A error related to verifiable random functions 47 | #[derive(Debug, Eq, PartialEq)] 48 | pub enum VrfError { 49 | /// A problem retrieving or decoding the VRF public key 50 | PublicKey(String), 51 | /// A problem retrieving or decoding the VRF signing key 52 | SigningKey(String), 53 | /// A problem verifying the VRF proof 54 | Verification(String), 55 | } 56 | 57 | impl core::fmt::Display for VrfError { 58 | fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { 59 | let code = match &self { 60 | VrfError::PublicKey(msg) => format!("(Public Key) - {msg}"), 61 | VrfError::SigningKey(msg) => format!("(Signing Key) - {msg}"), 62 | VrfError::Verification(msg) => format!("(Verification) - {msg}"), 63 | }; 64 | write!(f, "Verifiable random function error {code}") 65 | } 66 | } 67 | 68 | /// This is a version of VRFKeyStorage for testing purposes, which uses the example from the VRF crate. 
69 | /// 70 | /// const KEY_MATERIAL: &str = "c9afa9d845ba75166b5c215767b1d6934e50c3db36e89b127b8a622b120f6721"; 71 | #[derive(Clone)] 72 | pub struct HardCodedAkdVRF; 73 | 74 | unsafe impl Sync for HardCodedAkdVRF {} 75 | unsafe impl Send for HardCodedAkdVRF {} 76 | 77 | #[async_trait::async_trait] 78 | impl VRFKeyStorage for HardCodedAkdVRF { 79 | async fn retrieve(&self) -> Result, VrfError> { 80 | hex::decode("c9afa9d845ba75166b5c215767b1d6934e50c3db36e89b127b8a622b120f6721") 81 | .map_err(|hex_err| VrfError::PublicKey(hex_err.to_string())) 82 | } 83 | } 84 | -------------------------------------------------------------------------------- /akd_core/src/hash/mod.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) Meta Platforms, Inc. and affiliates. 2 | // 3 | // This source code is dual-licensed under either the MIT license found in the 4 | // LICENSE-MIT file in the root directory of this source tree or the Apache 5 | // License, Version 2.0 found in the LICENSE-APACHE file in the root directory 6 | // of this source tree. You may select, at your option, one of the above-listed licenses. 7 | 8 | //! This module contains all the hashing utilities needed for the AKD directory 9 | //! and verification operations 10 | 11 | #[cfg(feature = "nostd")] 12 | use alloc::format; 13 | #[cfg(feature = "nostd")] 14 | use alloc::string::String; 15 | 16 | /// A hash digest of a specified number of bytes 17 | pub type Digest = [u8; DIGEST_BYTES]; 18 | /// Represents an empty digest, with no data contained 19 | pub const EMPTY_DIGEST: [u8; DIGEST_BYTES] = [0u8; DIGEST_BYTES]; 20 | /// The number of bytes in a digest 21 | pub const DIGEST_BYTES: usize = 32; 22 | 23 | #[cfg(test)] 24 | mod tests; 25 | 26 | /// Try and parse a digest from an unknown length of bytes. 
Helpful for converting a `Vec` 27 | /// to a [Digest] 28 | pub fn try_parse_digest(value: &[u8]) -> Result { 29 | if value.len() != DIGEST_BYTES { 30 | Err(format!( 31 | "Failed to parse Digest. Expected {} bytes but the value has {} bytes", 32 | DIGEST_BYTES, 33 | value.len() 34 | )) 35 | } else { 36 | let mut arr = EMPTY_DIGEST; 37 | arr.copy_from_slice(value); 38 | Ok(arr) 39 | } 40 | } 41 | -------------------------------------------------------------------------------- /akd_core/src/hash/tests.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) Meta Platforms, Inc. and affiliates. 2 | // 3 | // This source code is dual-licensed under either the MIT license found in the 4 | // LICENSE-MIT file in the root directory of this source tree or the Apache 5 | // License, Version 2.0 found in the LICENSE-APACHE file in the root directory 6 | // of this source tree. You may select, at your option, one of the above-listed licenses. 7 | 8 | //! Tests for hashing 9 | 10 | use super::*; 11 | 12 | #[cfg(feature = "nostd")] 13 | use alloc::vec; 14 | 15 | #[test] 16 | fn test_try_parse_digest() { 17 | let mut data = EMPTY_DIGEST; 18 | let digest = try_parse_digest(&data).unwrap(); 19 | assert_eq!(EMPTY_DIGEST, digest); 20 | data[0] = 1; 21 | let digest = try_parse_digest(&data).unwrap(); 22 | assert_ne!(EMPTY_DIGEST, digest); 23 | 24 | let data_bad_length = vec![0u8; DIGEST_BYTES + 1]; 25 | assert!(try_parse_digest(&data_bad_length).is_err()); 26 | } 27 | -------------------------------------------------------------------------------- /akd_core/src/proto/specs/mod.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) Meta Platforms, Inc. and affiliates. 
2 | // 3 | // This source code is dual-licensed under either the MIT license found in the 4 | // LICENSE-MIT file in the root directory of this source tree or the Apache 5 | // License, Version 2.0 found in the LICENSE-APACHE file in the root directory 6 | // of this source tree. You may select, at your option, one of the above-listed licenses. 7 | 8 | //! @generated code 9 | 10 | include!(concat!(env!("OUT_DIR"), "/protos/mod.rs")); 11 | -------------------------------------------------------------------------------- /akd_core/src/proto/specs/types.proto: -------------------------------------------------------------------------------- 1 | // Copyright (c) Meta Platforms, Inc. and affiliates. 2 | // 3 | // This source code is dual-licensed under either the MIT license found in the 4 | // LICENSE-MIT file in the root directory of this source tree or the Apache 5 | // License, Version 2.0 found in the LICENSE-APACHE file in the root directory 6 | // of this source tree. You may select, at your option, one of the above-listed licenses. 
7 | 8 | // This contains the protobuf definition for inter-node messaging structures 9 | 10 | // To re-generate the protobuf specifications, utilize the build.rs script in this 11 | // crate (See Cargo.toml file) 12 | 13 | syntax = "proto2"; 14 | 15 | /* NodeLabel represents the label of a history tree node in the AKD tree with a 16 | * supplied label-length and label value (location) */ 17 | message NodeLabel { 18 | optional bytes label_val = 1; 19 | optional uint32 label_len = 2; 20 | } 21 | 22 | /* Element of an AZKS which contains a label and value */ 23 | message AzksElement { 24 | optional NodeLabel label = 1; 25 | optional bytes value = 2; 26 | } 27 | 28 | /* Represents a specific level of the tree with the parental sibling and the direction 29 | of the parent for use in tree hash calculations */ 30 | message SiblingProof { 31 | optional NodeLabel label = 1; 32 | repeated AzksElement siblings = 2; 33 | optional uint32 direction = 3; 34 | } 35 | 36 | /* Merkle proof of membership of a [`NodeLabel`] with a particular hash 37 | value in the tree at a given epoch */ 38 | message MembershipProof { 39 | optional NodeLabel label = 1; 40 | optional bytes hash_val = 2; 41 | repeated SiblingProof sibling_proofs = 3; 42 | } 43 | 44 | /* Merkle Patricia proof of non-membership for a [`NodeLabel`] in the tree 45 | at a given epoch. */ 46 | message NonMembershipProof { 47 | optional NodeLabel label = 1; 48 | optional NodeLabel longest_prefix = 2; 49 | repeated AzksElement longest_prefix_children = 3; 50 | optional MembershipProof longest_prefix_membership_proof = 4; 51 | } 52 | 53 | /* Proof that a given label was at a particular state at the given epoch. 54 | This means we need to show that the state and version we are claiming for this node must have been: 55 | * committed in the tree, 56 | * not too far ahead of the most recent marker version, 57 | * not stale when served. 58 | This proof is sent in response to a lookup query for a particular key. 
*/ 59 | message LookupProof { 60 | optional uint64 epoch = 1; 61 | optional bytes value = 2; 62 | optional uint64 version = 3; 63 | optional bytes existence_vrf_proof = 4; 64 | optional MembershipProof existence_proof = 5; 65 | optional bytes marker_vrf_proof = 6; 66 | optional MembershipProof marker_proof = 7; 67 | optional bytes freshness_vrf_proof = 8; 68 | optional NonMembershipProof freshness_proof = 9; 69 | optional bytes commitment_nonce = 10; 70 | } 71 | 72 | /* A vector of UpdateProofs are sent as the proof to a history query for a particular key. 73 | For each version of the value associated with the key, the verifier must check that: 74 | * the version was included in the claimed epoch, 75 | * the previous version was retired at this epoch, 76 | * the version did not exist prior to this epoch, 77 | * the next few versions (up until the next marker), did not exist at this epoch, 78 | * the future marker versions did not exist at this epoch. */ 79 | message UpdateProof { 80 | optional uint64 epoch = 1; 81 | optional bytes value = 2; 82 | optional uint64 version = 3; 83 | optional bytes existence_vrf_proof = 4; 84 | optional MembershipProof existence_proof = 5; 85 | optional bytes previous_version_vrf_proof = 6; 86 | optional MembershipProof previous_version_proof = 7; 87 | optional bytes commitment_nonce = 8; 88 | } 89 | 90 | /* This proof consists of an array of [`UpdateProof`]s, membership proofs for 91 | existence versions at past markers, and non-membership proofs for future markers 92 | up until the current epoch. 
*/ 93 | message HistoryProof { 94 | repeated UpdateProof update_proofs = 1; 95 | repeated bytes past_marker_vrf_proofs = 2; 96 | repeated MembershipProof existence_of_past_marker_proofs = 3; 97 | repeated bytes future_marker_vrf_proofs = 4; 98 | repeated NonMembershipProof non_existence_of_future_marker_proofs = 5; 99 | } 100 | 101 | /* SingleEncodedProof represents a proof that no leaves were changed or removed between epoch t and t + 1 */ 102 | message SingleAppendOnlyProof { 103 | repeated AzksElement inserted = 1; 104 | repeated AzksElement unchanged_nodes = 2; 105 | } 106 | 107 | /* An append-only proof is a proof that no nodes were changes from epochs[0] to epochs[end], epoch-by-epoch */ 108 | message AppendOnlyProof { 109 | repeated SingleAppendOnlyProof proofs = 1; 110 | repeated uint64 epochs = 2; 111 | } 112 | -------------------------------------------------------------------------------- /akd_core/src/proto/specs/types.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) Meta Platforms, Inc. and affiliates. 2 | // 3 | // This source code is dual-licensed under either the MIT license found in the 4 | // LICENSE-MIT file in the root directory of this source tree or the Apache 5 | // License, Version 2.0 found in the LICENSE-APACHE file in the root directory 6 | // of this source tree. You may select, at your option, one of the above-listed licenses. 7 | 8 | //! @generated code 9 | 10 | include!(concat!(env!("OUT_DIR"), "/protos/types.rs")); 11 | -------------------------------------------------------------------------------- /akd_core/src/verify/lookup.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) Meta Platforms, Inc. and affiliates. 
2 | // 3 | // This source code is dual-licensed under either the MIT license found in the 4 | // LICENSE-MIT file in the root directory of this source tree or the Apache 5 | // License, Version 2.0 found in the LICENSE-APACHE file in the root directory 6 | // of this source tree. You may select, at your option, one of the above-listed licenses. 7 | 8 | //! Verification of lookup proofs 9 | 10 | use super::base::{verify_existence, verify_existence_with_val, verify_nonexistence}; 11 | use super::VerificationError; 12 | 13 | use crate::configuration::Configuration; 14 | use crate::hash::Digest; 15 | use crate::{AkdLabel, LookupProof, VerifyResult, VersionFreshness}; 16 | 17 | /// Verifies a lookup with respect to the root_hash 18 | pub fn lookup_verify( 19 | vrf_public_key: &[u8], 20 | root_hash: Digest, 21 | current_epoch: u64, 22 | akd_label: AkdLabel, 23 | proof: LookupProof, 24 | ) -> Result { 25 | if proof.version > current_epoch { 26 | return Err(VerificationError::LookupProof(alloc::format!( 27 | "Proof version {} is greater than current epoch {}", 28 | proof.version, 29 | current_epoch 30 | ))); 31 | } 32 | 33 | verify_existence_with_val::( 34 | vrf_public_key, 35 | root_hash, 36 | &akd_label, 37 | &proof.value, 38 | proof.epoch, 39 | &proof.commitment_nonce, 40 | VersionFreshness::Fresh, 41 | proof.version, 42 | &proof.existence_vrf_proof, 43 | &proof.existence_proof, 44 | )?; 45 | 46 | let marker_version = 1 << crate::utils::get_marker_version_log2(proof.version); 47 | verify_existence::( 48 | vrf_public_key, 49 | root_hash, 50 | &akd_label, 51 | VersionFreshness::Fresh, 52 | marker_version, 53 | &proof.marker_vrf_proof, 54 | &proof.marker_proof, 55 | )?; 56 | 57 | verify_nonexistence::( 58 | vrf_public_key, 59 | root_hash, 60 | &akd_label, 61 | VersionFreshness::Stale, 62 | proof.version, 63 | &proof.freshness_vrf_proof, 64 | &proof.freshness_proof, 65 | )?; 66 | 67 | Ok(VerifyResult { 68 | epoch: proof.epoch, 69 | version: proof.version, 70 | value: 
proof.value, 71 | }) 72 | } 73 | -------------------------------------------------------------------------------- /akd_core/src/verify/mod.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) Meta Platforms, Inc. and affiliates. 2 | // 3 | // This source code is dual-licensed under either the MIT license found in the 4 | // LICENSE-MIT file in the root directory of this source tree or the Apache 5 | // License, Version 2.0 found in the LICENSE-APACHE file in the root directory 6 | // of this source tree. You may select, at your option, one of the above-listed licenses. 7 | 8 | //! This module contains verification calls for different proofs contained in the AKD crate 9 | 10 | pub mod base; 11 | pub mod history; 12 | pub mod lookup; 13 | 14 | #[cfg(feature = "nostd")] 15 | use alloc::format; 16 | #[cfg(feature = "nostd")] 17 | use alloc::string::String; 18 | #[cfg(feature = "nostd")] 19 | use alloc::string::ToString; 20 | 21 | /// Proof verification error types 22 | #[derive(Debug, Eq, PartialEq)] 23 | pub enum VerificationError { 24 | /// Error verifying a membership proof 25 | MembershipProof(String), 26 | /// Error verifying a non-membership proof 27 | NonMembershipProof(String), 28 | /// Error verifying a lookup proof 29 | LookupProof(String), 30 | /// Error verifying a history proof 31 | HistoryProof(String), 32 | /// Error verifying a VRF proof 33 | #[cfg(feature = "vrf")] 34 | Vrf(crate::ecvrf::VrfError), 35 | /// Error converting protobuf types during verification 36 | #[cfg(feature = "protobuf")] 37 | Serialization(crate::proto::ConversionError), 38 | } 39 | 40 | impl core::fmt::Display for VerificationError { 41 | fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { 42 | let code = match &self { 43 | VerificationError::MembershipProof(err) => format!("(Membership proof) - {err}"), 44 | VerificationError::NonMembershipProof(err) => { 45 | format!("(Non-membership proof) - {err}") 46 | } 
47 | VerificationError::LookupProof(err) => format!("(Lookup proof) - {err}"), 48 | VerificationError::HistoryProof(err) => format!("(History proof) - {err}"), 49 | #[cfg(feature = "vrf")] 50 | VerificationError::Vrf(vrf) => vrf.to_string(), 51 | #[cfg(feature = "protobuf")] 52 | VerificationError::Serialization(proto) => proto.to_string(), 53 | }; 54 | write!(f, "Verification error {code}") 55 | } 56 | } 57 | 58 | #[cfg(feature = "vrf")] 59 | impl From for VerificationError { 60 | fn from(input: crate::ecvrf::VrfError) -> Self { 61 | VerificationError::Vrf(input) 62 | } 63 | } 64 | 65 | #[cfg(feature = "protobuf")] 66 | impl From for VerificationError { 67 | fn from(input: crate::proto::ConversionError) -> Self { 68 | VerificationError::Serialization(input) 69 | } 70 | } 71 | 72 | #[cfg(feature = "protobuf")] 73 | impl From for VerificationError { 74 | fn from(input: protobuf::Error) -> Self { 75 | let conv: crate::proto::ConversionError = input.into(); 76 | conv.into() 77 | } 78 | } 79 | 80 | // Re-export the necessary verification functions 81 | 82 | #[cfg(feature = "public_tests")] 83 | pub use base::{verify_membership_for_tests_only, verify_nonmembership_for_tests_only}; 84 | 85 | pub use history::{key_history_verify, HistoryVerificationParams}; 86 | pub use lookup::lookup_verify; 87 | -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | # Use root/example as user/password credentials 2 | services: 3 | db: 4 | container_name: akd-test-db 5 | platform: linux/x86_64 6 | image: mysql:8.4 7 | command: --mysql-native-password=ON 8 | restart: unless-stopped 9 | ports: 10 | - "8001:3306" 11 | environment: 12 | MYSQL_ROOT_PASSWORD: example 13 | MYSQL_DATABASE: default 14 | -------------------------------------------------------------------------------- /examples/Cargo.toml: 
-------------------------------------------------------------------------------- 1 | [package] 2 | name = "examples" 3 | version = "0.12.0-pre.11" 4 | authors = ["akd contributors"] 5 | license = "MIT OR Apache-2.0" 6 | edition = "2021" 7 | publish = false 8 | 9 | 10 | [[bin]] 11 | name = "akd-examples" 12 | path = "src/main.rs" 13 | bench = false 14 | doc = false 15 | 16 | [features] 17 | # Collect runtime metrics on db access calls + timing 18 | runtime_metrics = [] 19 | 20 | [dependencies] 21 | anyhow = "1" 22 | async-trait = "0.1" 23 | colored = "2" 24 | clap = { version = "4", features = ["derive"] } 25 | dialoguer = "0.11" 26 | hex = "0.4" 27 | indicatif = "0.17" 28 | log = { version = "0.4", features = ["kv_unstable"] } 29 | multi_log = "0.1" 30 | mysql_async = "0.32" 31 | mysql_common = "0.31" 32 | once_cell = "1" 33 | protobuf = "3" 34 | rand = "0.8" 35 | serde = { version = "1", features = ["derive"] } 36 | serde_json = "1" 37 | thread-id = "4" 38 | tokio = { version = "1", features = ["full"] } 39 | xml-rs = "0.8" 40 | reqwest = "0.11" 41 | regex = "1" 42 | serde_yaml = "0.9" 43 | wasm-bindgen = "0.2" 44 | 45 | akd = { path = "../akd", features = [ 46 | "public_tests", 47 | "public_auditing", 48 | "whatsapp_v1", 49 | "experimental", 50 | ] } 51 | akd_core = { path = "../akd_core" } 52 | 53 | [dev-dependencies] 54 | serial_test = "2" 55 | assert_fs = "1" 56 | paste = "1" 57 | wasm-bindgen-test = "0.3" 58 | -------------------------------------------------------------------------------- /examples/README.md: -------------------------------------------------------------------------------- 1 | # Examples 2 | This crate contains a set of examples for using AKD. 
3 | 4 | ## Running Examples 5 | 6 | There are currently three examples supported in this library: 7 | - `whatsapp-kt-auditor`: An auditor for WhatsApp key transparency audit proofs 8 | - `mysql-demo`: An interactive application that demonstrates the use of AKD with a MySQL storage layer 9 | - `fixture-generator`: A utility for producing test fixtures which can be used to measure when the underlying byte 10 | format for the AKD operations change 11 | 12 | ### WhatsApp Key Transparency Auditor 13 | 14 | To run this example: 15 | ``` 16 | cargo run -p examples --release -- whatsapp-kt-auditor 17 | ``` 18 | and this will bring up an interactive interface which allows you to load the current epochs, and choose which epochs to audit. 19 | 20 | You can also automatically audit the latest epoch with the `-l` parameter (for "latest"), by running: 21 | ``` 22 | cargo run -p examples --release -- whatsapp-kt-auditor -l 23 | ``` 24 | 25 | ### MySQL Demo 26 | 27 | This example requires setting up [Docker](https://docs.docker.com/get-docker/) (which will host the MySQL instance). Once Docker 28 | is up and running, you can simply run: 29 | ```bash 30 | docker compose up [-d] 31 | ``` 32 | in the root of repository to spin up the MySQL instance and then run: 33 | ```bash 34 | cargo run -p examples --release -- mysql-demo 35 | ``` 36 | to run the demo. You can also pass the `--help` argument to view various options for running benchmarks and auto-populating the instance. 37 | For example, you can try: 38 | ``` 39 | cargo run -p examples --release -- mysql-demo bench-publish 1000 3 40 | ``` 41 | which will create a publish with 1000 users each with 3 updates (across 3 epochs). 42 | 43 | Note that if you are encountering the error: 44 | ``` 45 | Failed 1 reconnection attempt(s) to MySQL database 46 | ``` 47 | then this means that establishing a connection with the Docker instance failed, and you will need to double-check your Docker setup. 
48 | 49 | ### Fixture Generator 50 | 51 | This is primarily used for testing and compatibility purposes when the AKD library updates. For example, say that you as a developer 52 | make a change to an existing AKD configuration which affects the byte format (either by replacing the hash function, 53 | adjusting how hashing is done, or modifying the VRF computation). Then, when re-running the tests, the fixture tests will fail, to 54 | indicate that a non-backwards-compatible change has been made. 55 | 56 | If making a non-backwards-compatible change is intended, then this can be addressed by re-running the fixture generation code with 57 | the following command: 58 | ``` 59 | cargo run -p examples -- fixture-generator \ 60 | --epochs 10 \ 61 | --max_updates 5 \ 62 | --capture_states 9 10 \ 63 | --capture_deltas 10 \ 64 | --out examples/src/fixture_generator/examples/ 65 | ``` 66 | This will automatically write the new fixtures to the appropriate files under `examples/src/fixture_generator/examples/`, and 67 | the tests should now pass. 68 | 69 | ### Test Vectors 70 | 71 | Similarly to the fixture generator, this is also used for testing and compatibility purposes, but specifically for generating test 72 | vectors that can be matched against on a separate client implementation. Note that the serialization of these structs is done 73 | through protobuf, so this can be used to double-check that a compatible client implementation can indeed parse the proof bytes 74 | that are generated by the server-side API. The resulting output files under `examples/src/test_vectors/` 75 | contain hex-encoded values for the inputs to lookup and history proof verification. 
76 | 77 | The test vector generation code can be run with the following command: 78 | ``` 79 | cargo run -p examples --release -- test-vectors \ 80 | --out examples/src/test_vectors 81 | ``` 82 | 83 | ### WASM Client 84 | 85 | This example, unlike the others, is not executable and is mainly intended to demonstrate how an application can build the WASM bindings 86 | for the client operations. Since the client operations only depend on the `akd_core` crate, which has fewer dependencies than the full 87 | `akd` crate, the resulting WASM library will be much more condensed than simply building directly from `akd`. You can take a look in the 88 | `wasm_client/` sub-directory for a simple example set of bindings for a client that wishes to verify proofs generated by the server. 89 | -------------------------------------------------------------------------------- /examples/src/fixture_generator/examples/example_tests.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) Meta Platforms, Inc. and affiliates. 2 | // 3 | // This source code is dual-licensed under either the MIT license found in the 4 | // LICENSE-MIT file in the root directory of this source tree or the Apache 5 | // License, Version 2.0 found in the LICENSE-APACHE file in the root directory 6 | // of this source tree. You may select, at your option, one of the above-listed licenses. 7 | 8 | //! Example test utilizing a fixture file. 
9 | 10 | use std::fs::File; 11 | 12 | use akd::{ 13 | append_only_zks::AzksParallelismConfig, 14 | directory::Directory, 15 | ecvrf::HardCodedAkdVRF, 16 | storage::{memory::AsyncInMemoryDatabase, Database, StorageManager, StorageUtil}, 17 | NamedConfiguration, 18 | }; 19 | 20 | use crate::fixture_generator::reader::Reader; 21 | use crate::{fixture_generator::reader::yaml::YamlFileReader, test_config}; 22 | 23 | // Contains two consecutive states and the delta between them 24 | const FILE_PATH: &str = "src/fixture_generator/examples"; 25 | 26 | test_config!(test_use_fixture); 27 | async fn test_use_fixture() { 28 | // load fixture 29 | let mut reader = 30 | YamlFileReader::new(File::open(format!("{}/{}.yaml", FILE_PATH, TC::name())).unwrap()) 31 | .unwrap(); 32 | let metadata = reader.read_metadata().unwrap(); 33 | let epochs = metadata.args.capture_states.unwrap(); 34 | 35 | // prepare directory with initial state 36 | let initial_state = reader.read_state(epochs[0]).unwrap(); 37 | let db = AsyncInMemoryDatabase::new(); 38 | db.batch_set(initial_state.records, akd::storage::DbSetState::General) 39 | .await 40 | .unwrap(); 41 | let vrf = HardCodedAkdVRF {}; 42 | let storage_manager = StorageManager::new_no_cache(db); 43 | let akd = Directory::::new( 44 | storage_manager.clone(), 45 | vrf, 46 | AzksParallelismConfig::default(), 47 | ) 48 | .await 49 | .unwrap(); 50 | 51 | // publish delta updates 52 | let delta = reader.read_delta(epochs[1]).unwrap(); 53 | akd.publish(delta.updates).await.unwrap(); 54 | 55 | // assert final directory state 56 | let final_state = reader.read_state(epochs[1]).unwrap(); 57 | let records = storage_manager 58 | .get_db() 59 | .batch_get_all_direct() 60 | .await 61 | .unwrap(); 62 | assert_eq!(final_state.records.len(), records.len()); 63 | assert!(records.iter().all(|r| final_state.records.contains(r))); 64 | } 65 | -------------------------------------------------------------------------------- 
/examples/src/fixture_generator/examples/mod.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) Meta Platforms, Inc. and affiliates. 2 | // 3 | // This source code is dual-licensed under either the MIT license found in the 4 | // LICENSE-MIT file in the root directory of this source tree or the Apache 5 | // License, Version 2.0 found in the LICENSE-APACHE file in the root directory 6 | // of this source tree. You may select, at your option, one of the above-listed licenses. 7 | 8 | //! This module contains examples of how fixtures can be read and used in tests. 9 | 10 | #[cfg(test)] 11 | mod example_tests; 12 | -------------------------------------------------------------------------------- /examples/src/fixture_generator/generator.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) Meta Platforms, Inc. and affiliates. 2 | // 3 | // This source code is dual-licensed under either the MIT license found in the 4 | // LICENSE-MIT file in the root directory of this source tree or the Apache 5 | // License, Version 2.0 found in the LICENSE-APACHE file in the root directory 6 | // of this source tree. You may select, at your option, one of the above-listed licenses. 7 | 8 | //! This module contains the struct definitions of the tool output and main 9 | //! fixture generation logic. 
10 | 11 | use std::collections::HashMap; 12 | use std::env; 13 | use std::fs::File; 14 | use std::io::Write; 15 | 16 | use akd::append_only_zks::AzksParallelismConfig; 17 | use akd::directory::Directory; 18 | use akd::storage::types::DbRecord; 19 | use akd::storage::{StorageManager, StorageUtil}; 20 | use akd::{AkdLabel, AkdValue, DomainLabel, NamedConfiguration}; 21 | use rand::rngs::StdRng; 22 | use rand::Rng; 23 | use rand::SeedableRng; 24 | use serde::{Deserialize, Serialize}; 25 | 26 | use crate::fixture_generator::parser::Args; 27 | use crate::fixture_generator::writer::yaml::YamlWriter; 28 | use crate::fixture_generator::writer::Writer; 29 | 30 | /// Directory state comprises all database records at a particular epoch. 31 | #[derive(Debug, PartialEq, Eq, Serialize, Deserialize)] 32 | pub struct State { 33 | pub epoch: u32, 34 | pub records: Vec, 35 | } 36 | 37 | /// Delta comprises all key updates published to the directory to advance to an 38 | /// epoch. 39 | #[derive(Debug, PartialEq, Eq, Serialize, Deserialize)] 40 | pub struct Delta { 41 | pub epoch: u32, 42 | pub updates: Vec<(AkdLabel, AkdValue)>, 43 | } 44 | 45 | /// Metadata about the output, including arguments passed to this tool and 46 | /// the tool version. 
47 | #[derive(Debug, PartialEq, Eq, Serialize, Deserialize)] 48 | pub struct Metadata { 49 | pub args: Args, 50 | pub version: String, 51 | pub configuration: String, 52 | pub domain_label: String, 53 | } 54 | 55 | // "@" has to be separated from "generated" or linters might ignore this file 56 | const HEADER_COMMENT: &str = concat!( 57 | "@", 58 | "generated This file was automatically generated by \n\ 59 | the fixture generator tool with the following command:\n\n\ 60 | cargo run -p examples -- fixture-generator \\" 61 | ); 62 | const METADATA_COMMENT: &str = "Metadata"; 63 | const STATE_COMMENT: &str = "State - Epoch"; 64 | const DELTA_COMMENT: &str = "Delta - Epoch"; 65 | 66 | pub async fn run(args: Args) { 67 | // NOTE(new_config): Add new configurations here 68 | type L = akd::ExampleLabel; 69 | generate::(&args).await; 70 | generate::, L>(&args).await; 71 | } 72 | 73 | pub(crate) async fn generate(args: &Args) { 74 | let mut rng = StdRng::seed_from_u64(42); 75 | 76 | // args assertions 77 | assert!(args.max_updates >= args.min_updates); 78 | assert!(args 79 | .capture_states 80 | .as_ref() 81 | .is_none_or(|states| states.iter().max().unwrap() <= &args.epochs)); 82 | assert!(args 83 | .capture_deltas 84 | .as_ref() 85 | .is_none_or(|deltas| deltas.iter().max().unwrap() <= &args.epochs)); 86 | 87 | // process users 88 | let mut user_map = HashMap::new(); 89 | for user in &args.users { 90 | let mut events_map = HashMap::new(); 91 | for event in &user.events { 92 | events_map.insert(event.epoch, event.value.clone()); 93 | } 94 | user_map.insert(user.label.clone(), events_map); 95 | } 96 | 97 | // initialize writer 98 | let buffer: Box = if let Some(ref file_path) = args.out { 99 | Box::new(File::create(format!("{}/{}.yaml", file_path, TC::name())).unwrap()) 100 | } else { 101 | Box::new(std::io::stdout()) 102 | }; 103 | let mut writer = YamlWriter::new(buffer); 104 | 105 | // write raw args as comment 106 | let raw_args = format!( 107 | " {}", 108 | 
env::args().skip(1).collect::>().join(" ") 109 | ); 110 | writer.write_comment(HEADER_COMMENT); 111 | raw_args 112 | .split(" -") 113 | .skip(1) 114 | .map(|arg| format!(" -{arg} \\")) 115 | .for_each(|comment| writer.write_comment(&comment)); 116 | 117 | // write fixture metadata 118 | let comment = METADATA_COMMENT.to_string(); 119 | let metadata = Metadata { 120 | args: args.clone(), 121 | version: env!("CARGO_PKG_VERSION").to_string(), 122 | configuration: TC::name().to_string(), 123 | domain_label: String::from_utf8(L::domain_label().to_vec()).unwrap(), 124 | }; 125 | writer.write_line(); 126 | writer.write_comment(&comment); 127 | writer.write_object(metadata); 128 | 129 | // initialize directory 130 | let db = akd::storage::memory::AsyncInMemoryDatabase::new(); 131 | let vrf = akd::ecvrf::HardCodedAkdVRF {}; 132 | let storage_manager = StorageManager::new_no_cache(db); 133 | let akd = Directory::::new( 134 | storage_manager.clone(), 135 | vrf, 136 | AzksParallelismConfig::default(), 137 | ) 138 | .await 139 | .unwrap(); 140 | 141 | for epoch in 1..=args.epochs { 142 | // gather specified key updates 143 | let mut updates = vec![]; 144 | for (label, events) in user_map.iter() { 145 | if let Some(maybe_value) = events.get(&epoch) { 146 | let value = maybe_value 147 | .clone() 148 | .unwrap_or_else(|| AkdValue::random(&mut rng)); 149 | updates.push((label.clone(), value)) 150 | } 151 | } 152 | 153 | // generate random key updates if allowed 154 | if !args.no_generated_updates { 155 | let num_updates = rng.gen_range(args.min_updates..args.max_updates); 156 | for _ in updates.len()..num_updates as usize { 157 | updates.push((AkdLabel::random(&mut rng), AkdValue::random(&mut rng))); 158 | } 159 | } 160 | 161 | // write delta if required 162 | if let Some(ref deltas) = args.capture_deltas { 163 | if deltas.contains(&epoch) { 164 | let comment = format!("{DELTA_COMMENT} {epoch}"); 165 | let delta = Delta { 166 | epoch, 167 | updates: updates.clone(), 168 | }; 169 | 
writer.write_line(); 170 | writer.write_comment(&comment); 171 | writer.write_object(delta); 172 | } 173 | } 174 | 175 | // perform publish 176 | akd.publish(updates.clone()).await.unwrap(); 177 | 178 | // write state if required 179 | if let Some(ref states) = args.capture_states { 180 | if states.contains(&epoch) { 181 | let comment = format!("{STATE_COMMENT} {epoch}"); 182 | 183 | // Sort the records by label to make the output deterministic. 184 | let mut records = storage_manager 185 | .get_db() 186 | .batch_get_all_direct() 187 | .await 188 | .unwrap(); 189 | records.sort(); 190 | 191 | let state = State { epoch, records }; 192 | 193 | writer.write_line(); 194 | writer.write_comment(&comment); 195 | writer.write_object(state); 196 | } 197 | } 198 | } 199 | 200 | // flush writer and exit 201 | writer.flush(); 202 | } 203 | -------------------------------------------------------------------------------- /examples/src/fixture_generator/mod.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) Meta Platforms, Inc. and affiliates. 2 | // 3 | // This source code is dual-licensed under either the MIT license found in the 4 | // LICENSE-MIT file in the root directory of this source tree or the Apache 5 | // License, Version 2.0 found in the LICENSE-APACHE file in the root directory 6 | // of this source tree. You may select, at your option, one of the above-listed licenses. 7 | 8 | //! A CLI tool for generating directory fixtures for debug and testing purposes. 9 | //! Run cargo run -p examples -- fixture-generator --help for options. Example command: 10 | //! 11 | //! cargo run -- fixture-generator \ 12 | //! --user "User1: 1, (9, 'abc'), (10, 'def')" \ 13 | //! --user "User2: 1, 2, 3, 4, 5, 6, 7, 8, 9, 10" \ 14 | //! --epochs 10 \ 15 | //! --max_updates 5 \ 16 | //! --capture_states 9 10 \ 17 | //! --capture_deltas 10 18 | //! 
19 | 20 | mod examples; 21 | mod generator; 22 | mod parser; 23 | pub mod reader; 24 | pub mod writer; 25 | 26 | pub(crate) use parser::Args; 27 | 28 | /// Re-export generator run function. 29 | pub(crate) use generator::run; 30 | 31 | const YAML_SEPARATOR: &str = "---"; 32 | -------------------------------------------------------------------------------- /examples/src/fixture_generator/parser.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) Meta Platforms, Inc. and affiliates. 2 | // 3 | // This source code is dual-licensed under either the MIT license found in the 4 | // LICENSE-MIT file in the root directory of this source tree or the Apache 5 | // License, Version 2.0 found in the LICENSE-APACHE file in the root directory 6 | // of this source tree. You may select, at your option, one of the above-listed licenses. 7 | 8 | //! This module contains the CLI argument definitions and parser. 9 | 10 | use akd::{AkdLabel, AkdValue}; 11 | use clap::Parser; 12 | use regex::Regex; 13 | use serde::{Deserialize, Serialize}; 14 | 15 | /// Any alphanumeric string - spaces are allowed e.g. "User123" or "User 123" 16 | const USER_PATTERN: &str = r"[\w\s]+"; 17 | 18 | /// A solo string of digits e.g. "10" or a tuple of digits and a string 19 | /// e.g."(10, 'abc')" 20 | const EVENT_PATTERN: &str = r"\d+|(\(\s*(\d+)\s*,\s*'(\w*)'\s*\))"; 21 | 22 | /// A key update the tool should include in the tree at the given epoch. 23 | /// If "value" is None, the tool will randomly generate a value for the epoch. 24 | #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] 25 | pub struct UserEvent { 26 | pub epoch: u32, 27 | pub value: Option, 28 | } 29 | 30 | /// A user whose key update events should be included in the tree. 
31 | #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] 32 | pub struct User { 33 | pub label: AkdLabel, 34 | pub events: Vec, 35 | } 36 | 37 | /// This tool allows a directory to be created with specified and random 38 | /// contents, capturing the directory state and epoch-to-epoch delta in 39 | /// an output file for use in debugging and as test fixtures. 40 | #[derive(Parser, Clone, Debug, Eq, PartialEq, Serialize, Deserialize)] 41 | pub struct Args { 42 | /// Users and their associated key update events. 43 | /// A username is expected, followed by a colon and a list of epochs OR 44 | /// (epoch, value). Usernames are expected to be utf-8 strings, which will 45 | /// be internally interpreted as bytes. 46 | /// The following are valid examples of user arguments: 47 | /// --user "username: 1, 3, (5, 'xyz')" 48 | /// --user="username: [(1,'abc'), 2]" 49 | /// -u "some username: 1" 50 | #[arg( 51 | long = "user", 52 | short = 'u', 53 | num_args = 0.., 54 | value_parser = parse_user_events, 55 | )] 56 | pub users: Vec, 57 | 58 | /// Number of epochs to advance the tree by 59 | /// e.g. a value of 3 will perform 3 publishes on an empty directory. 60 | #[arg(long = "epochs", short = 'e')] 61 | pub epochs: u32, 62 | 63 | /// Maximum number of key updates **per epoch** the tool should perform. 64 | /// Note that all user events explicitly passed for an epoch will be 65 | /// included even if the number exceeds this value. 66 | #[arg(long = "max_updates", default_value = "10")] 67 | pub max_updates: u32, 68 | 69 | /// Minimum number of key updates **per epoch** the tool should perform. 70 | /// The tool will generate random labels and values to include in an epoch 71 | /// if the user events explicitly passed for an epoch are not sufficients. 72 | #[arg(long = "min_updates", default_value = "0")] 73 | pub min_updates: u32, 74 | 75 | /// Epochs where the state of the directory should be captured in the output 76 | /// e.g. 
the value 3 will output all db records after epoch 3 is performed. 77 | /// Multiple values are accepted e.g. --capture_states 9 10 78 | #[arg(long = "capture_states", short = 's', num_args = 0..)] 79 | pub capture_states: Option>, 80 | 81 | /// Epochs where the key updates required to bring the directory to the 82 | /// epoch should be captured in the output. 83 | /// e.g. the value 3 will output all key updates that were performed to 84 | /// advance the directory from epoch 2 to 3. 85 | /// Multiple values are accepted e.g. --capture_deltas 9 10 86 | #[arg(long = "capture_deltas", short = 'd', num_args = 0..)] 87 | pub capture_deltas: Option>, 88 | 89 | /// Name of output path. 90 | /// If omitted, output will be printed to stdout. 91 | #[arg(long = "out", short = 'o')] 92 | pub out: Option, 93 | 94 | /// Stops tool from generating random key updates in publishes. 95 | /// Use this if you want the tool to only use explicitly passed key updates. 96 | /// Explicilty passed key updates without values would still use randomly 97 | /// generated values. 
98 | #[arg(long = "no_generated_updates", short = 'n')] 99 | pub no_generated_updates: bool, 100 | } 101 | 102 | fn parse_user_events(s: &str) -> Result { 103 | let mut split = s.split(':'); 104 | let username_text = split.next().unwrap(); 105 | let maybe_events_text = split.next(); 106 | 107 | let username = Regex::new(USER_PATTERN) 108 | .unwrap() 109 | .captures(username_text) 110 | .unwrap() 111 | .get(0) 112 | .unwrap() 113 | .as_str(); 114 | 115 | let events = if let Some(events_text) = maybe_events_text { 116 | Regex::new(EVENT_PATTERN) 117 | .unwrap() 118 | .captures_iter(events_text) 119 | .map(|event| { 120 | let epoch: u32; 121 | let value: Option; 122 | if event.get(1).is_some() { 123 | epoch = event.get(2).unwrap().as_str().parse().unwrap(); 124 | value = Some(AkdValue::from(event.get(3).unwrap().as_str())); 125 | } else { 126 | epoch = event.get(0).unwrap().as_str().parse().unwrap(); 127 | value = None; 128 | } 129 | UserEvent { epoch, value } 130 | }) 131 | .collect::>() 132 | } else { 133 | vec![] 134 | }; 135 | 136 | Ok(User { 137 | label: AkdLabel::from(username), 138 | events, 139 | }) 140 | } 141 | -------------------------------------------------------------------------------- /examples/src/fixture_generator/reader/mod.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) Meta Platforms, Inc. and affiliates. 2 | // 3 | // This source code is dual-licensed under either the MIT license found in the 4 | // LICENSE-MIT file in the root directory of this source tree or the Apache 5 | // License, Version 2.0 found in the LICENSE-APACHE file in the root directory 6 | // of this source tree. You may select, at your option, one of the above-listed licenses. 7 | 8 | //! This module contains the Reader trait to deserialize the tool's serde-compatible 9 | //! objects from a formatted file, as well as implementations of the trait. 
10 | 11 | use std::result::Result; 12 | 13 | use crate::fixture_generator::generator::{Delta, Metadata, State}; 14 | 15 | /// Interface for reading output generated by the tool. 16 | pub trait Reader { 17 | /// Reads a metadata object. 18 | #[allow(dead_code)] 19 | fn read_metadata(&mut self) -> Result; 20 | 21 | /// Reads a state object for a given epoch. 22 | #[allow(dead_code)] 23 | fn read_state(&mut self, epoch: u32) -> Result; 24 | 25 | /// Reads a delta object for a given epoch. 26 | #[allow(dead_code)] 27 | fn read_delta(&mut self, epoch: u32) -> Result; 28 | 29 | /// Reads a String (freeform). 30 | #[allow(dead_code)] 31 | fn read_string(&mut self) -> Result; 32 | } 33 | 34 | #[derive(Debug, PartialEq, Eq)] 35 | pub enum ReaderError { 36 | NotFound, 37 | Format(String), 38 | Input(String), 39 | } 40 | 41 | impl std::error::Error for ReaderError {} 42 | 43 | impl std::fmt::Display for ReaderError { 44 | fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { 45 | match self { 46 | ReaderError::NotFound => write!(f, "Object not found"), 47 | ReaderError::Format(message) => write!(f, "Unexpected format: {message}"), 48 | ReaderError::Input(message) => write!(f, "Input stream error: {message}"), 49 | } 50 | } 51 | } 52 | 53 | /// YAML implementor of Reader trait. 54 | pub mod yaml; 55 | 56 | #[cfg(test)] 57 | mod tests; 58 | -------------------------------------------------------------------------------- /examples/src/fixture_generator/reader/tests.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) Meta Platforms, Inc. and affiliates. 2 | // 3 | // This source code is dual-licensed under either the MIT license found in the 4 | // LICENSE-MIT file in the root directory of this source tree or the Apache 5 | // License, Version 2.0 found in the LICENSE-APACHE file in the root directory 6 | // of this source tree. You may select, at your option, one of the above-listed licenses. 7 | 8 | //! 
//! Tests basic reader behavior.

use std::env;
use std::fs::File;

use akd::NamedConfiguration;
use assert_fs::fixture::{FileWriteStr, NamedTempFile, TempDir};
use clap::Parser;

use crate::fixture_generator::generator;
use crate::fixture_generator::parser::Args;
use crate::fixture_generator::reader::yaml::YamlFileReader;
use crate::fixture_generator::reader::{Reader, ReaderError};
use crate::test_config;

type L = akd::ExampleLabel;

test_config!(test_read);
/// Generates a real fixture into a temp dir, then verifies the reader can
/// fetch its objects in arbitrary order and reports NotFound appropriately.
async fn test_read<TC: NamedConfiguration>() {
    // generate a temp fixture file
    let file = TempDir::new()
        .unwrap()
        .with_file_name(format!("{}.yaml", TC::name()));
    let args = Args::parse_from(vec![
        env!("CARGO_CRATE_NAME"),
        "--epochs",
        "10",
        "--capture_deltas",
        "10",
        "--capture_states",
        "9",
        "10",
        "--out",
        &format!("{}", file.parent().unwrap().display()),
    ]);
    generator::generate::<TC, L>(&args).await;

    // initialize reader
    let mut reader = YamlFileReader::new(File::open(file).unwrap()).unwrap();

    // objects can be read in any order
    assert!(reader.read_state(10).is_ok());
    assert!(reader.read_delta(10).is_ok());
    assert!(reader.read_state(9).is_ok());
    assert!(reader.read_metadata().is_ok());

    // reading a non-existent object will return a NotFound error
    assert_eq!(Err(ReaderError::NotFound), reader.read_delta(9));
    assert_eq!(Err(ReaderError::NotFound), reader.read_state(11));

    // reading an already read object is OK
    assert!(reader.read_metadata().is_ok());
}

#[tokio::test]
async fn test_read_invalid_format() {
    // create an invalid file with no YAML separators
    let file = NamedTempFile::new("invalid.yaml").unwrap();
    file.write_str("a\nb\nc\n").unwrap();

    // initialize reader
    let mut reader = YamlFileReader::new(File::open(file).unwrap()).unwrap();

    //
reading any object will return a Format error 72 | assert!(matches!( 73 | reader.read_metadata(), 74 | Err(ReaderError::Format(_)) 75 | )); 76 | assert!(matches!(reader.read_state(0), Err(ReaderError::Format(_)))); 77 | } 78 | -------------------------------------------------------------------------------- /examples/src/fixture_generator/reader/yaml.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) Meta Platforms, Inc. and affiliates. 2 | // 3 | // This source code is dual-licensed under either the MIT license found in the 4 | // LICENSE-MIT file in the root directory of this source tree or the Apache 5 | // License, Version 2.0 found in the LICENSE-APACHE file in the root directory 6 | // of this source tree. You may select, at your option, one of the above-listed licenses. 7 | 8 | //! This module contains an implementor of the Reader trait for the YAML format. 9 | 10 | use std::fmt::Write as _; 11 | use std::fs::File; 12 | use std::io::{BufRead, BufReader, Lines, Seek}; 13 | use std::iter::Peekable; 14 | use std::result::Result; // import without risk of name clashing 15 | 16 | use serde::de::DeserializeOwned; 17 | 18 | use crate::fixture_generator::generator::{Delta, Metadata, State}; 19 | use crate::fixture_generator::reader::{Reader, ReaderError}; 20 | use crate::fixture_generator::YAML_SEPARATOR; 21 | 22 | impl From for ReaderError { 23 | fn from(error: std::io::Error) -> Self { 24 | ReaderError::Input(error.to_string()) 25 | } 26 | } 27 | 28 | /// YAML format file reader. 29 | pub struct YamlFileReader { 30 | file: File, 31 | index: u32, 32 | buffer: Peekable>>, 33 | } 34 | 35 | impl YamlFileReader { 36 | #[cfg(test)] 37 | pub fn new(file: File) -> Result { 38 | let index = 0; 39 | let buffer = Self::buffer(&file)?; 40 | 41 | Ok(Self { 42 | file, 43 | index, 44 | buffer, 45 | }) 46 | } 47 | 48 | // Instantiates a new buffer for a given file. 
49 | fn buffer(file: &File) -> Result>>, ReaderError> { 50 | let mut file_ref_copy = file.try_clone()?; 51 | file_ref_copy.rewind()?; 52 | 53 | Ok(BufReader::new(file_ref_copy).lines().peekable()) 54 | } 55 | 56 | // Returns the next YAML "doc" in the file, looping back to the start of the 57 | // file if EOF is encountered. 58 | fn next_doc(&mut self) -> Result { 59 | // find start of doc 60 | loop { 61 | match self.buffer.peek() { 62 | Some(Ok(sep)) if sep.trim_end() == YAML_SEPARATOR => { 63 | self.buffer.next(); 64 | break; 65 | } 66 | Some(Ok(_)) => { 67 | self.buffer.next(); 68 | } 69 | None => { 70 | return Err(ReaderError::Format( 71 | "EOF encountered while looking for start of YAML doc".to_string(), 72 | )) 73 | } 74 | Some(Err(err)) => return Err(ReaderError::Input(err.to_string())), 75 | } 76 | } 77 | 78 | // collect lines until end of doc 79 | let mut doc = String::new(); 80 | loop { 81 | match self.buffer.peek() { 82 | Some(Ok(sep)) if sep.trim_end() == YAML_SEPARATOR => { 83 | self.index += 1; 84 | return Ok(doc); 85 | } 86 | Some(Ok(line)) => { 87 | // avoid the extra allocation call with a format! 88 | let _ = writeln!(doc, "{line}"); 89 | self.buffer.next(); 90 | } 91 | None => { 92 | // EOF encountered, reset buffer before returning 93 | self.index = 0; 94 | self.buffer = Self::buffer(&self.file)?; 95 | return Ok(doc); 96 | } 97 | Some(Err(err)) => return Err(ReaderError::Input(err.to_string())), 98 | } 99 | } 100 | } 101 | 102 | // Reads an object from the YAML file, utilizing validate_fun to validate 103 | // the object before returning it. 104 | fn read_impl bool>( 105 | &mut self, 106 | validate_fun: F, 107 | ) -> Result { 108 | let start = self.index; 109 | loop { 110 | if let Ok(object) = serde_yaml::from_str::(&self.next_doc()?) 
{ 111 | if validate_fun(&object) { 112 | return Ok(object); 113 | } 114 | } 115 | // exit if all docs have been checked 116 | if self.index == start { 117 | return Err(ReaderError::NotFound); 118 | } 119 | } 120 | } 121 | } 122 | 123 | impl Reader for YamlFileReader { 124 | fn read_metadata(&mut self) -> Result { 125 | self.read_impl(|_: &Metadata| true) 126 | } 127 | 128 | fn read_state(&mut self, epoch: u32) -> Result { 129 | self.read_impl(|state: &State| state.epoch == epoch) 130 | } 131 | 132 | fn read_delta(&mut self, epoch: u32) -> Result { 133 | self.read_impl(|delta: &Delta| delta.epoch == epoch) 134 | } 135 | 136 | fn read_string(&mut self) -> Result { 137 | self.read_impl(|_: &String| true) 138 | } 139 | } 140 | -------------------------------------------------------------------------------- /examples/src/fixture_generator/writer/mod.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) Meta Platforms, Inc. and affiliates. 2 | // 3 | // This source code is dual-licensed under either the MIT license found in the 4 | // LICENSE-MIT file in the root directory of this source tree or the Apache 5 | // License, Version 2.0 found in the LICENSE-APACHE file in the root directory 6 | // of this source tree. You may select, at your option, one of the above-listed licenses. 7 | 8 | //! This module contains the Writer trait to serialize the tool's serde-compatible 9 | //! objects to a format, as well as implementations of the trait. 10 | 11 | use serde::Serialize; 12 | 13 | /// Interface for writing output generated by the tool. 14 | pub trait Writer { 15 | /// Writes a serde serializable object. 16 | fn write_object(&mut self, object: impl Serialize); 17 | 18 | /// Writes a comment that should be ignored by parsers. 19 | fn write_comment(&mut self, comment: &str); 20 | 21 | /// Writes a newline. 22 | fn write_line(&mut self); 23 | 24 | /// Flushes the internal buffer. 
25 | fn flush(&mut self); 26 | } 27 | 28 | /// YAML implementor of Writer trait. 29 | pub(crate) mod yaml; 30 | -------------------------------------------------------------------------------- /examples/src/fixture_generator/writer/yaml.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) Meta Platforms, Inc. and affiliates. 2 | // 3 | // This source code is dual-licensed under either the MIT license found in the 4 | // LICENSE-MIT file in the root directory of this source tree or the Apache 5 | // License, Version 2.0 found in the LICENSE-APACHE file in the root directory 6 | // of this source tree. You may select, at your option, one of the above-listed licenses. 7 | 8 | //! This module contains an implementor of the Writer trait for the YAML format. 9 | 10 | use std::io::Write; 11 | 12 | use serde::Serialize; 13 | 14 | use crate::fixture_generator::{writer::Writer, YAML_SEPARATOR}; 15 | 16 | /// YAML format writer. 17 | pub(crate) struct YamlWriter { 18 | out: T, 19 | } 20 | 21 | impl YamlWriter { 22 | pub fn new(out: T) -> Self { 23 | Self { out } 24 | } 25 | } 26 | 27 | impl Writer for YamlWriter { 28 | fn write_object(&mut self, object: impl Serialize) { 29 | writeln!(self.out, "{}", YAML_SEPARATOR).unwrap(); 30 | serde_yaml::to_writer(&mut self.out, &object).unwrap(); 31 | } 32 | 33 | fn write_comment(&mut self, comment: &str) { 34 | let lines = comment.split('\n'); 35 | lines.for_each(|line| writeln!(self.out, "# {line}").unwrap()); 36 | } 37 | 38 | fn write_line(&mut self) { 39 | writeln!(self.out).unwrap() 40 | } 41 | 42 | fn flush(&mut self) { 43 | self.out.flush().unwrap(); 44 | } 45 | } 46 | -------------------------------------------------------------------------------- /examples/src/main.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) Meta Platforms, Inc. and affiliates. 
2 | // 3 | // This source code is dual-licensed under either the MIT license found in the 4 | // LICENSE-MIT file in the root directory of this source tree or the Apache 5 | // License, Version 2.0 found in the LICENSE-APACHE file in the root directory 6 | // of this source tree. You may select, at your option, one of the above-listed licenses. 7 | 8 | //! A set of example applications and utilities for AKD 9 | 10 | mod fixture_generator; 11 | mod mysql_demo; 12 | mod test_vectors; 13 | mod wasm_client; 14 | mod whatsapp_kt_auditor; 15 | 16 | use anyhow::Result; 17 | use clap::{Parser, Subcommand}; 18 | 19 | /// AKD examples 20 | #[derive(Parser, Debug)] 21 | #[clap(author, about, long_about = None)] 22 | pub struct Arguments { 23 | /// The type of example to run 24 | #[clap(subcommand)] 25 | example: ExampleType, 26 | } 27 | 28 | #[derive(Subcommand, Debug, Clone)] 29 | enum ExampleType { 30 | /// WhatsApp Key Transparency Auditor 31 | WhatsappKtAuditor(whatsapp_kt_auditor::CliArgs), 32 | /// MySQL Demo 33 | MysqlDemo(mysql_demo::CliArgs), 34 | /// Fixture Generator 35 | FixtureGenerator(fixture_generator::Args), 36 | /// Test vectors generator 37 | TestVectors(test_vectors::Args), 38 | } 39 | 40 | // MAIN // 41 | #[tokio::main] 42 | async fn main() -> Result<()> { 43 | let args = Arguments::parse(); 44 | 45 | match args.example { 46 | ExampleType::WhatsappKtAuditor(args) => whatsapp_kt_auditor::render_cli(args).await?, 47 | ExampleType::MysqlDemo(args) => mysql_demo::render_cli(args).await?, 48 | ExampleType::FixtureGenerator(args) => fixture_generator::run(args).await, 49 | ExampleType::TestVectors(args) => test_vectors::run(args).await, 50 | } 51 | 52 | Ok(()) 53 | } 54 | 55 | // Test macros 56 | 57 | #[cfg(test)] 58 | #[macro_export] 59 | // NOTE(new_config): Add new configurations here 60 | macro_rules! test_config { 61 | ( $x:ident ) => { 62 | paste::paste! 
{ 63 | #[tokio::test] 64 | async fn [<$x _ whatsapp_v1_config>]() { 65 | $x::().await 66 | } 67 | 68 | #[tokio::test] 69 | async fn [<$x _ experimental_config>]() { 70 | $x::>().await 71 | } 72 | } 73 | }; 74 | } 75 | 76 | #[cfg(test)] 77 | #[macro_export] 78 | // NOTE(new_config): Add new configurations here 79 | macro_rules! test_config_serial { 80 | ( $x:ident ) => { 81 | paste::paste! { 82 | #[serial_test::serial] 83 | #[tokio::test] 84 | async fn [<$x _ whatsapp_v1_config>]() { 85 | $x::().await 86 | } 87 | 88 | #[serial_test::serial] 89 | #[tokio::test] 90 | async fn [<$x _ experimental_config>]() { 91 | $x::>().await 92 | } 93 | } 94 | }; 95 | } 96 | -------------------------------------------------------------------------------- /examples/src/mysql_demo/commands.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) Meta Platforms, Inc. and affiliates. 2 | // 3 | // This source code is dual-licensed under either the MIT license found in the 4 | // LICENSE-MIT file in the root directory of this source tree or the Apache 5 | // License, Version 2.0 found in the LICENSE-APACHE file in the root directory 6 | // of this source tree. You may select, at your option, one of the above-listed licenses. 7 | 8 | use super::directory_host::DirectoryCommand; 9 | use colored::*; 10 | 11 | pub(crate) enum Command { 12 | Help, 13 | Exit, 14 | Flush, 15 | Info, 16 | Directory(DirectoryCommand), 17 | InvalidArgs(String), 18 | Unknown(String), 19 | } 20 | 21 | impl Command { 22 | pub(crate) fn parse(text: &mut String) -> Command { 23 | trim_newline(text); 24 | let parts: Vec<&str> = text.split(' ').collect(); 25 | 26 | let mut command = String::new(); 27 | if let Some(head) = parts.first() { 28 | command = String::from(*head); 29 | } 30 | 31 | match command.to_lowercase().as_ref() { 32 | "exit" | "x" => Command::Exit, 33 | "help" | "?" 
=> Command::Help, 34 | "flush" => Command::Flush, 35 | "info" => Command::Info, 36 | cmd => Command::handle_dir_cmd(cmd, parts, text), 37 | } 38 | } 39 | 40 | pub(crate) fn print_help_menu() { 41 | println!( 42 | "{}", 43 | "*************************** Help menu ***************************".red() 44 | ); 45 | println!( 46 | "{} are commands, {} are mandatory args, {} are optional args", 47 | "green".green(), 48 | "blue".blue(), 49 | "magenta".magenta() 50 | ); 51 | println!("============================================================="); 52 | println!(" {}|{}:\t\t\tprint this menu", "help".green(), "?".green()); 53 | println!( 54 | " {}|{}:\t\t\texit the application", 55 | "exit".green(), 56 | "x".green() 57 | ); 58 | println!(" {}\t\t\t\tflush the database entries", "flush".green()); 59 | println!( 60 | " {}\t\t\t\tprints information about the running instance", 61 | "info".green() 62 | ); 63 | println!( 64 | " {} {} {}:\t\tpublish key material (value) for user", 65 | "publish".green(), 66 | "user".blue(), 67 | "value".blue() 68 | ); 69 | println!( 70 | " {} {}:\t\t\tlookup a proof for user", 71 | "lookup".green(), 72 | "user".blue() 73 | ); 74 | println!( 75 | " {} {}:\t\t\tlookup key history for user", 76 | "history".green(), 77 | "user".blue() 78 | ); 79 | println!( 80 | " {} {} {}:\t\tretrieve audit proof between start and end epochs", 81 | "audit".green(), 82 | "start".blue(), 83 | "end".blue() 84 | ); 85 | println!( 86 | " {}|{}\t\tretrieve the root hash at the latest epoch", 87 | "root".green(), 88 | "root_hash".green(), 89 | ); 90 | } 91 | 92 | // ==== Helpers for managing directory commands ==== // 93 | fn handle_dir_cmd(command: &str, parts: Vec<&str>, full_text: &str) -> Command { 94 | let dir_cmd: Option> = match command { 95 | "publish" => Some(Command::publish(parts)), 96 | "lookup" => Some(Command::lookup(parts)), 97 | "history" => Some(Command::history(parts)), 98 | "audit" => Some(Command::audit(parts)), 99 | "root" | "root_hash" => 
Some(Command::root_hash(parts)), 100 | _ => None, 101 | }; 102 | match dir_cmd { 103 | Some(Some(cmd)) => Command::Directory(cmd), 104 | Some(None) => { 105 | let msg = format!( 106 | "Command {} received invalid argments. Check {} for syntax", 107 | command, 108 | "help".green() 109 | ); 110 | Command::InvalidArgs(msg) 111 | } 112 | _ => Command::Unknown(String::from(full_text)), 113 | } 114 | } 115 | 116 | fn publish(parts: Vec<&str>) -> Option { 117 | if parts.len() < 3 { 118 | return None; 119 | } 120 | let (a, b) = (parts[1], parts[2]); 121 | let cmd = DirectoryCommand::Publish(String::from(a), String::from(b)); 122 | Some(cmd) 123 | } 124 | 125 | fn lookup(parts: Vec<&str>) -> Option { 126 | if parts.len() < 2 { 127 | return None; 128 | } 129 | let a = parts[1]; 130 | let cmd = DirectoryCommand::Lookup(String::from(a)); 131 | Some(cmd) 132 | } 133 | 134 | fn history(parts: Vec<&str>) -> Option { 135 | if parts.len() < 2 { 136 | return None; 137 | } 138 | let a = parts[1]; 139 | let cmd = DirectoryCommand::KeyHistory(String::from(a)); 140 | Some(cmd) 141 | } 142 | 143 | fn audit(parts: Vec<&str>) -> Option { 144 | if parts.len() < 3 { 145 | return None; 146 | } 147 | let (a, b) = (parts[1], parts[2]); 148 | match (a.parse::(), b.parse::()) { 149 | (Ok(u_a), Ok(u_b)) => { 150 | let cmd = DirectoryCommand::Audit(u_a, u_b); 151 | Some(cmd) 152 | } 153 | _ => None, 154 | } 155 | } 156 | 157 | fn root_hash(_parts: Vec<&str>) -> Option { 158 | let cmd = DirectoryCommand::RootHash; 159 | Some(cmd) 160 | } 161 | } 162 | 163 | fn trim_newline(s: &mut String) { 164 | if s.ends_with('\n') { 165 | s.pop(); 166 | if s.ends_with('\r') { 167 | s.pop(); 168 | } 169 | } 170 | } 171 | -------------------------------------------------------------------------------- /examples/src/mysql_demo/directory_host.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) Meta Platforms, Inc. and affiliates. 
2 | // 3 | // This source code is dual-licensed under either the MIT license found in the 4 | // LICENSE-MIT file in the root directory of this source tree or the Apache 5 | // License, Version 2.0 found in the LICENSE-APACHE file in the root directory 6 | // of this source tree. You may select, at your option, one of the above-listed licenses. 7 | 8 | use akd::configuration::Configuration; 9 | use akd::ecvrf::VRFKeyStorage; 10 | use akd::storage::Database; 11 | use akd::HistoryParams; 12 | use akd::{AkdLabel, AkdValue}; 13 | use akd::{Directory, EpochHash}; 14 | use log::{error, info}; 15 | use tokio::sync::mpsc::*; 16 | use tokio::time::Instant; 17 | 18 | pub(crate) struct Rpc( 19 | pub(crate) DirectoryCommand, 20 | pub(crate) Option>>, 21 | ); 22 | 23 | #[derive(Debug)] 24 | pub enum DirectoryCommand { 25 | Publish(String, String), 26 | PublishBatch(Vec<(String, String)>), 27 | Lookup(String), 28 | KeyHistory(String), 29 | Audit(u64, u64), 30 | RootHash, 31 | Terminate, 32 | } 33 | 34 | pub(crate) async fn init_host(rx: &mut Receiver, directory: &mut Directory) 35 | where 36 | TC: Configuration, 37 | S: Database + 'static, 38 | V: VRFKeyStorage, 39 | { 40 | info!("Starting the verifiable directory host"); 41 | 42 | while let Some(Rpc(message, channel)) = rx.recv().await { 43 | match (message, channel) { 44 | (DirectoryCommand::Terminate, _) => { 45 | break; 46 | } 47 | (DirectoryCommand::Publish(a, b), Some(response)) => { 48 | let tic = Instant::now(); 49 | match directory 50 | .publish(vec![(AkdLabel::from(&a), AkdValue::from(&b))]) 51 | .await 52 | { 53 | Ok(EpochHash(epoch, hash)) => { 54 | let toc = Instant::now() - tic; 55 | let msg = format!( 56 | "PUBLISHED '{}' = '{}' in {} s (epoch: {}, root hash: {})", 57 | a, 58 | b, 59 | toc.as_secs_f64(), 60 | epoch, 61 | hex::encode(hash) 62 | ); 63 | response.send(Ok(msg)).unwrap() 64 | } 65 | Err(error) => { 66 | let msg = format!("Failed to publish with error: {error:?}"); 67 | response.send(Err(msg)).unwrap(); 
68 | } 69 | } 70 | } 71 | (DirectoryCommand::PublishBatch(batches), Some(response)) => { 72 | let tic = Instant::now(); 73 | let len = batches.len(); 74 | match directory 75 | .publish( 76 | batches 77 | .into_iter() 78 | .map(|(key, value)| (AkdLabel::from(&key), AkdValue::from(&value))) 79 | .collect(), 80 | ) 81 | .await 82 | { 83 | Ok(_) => { 84 | let toc = Instant::now() - tic; 85 | let msg = format!("PUBLISHED {} records in {} s", len, toc.as_secs_f64()); 86 | response.send(Ok(msg)).unwrap() 87 | } 88 | Err(error) => { 89 | let msg = format!("Failed to publish with error: {error:?}"); 90 | response.send(Err(msg)).unwrap(); 91 | } 92 | } 93 | } 94 | (DirectoryCommand::Lookup(a), Some(response)) => { 95 | match directory.lookup(AkdLabel::from(&a)).await { 96 | Ok((proof, root_hash)) => { 97 | let vrf_pk = directory.get_public_key().await.unwrap(); 98 | let verification = akd::client::lookup_verify::( 99 | vrf_pk.as_bytes(), 100 | root_hash.hash(), 101 | root_hash.epoch(), 102 | AkdLabel::from(&a), 103 | proof, 104 | ); 105 | if verification.is_err() { 106 | let msg = format!("WARN: Lookup proof failed verification for '{a}'"); 107 | response.send(Err(msg)).unwrap(); 108 | } else { 109 | let msg = format!("Lookup proof verified for user '{a}'"); 110 | response.send(Ok(msg)).unwrap(); 111 | } 112 | } 113 | Err(error) => { 114 | let msg = format!("Failed to lookup with error {error:?}"); 115 | response.send(Err(msg)).unwrap(); 116 | } 117 | } 118 | } 119 | (DirectoryCommand::KeyHistory(a), Some(response)) => { 120 | match directory 121 | .key_history(&AkdLabel::from(&a), HistoryParams::default()) 122 | .await 123 | { 124 | Ok(_proof) => { 125 | let msg = format!("GOT KEY HISTORY FOR '{a}'"); 126 | response.send(Ok(msg)).unwrap(); 127 | } 128 | Err(error) => { 129 | let msg = format!("Failed to lookup with error {error:?}"); 130 | response.send(Err(msg)).unwrap(); 131 | } 132 | } 133 | } 134 | (DirectoryCommand::Audit(start, end), Some(response)) => { 135 | match 
directory.audit(start, end).await { 136 | Ok(_proof) => { 137 | let msg = format!("GOT AUDIT PROOF BETWEEN ({start}, {end})"); 138 | response.send(Ok(msg)).unwrap(); 139 | } 140 | Err(error) => { 141 | let msg = format!("Failed to get audit proof with error {error:?}"); 142 | response.send(Err(msg)).unwrap(); 143 | } 144 | } 145 | } 146 | (DirectoryCommand::RootHash, Some(response)) => { 147 | let hash = directory.get_epoch_hash().await; 148 | match hash { 149 | Ok(EpochHash(_, hash)) => { 150 | let msg = format!("Retrieved root hash {}", hex::encode(hash)); 151 | response.send(Ok(msg)).unwrap(); 152 | } 153 | Err(error) => { 154 | let msg = format!("Failed to retrieve root hash with error {error:?}"); 155 | response.send(Err(msg)).unwrap(); 156 | } 157 | } 158 | } 159 | (_, None) => { 160 | error!("A channel was not provided to the directory server to process a command!"); 161 | } 162 | } 163 | } 164 | 165 | info!("AKD host shutting down"); 166 | } 167 | -------------------------------------------------------------------------------- /examples/src/mysql_demo/logs.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) Meta Platforms, Inc. and affiliates. 2 | // 3 | // This source code is dual-licensed under either the MIT license found in the 4 | // LICENSE-MIT file in the root directory of this source tree or the Apache 5 | // License, Version 2.0 found in the LICENSE-APACHE file in the root directory 6 | // of this source tree. You may select, at your option, one of the above-listed licenses. 
7 | 8 | extern crate thread_id; 9 | 10 | use colored::*; 11 | use log::{Level, Metadata, Record}; 12 | use once_cell::sync::OnceCell; 13 | use tokio::time::{Duration, Instant}; 14 | 15 | use std::fs::File; 16 | use std::io; 17 | use std::io::Write; 18 | use std::path::Path; 19 | use std::sync::Mutex; 20 | 21 | static EPOCH: OnceCell = OnceCell::new(); 22 | 23 | pub(crate) struct ConsoleLogger { 24 | pub(crate) level: Level, 25 | } 26 | 27 | impl ConsoleLogger { 28 | pub(crate) fn touch() { 29 | EPOCH.get_or_init(Instant::now); 30 | } 31 | 32 | pub(crate) fn format_log_record(io: &mut (dyn Write + Send), record: &Record, no_color: bool) { 33 | let target = { 34 | if let Some(target_str) = record.target().split(':').next_back() { 35 | if let Some(line) = record.line() { 36 | format!(" ({target_str}:{line})") 37 | } else { 38 | format!(" ({target_str})") 39 | } 40 | } else { 41 | "".to_string() 42 | } 43 | }; 44 | 45 | let toc = if let Some(epoch) = EPOCH.get() { 46 | Instant::now() - *epoch 47 | } else { 48 | Duration::from_millis(0) 49 | }; 50 | 51 | let seconds = toc.as_secs(); 52 | let hours = seconds / 3600; 53 | let minutes = (seconds / 60) % 60; 54 | let seconds = seconds % 60; 55 | let miliseconds = toc.subsec_millis(); 56 | 57 | let msg = format!( 58 | "[{:02}:{:02}:{:02}.{:03}] ({:x}) {:6} {}{}", 59 | hours, 60 | minutes, 61 | seconds, 62 | miliseconds, 63 | thread_id::get(), 64 | record.level(), 65 | record.args(), 66 | target 67 | ); 68 | if no_color { 69 | let _ = writeln!(io, "{msg}"); 70 | } else { 71 | let msg = match record.level() { 72 | Level::Trace | Level::Debug => msg.white(), 73 | Level::Info => msg.blue(), 74 | Level::Warn => msg.yellow(), 75 | Level::Error => msg.red(), 76 | }; 77 | let _ = writeln!(io, "{msg}"); 78 | } 79 | } 80 | } 81 | 82 | impl log::Log for ConsoleLogger { 83 | fn enabled(&self, metadata: &Metadata) -> bool { 84 | metadata.level() <= self.level 85 | } 86 | 87 | fn log(&self, record: &Record) { 88 | if 
!self.enabled(record.metadata()) { 89 | return; 90 | } 91 | let mut io = std::io::stdout(); 92 | ConsoleLogger::format_log_record(&mut io, record, false); 93 | } 94 | 95 | fn flush(&self) { 96 | let _ = std::io::stdout().flush(); 97 | } 98 | } 99 | 100 | pub(crate) struct FileLogger { 101 | sink: Mutex, 102 | } 103 | 104 | impl FileLogger { 105 | pub(crate) fn new>(path: T) -> io::Result { 106 | let file = File::create(path)?; 107 | Ok(Self { 108 | sink: Mutex::new(file), 109 | }) 110 | } 111 | } 112 | 113 | impl log::Log for FileLogger { 114 | fn enabled(&self, _metadata: &Metadata) -> bool { 115 | // use the global log-level 116 | true 117 | } 118 | 119 | fn log(&self, record: &Record) { 120 | if !self.enabled(record.metadata()) { 121 | return; 122 | } 123 | let mut sink = &*self.sink.lock().unwrap(); 124 | ConsoleLogger::format_log_record(&mut sink, record, true); 125 | } 126 | 127 | fn flush(&self) { 128 | let _ = std::io::stdout().flush(); 129 | } 130 | } 131 | -------------------------------------------------------------------------------- /examples/src/mysql_demo/tests/memory_tests.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) Meta Platforms, Inc. and affiliates. 2 | // 3 | // This source code is dual-licensed under either the MIT license found in the 4 | // LICENSE-MIT file in the root directory of this source tree or the Apache 5 | // License, Version 2.0 found in the LICENSE-APACHE file in the root directory 6 | // of this source tree. You may select, at your option, one of the above-listed licenses. 
7 | 8 | use crate::{ 9 | mysql_demo::tests::test_util::{directory_test_suite, log_init}, 10 | test_config_serial, 11 | }; 12 | use akd::{ecvrf::HardCodedAkdVRF, storage::StorageManager, Configuration}; 13 | use log::info; 14 | 15 | type InMemoryDb = akd::storage::memory::AsyncInMemoryDatabase; 16 | 17 | test_config_serial!(test_directory_operations); 18 | async fn test_directory_operations() { 19 | log_init(log::Level::Info); 20 | 21 | info!("\n\n******** Starting In-Memory Directory Operations Integration Test ********\n\n"); 22 | 23 | let db = InMemoryDb::new(); 24 | 25 | let vrf = HardCodedAkdVRF {}; 26 | let storage_manager = StorageManager::new_no_cache(db); 27 | directory_test_suite::(&storage_manager, 500, &vrf).await; 28 | 29 | info!("\n\n******** Finished In-Memory Directory Operations Integration Test ********\n\n"); 30 | } 31 | 32 | test_config_serial!(test_directory_operations_with_caching); 33 | async fn test_directory_operations_with_caching() { 34 | log_init(log::Level::Info); 35 | 36 | info!("\n\n******** Starting In-Memory Directory Operations (w/caching) Integration Test ********\n\n"); 37 | 38 | let db = InMemoryDb::new(); 39 | 40 | let vrf = HardCodedAkdVRF {}; 41 | let storage_manager = StorageManager::new(db, None, None, None); 42 | directory_test_suite::(&storage_manager, 500, &vrf).await; 43 | 44 | info!("\n\n******** Finished In-Memory Directory Operations (w/caching) Integration Test ********\n\n"); 45 | } 46 | -------------------------------------------------------------------------------- /examples/src/mysql_demo/tests/mod.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) Meta Platforms, Inc. and affiliates. 
//
// This source code is dual-licensed under either the MIT license found in the
// LICENSE-MIT file in the root directory of this source tree or the Apache
// License, Version 2.0 found in the LICENSE-APACHE file in the root directory
// of this source tree. You may select, at your option, one of the above-listed licenses.

// Integration test modules for the MySQL demo: in-memory suite, raw MySQL
// database tests, MySQL-backed directory tests, and shared test utilities.
mod memory_tests;
mod mysql_db_tests;
mod mysql_tests;
mod test_util;
-------------------------------------------------------------------------------- /examples/src/mysql_demo/tests/mysql_db_tests.rs: --------------------------------------------------------------------------------
// Copyright (c) Meta Platforms, Inc. and affiliates.
//
// This source code is dual-licensed under either the MIT license found in the
// LICENSE-MIT file in the root directory of this source tree or the Apache
// License, Version 2.0 found in the LICENSE-APACHE file in the root directory
// of this source tree. You may select, at your option, one of the above-listed licenses.
7 | 8 | use super::test_util::log_init; 9 | use crate::mysql_demo::mysql::AsyncMySqlDatabase; 10 | 11 | // *** Tests *** // 12 | 13 | #[tokio::test] 14 | async fn test_mysql_db() { 15 | log_init(log::Level::Info); 16 | if AsyncMySqlDatabase::test_guard() { 17 | if let Err(error) = AsyncMySqlDatabase::create_test_db( 18 | "localhost", 19 | Option::from("root"), 20 | Option::from("example"), 21 | Option::from(8001), 22 | ) 23 | .await 24 | { 25 | panic!("Error creating test database: {}", error); 26 | } 27 | 28 | let mysql_db = AsyncMySqlDatabase::new( 29 | "localhost", 30 | "test_db", 31 | Option::from("root"), 32 | Option::from("example"), 33 | Option::from(8001), 34 | 200, 35 | ) 36 | .await 37 | .expect("Failed to create async mysql db"); 38 | 39 | if let Err(error) = mysql_db.delete_data().await { 40 | println!("Error cleaning mysql prior to test suite: {error}"); 41 | } 42 | 43 | // The test cases 44 | let manager = akd::storage::tests::run_test_cases_for_storage_impl(mysql_db.clone()).await; 45 | 46 | // clean the test infra 47 | if let Err(mysql_async::Error::Server(error)) = manager.get_db().drop_tables().await { 48 | println!("ERROR: Failed to clean MySQL test database with error {error}"); 49 | } 50 | } else { 51 | println!("WARN: Skipping MySQL test due to test guard noting that the docker container appears to not be running."); 52 | } 53 | } 54 | -------------------------------------------------------------------------------- /examples/src/mysql_demo/tests/mysql_tests.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) Meta Platforms, Inc. and affiliates. 2 | // 3 | // This source code is dual-licensed under either the MIT license found in the 4 | // LICENSE-MIT file in the root directory of this source tree or the Apache 5 | // License, Version 2.0 found in the LICENSE-APACHE file in the root directory 6 | // of this source tree. You may select, at your option, one of the above-listed licenses. 
7 | 8 | use crate::mysql_demo::mysql::AsyncMySqlDatabase; 9 | use crate::mysql_demo::tests::test_util::{ 10 | directory_test_suite, log_init, test_lookups as test_lookups_util, 11 | }; 12 | use crate::test_config_serial; 13 | use akd::storage::StorageManager; 14 | use akd::{ecvrf::HardCodedAkdVRF, Configuration}; 15 | use log::{error, info, warn}; 16 | 17 | test_config_serial!(test_directory_operations); 18 | async fn test_directory_operations() { 19 | log_init(log::Level::Info); 20 | 21 | info!("\n\n******** Starting MySQL Directory Operations Integration Test ********\n\n"); 22 | 23 | if AsyncMySqlDatabase::test_guard() { 24 | // create the "test" database 25 | if let Err(error) = AsyncMySqlDatabase::create_test_db( 26 | "localhost", 27 | Option::from("root"), 28 | Option::from("example"), 29 | Option::from(8001), 30 | ) 31 | .await 32 | { 33 | panic!("Error creating test database: {}", error); 34 | } 35 | 36 | // connect to the newly created test db 37 | let mysql_db = AsyncMySqlDatabase::new( 38 | "localhost", 39 | "test_db", 40 | Option::from("root"), 41 | Option::from("example"), 42 | Option::from(8001), 43 | 200, 44 | ) 45 | .await 46 | .expect("Failed to create async mysql db"); 47 | 48 | // delete all data from the db 49 | if let Err(error) = mysql_db.delete_data().await { 50 | error!("Error cleaning mysql prior to test suite: {}", error); 51 | } 52 | 53 | let vrf = HardCodedAkdVRF {}; 54 | let storage_manager = StorageManager::new_no_cache(mysql_db.clone()); 55 | directory_test_suite::(&storage_manager, 50, &vrf).await; 56 | 57 | storage_manager.log_metrics().await; 58 | 59 | // clean the test infra 60 | if let Err(mysql_async::Error::Server(error)) = storage_manager.get_db().drop_tables().await 61 | { 62 | error!( 63 | "ERROR: Failed to clean MySQL test database with error {}", 64 | error 65 | ); 66 | } 67 | } else { 68 | warn!("WARN: Skipping MySQL test due to test guard noting that the docker container appears to not be running."); 69 | } 70 | 71 | 
info!("\n\n******** Completed MySQL Directory Operations Integration Test ********\n\n"); 72 | } 73 | 74 | test_config_serial!(test_directory_operations_with_caching); 75 | async fn test_directory_operations_with_caching() { 76 | log_init(log::Level::Info); 77 | 78 | info!("\n\n******** Starting MySQL Directory Operations (w/caching) Integration Test ********\n\n"); 79 | 80 | if AsyncMySqlDatabase::test_guard() { 81 | // create the "test" database 82 | if let Err(error) = AsyncMySqlDatabase::create_test_db( 83 | "localhost", 84 | Option::from("root"), 85 | Option::from("example"), 86 | Option::from(8001), 87 | ) 88 | .await 89 | { 90 | panic!("Error creating test database: {}", error); 91 | } 92 | 93 | // connect to the newly created test db 94 | let mysql_db = AsyncMySqlDatabase::new( 95 | "localhost", 96 | "test_db", 97 | Option::from("root"), 98 | Option::from("example"), 99 | Option::from(8001), 100 | 200, 101 | ) 102 | .await 103 | .expect("Failed to create async mysql db"); 104 | 105 | // delete all data from the db 106 | if let Err(error) = mysql_db.delete_data().await { 107 | error!("Error cleaning mysql prior to test suite: {}", error); 108 | } 109 | 110 | let vrf = HardCodedAkdVRF {}; 111 | let storage_manager = StorageManager::new(mysql_db.clone(), None, None, None); 112 | directory_test_suite::(&storage_manager, 50, &vrf).await; 113 | 114 | storage_manager.log_metrics().await; 115 | 116 | // clean the test infra 117 | if let Err(mysql_async::Error::Server(error)) = storage_manager.get_db().drop_tables().await 118 | { 119 | error!( 120 | "ERROR: Failed to clean MySQL test database with error {}", 121 | error 122 | ); 123 | } 124 | } else { 125 | warn!("WARN: Skipping MySQL test due to test guard noting that the docker container appears to not be running."); 126 | } 127 | 128 | info!("\n\n******** Completed MySQL Directory Operations (w/caching) Integration Test ********\n\n"); 129 | } 130 | 131 | test_config_serial!(test_lookups); 132 | async fn 
test_lookups() { 133 | log_init(log::Level::Info); 134 | 135 | info!("\n\n******** Starting MySQL Lookup Tests ********\n\n"); 136 | 137 | if AsyncMySqlDatabase::test_guard() { 138 | // create the "test" database 139 | if let Err(error) = AsyncMySqlDatabase::create_test_db( 140 | "localhost", 141 | Option::from("root"), 142 | Option::from("example"), 143 | Option::from(8001), 144 | ) 145 | .await 146 | { 147 | panic!("Error creating test database: {}", error); 148 | } 149 | 150 | // connect to the newly created test db 151 | let mysql_db = AsyncMySqlDatabase::new( 152 | "localhost", 153 | "test_db", 154 | Option::from("root"), 155 | Option::from("example"), 156 | Option::from(8001), 157 | 200, 158 | ) 159 | .await 160 | .expect("Failed to create async mysql db"); 161 | 162 | // delete all data from the db 163 | if let Err(error) = mysql_db.delete_data().await { 164 | error!("Error cleaning mysql prior to test suite: {}", error); 165 | } 166 | 167 | let vrf = HardCodedAkdVRF {}; 168 | let storage_manager = StorageManager::new(mysql_db, None, None, None); 169 | 170 | test_lookups_util::(&storage_manager, &vrf, 50, 5, 100).await; 171 | 172 | // clean the test infra 173 | if let Err(mysql_async::Error::Server(error)) = storage_manager.get_db().drop_tables().await 174 | { 175 | error!( 176 | "ERROR: Failed to clean MySQL test database with error {}", 177 | error 178 | ); 179 | } 180 | } else { 181 | warn!("WARN: Skipping MySQL test due to test guard noting that the docker container appears to not be running."); 182 | } 183 | 184 | info!("\n\n******** Completed MySQL Lookup Tests ********\n\n"); 185 | } 186 | -------------------------------------------------------------------------------- /examples/src/whatsapp_kt_auditor/auditor.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) Meta Platforms, Inc. and affiliates. 
2 | // 3 | // This source code is dual-licensed under either the MIT license found in the 4 | // LICENSE-MIT file in the root directory of this source tree or the Apache 5 | // License, Version 2.0 found in the LICENSE-APACHE file in the root directory 6 | // of this source tree. You may select, at your option, one of the above-listed licenses. 7 | 8 | //! This module holds the auditor operations based on binary-encoded AuditProof blobs 9 | 10 | use super::EpochSummary; 11 | 12 | use anyhow::{anyhow, bail, Result}; 13 | use clap::{Parser, Subcommand}; 14 | use std::convert::TryFrom; 15 | use xml::reader::XmlEvent; 16 | use xml::EventReader; 17 | 18 | // Constant strings specific to the XML returned from the Cloudfront bucket query 19 | const KEY_STR: &str = "Key"; 20 | const IS_TRUNCATED_STR: &str = "IsTruncated"; 21 | const TRUE_STR: &str = "true"; 22 | 23 | /// Storage options for retrieving audit proofs 24 | #[derive(Subcommand, Clone, Debug)] 25 | pub enum AuditCommand { 26 | /// Audit a specific epoch 27 | Audit { 28 | /// The epoch to audit 29 | #[clap(long)] 30 | epoch: u64, 31 | }, 32 | /// Load the available epochs to audit 33 | LoadEpochs, 34 | } 35 | 36 | /// Audit operations supported by the client 37 | #[derive(Parser, Clone, Debug)] 38 | pub struct AuditArgs { 39 | #[clap(subcommand)] 40 | command: AuditCommand, 41 | } 42 | 43 | pub(crate) async fn audit_epoch(blob: akd::local_auditing::AuditBlob) -> Result { 44 | // decode the proof 45 | let (end_epoch, p_hash, c_hash, proof) = blob.decode().map_err(|err| anyhow!("{:?}", err))?; 46 | 47 | // verify it 48 | if let Err(akd_error) = akd::auditor::audit_verify::( 49 | vec![p_hash, c_hash], 50 | akd::AppendOnlyProof { 51 | proofs: vec![proof], 52 | epochs: vec![end_epoch - 1], // Note that the AppendOnlyProof struct expects epochs to begin with the starting epoch, not the ending epoch 53 | }, 54 | ) 55 | .await 56 | { 57 | bail!( 58 | "Audit proof for epoch {} failed to verify with error: {}", 59 | 
end_epoch, 60 | akd_error 61 | ) 62 | } else { 63 | // verification passed, generate the appropriate QR code 64 | Ok(format!( 65 | "Audit proof for epoch {} has verified successfully!", 66 | end_epoch 67 | )) 68 | } 69 | } 70 | 71 | pub(crate) fn display_audit_proofs_info(info: &mut [EpochSummary]) -> Result { 72 | info.sort_by(|a, b| a.name.epoch.cmp(&b.name.epoch)); 73 | if info.is_empty() { 74 | bail!("There are no epochs present in the storage repository"); 75 | } 76 | 77 | let min = info.first().unwrap().clone(); 78 | let max = info.last().unwrap().clone(); 79 | let (maybe_broken_epoch, is_contiguous) = 80 | info.iter() 81 | .skip(1) 82 | .fold((min.clone(), true), |(previous_item, cont), item| { 83 | if !cont { 84 | (previous_item, cont) 85 | } else { 86 | ( 87 | item.clone(), 88 | item.name.epoch == previous_item.name.epoch + 1, 89 | ) 90 | } 91 | }); 92 | 93 | if !is_contiguous { 94 | bail!("The audit proofs appear to not be contiguous. There's a break in the linear history at epoch {}", maybe_broken_epoch.name.epoch); 95 | } 96 | 97 | Ok(format!( 98 | "Loaded epochs between ({}) and ({}), inclusively.", 99 | min.name.epoch, max.name.epoch 100 | )) 101 | } 102 | 103 | pub(crate) async fn list_proofs(url: &str) -> Result> { 104 | let mut results = vec![]; 105 | let mut is_truncated = true; 106 | let mut start_after = "".to_string(); 107 | 108 | while is_truncated { 109 | let params: Vec<(String, String)> = if start_after == *"" { 110 | vec![("list-type".to_string(), "2".to_string())] 111 | } else { 112 | vec![ 113 | ("list-type".to_string(), "2".to_string()), 114 | ("start-after".to_string(), start_after.clone()), 115 | ] 116 | }; 117 | 118 | let (keys, truncated_result) = get_xml(url, ¶ms).await.unwrap(); 119 | is_truncated = truncated_result; 120 | if is_truncated { 121 | let last = keys[keys.len() - 1].clone(); 122 | start_after.clone_from(&last.key); 123 | } 124 | results.extend_from_slice(&keys); 125 | } 126 | 127 | Ok(results) 128 | } 129 | 130 | 
pub(crate) async fn get_proof( 131 | url: &str, 132 | epoch: &EpochSummary, 133 | ) -> Result { 134 | let url = format!("{}/{}", url, epoch.key); 135 | let resp = reqwest::get(url).await?.bytes().await?; 136 | let data = resp.to_vec(); 137 | 138 | Ok(akd::local_auditing::AuditBlob { 139 | data, 140 | name: epoch.name, 141 | }) 142 | } 143 | 144 | /// Returns the list of keys in the bucket, and whether or not there are more to fetch 145 | async fn get_xml(url: &str, params: &[(String, String)]) -> Result<(Vec, bool)> { 146 | let url = reqwest::Url::parse_with_params(url, params)?; 147 | let resp = reqwest::get(url).await?.text().await?; 148 | 149 | let mut results = vec![]; 150 | 151 | let mut is_truncated = false; 152 | let mut should_push = false; 153 | let mut should_check_truncated = false; 154 | 155 | let parser = EventReader::from_str(&resp); 156 | for event in parser { 157 | match event { 158 | Ok(XmlEvent::StartElement { name, .. }) => { 159 | if name.local_name == KEY_STR { 160 | should_push = true; 161 | } else if name.local_name == IS_TRUNCATED_STR { 162 | should_check_truncated = true; 163 | } 164 | } 165 | Ok(XmlEvent::Characters(text)) => { 166 | if should_push { 167 | results.push(EpochSummary::try_from(text.as_str())?); 168 | should_push = false; 169 | } else if should_check_truncated { 170 | is_truncated = text == TRUE_STR; 171 | should_check_truncated = false; 172 | } 173 | } 174 | Err(e) => println!("Error with parsing XML: {}", e), 175 | _ => (), 176 | } 177 | } 178 | 179 | Ok((results, is_truncated)) 180 | } 181 | -------------------------------------------------------------------------------- /examples/src/whatsapp_kt_auditor/mod.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) Meta Platforms, Inc. and affiliates. 
2 | // 3 | // This source code is dual-licensed under either the MIT license found in the 4 | // LICENSE-MIT file in the root directory of this source tree or the Apache 5 | // License, Version 2.0 found in the LICENSE-APACHE file in the root directory 6 | // of this source tree. You may select, at your option, one of the above-listed licenses. 7 | 8 | //! A tool for verifying audit proofs published from WhatsApp's key transparency implementation 9 | 10 | mod auditor; 11 | 12 | use akd::local_auditing::AuditBlobName; 13 | use anyhow::{anyhow, bail, Result}; 14 | use clap::Parser; 15 | use dialoguer::theme::ColorfulTheme; 16 | use dialoguer::{Input, Select}; 17 | use indicatif::{ProgressBar, ProgressStyle}; 18 | use std::convert::TryFrom; 19 | use std::time::Duration; 20 | 21 | // Default domain for WhatsApp's key transparency audit proofs 22 | const WHATSAPP_KT_DOMAIN: &str = "https://d1tfr3x7n136ak.cloudfront.net"; 23 | type TC = akd::WhatsAppV1Configuration; 24 | 25 | /// Represents the summary of an epoch, and a unique key referring to the raw object in native storage (if needed) 26 | #[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Default)] 27 | pub struct EpochSummary { 28 | /// The name of the audit-blob decomposed into parts 29 | pub name: AuditBlobName, 30 | /// Unique idenfier for the blob in question 31 | pub key: String, 32 | } 33 | 34 | impl TryFrom<&str> for EpochSummary { 35 | type Error = anyhow::Error; 36 | 37 | fn try_from(potential_key: &str) -> Result { 38 | let name = AuditBlobName::try_from(potential_key).map_err(|err| anyhow!("{:?}", err))?; 39 | 40 | Ok(Self { 41 | name, 42 | key: potential_key.to_string(), 43 | }) 44 | } 45 | } 46 | 47 | #[derive(Parser, Debug, Clone)] 48 | pub(crate) struct CliArgs { 49 | /// Optional argument to indicate that only the latest epoch should be audited 50 | #[clap( 51 | long = "latest", 52 | short = 'l', 53 | name = "Audit only the latest epoch", 54 | default_value = "false" 55 | )] 56 | 
audit_latest: bool, 57 | } 58 | 59 | #[derive(Debug)] 60 | enum CliType { 61 | Audit, 62 | Quit, 63 | } 64 | 65 | struct CliOption { 66 | cli_type: CliType, 67 | text: String, 68 | } 69 | 70 | pub(crate) async fn render_cli(args: CliArgs) -> Result<()> { 71 | let pb = start_progress_bar("Loading epochs..."); 72 | let mut proofs = auditor::list_proofs(WHATSAPP_KT_DOMAIN).await?; 73 | finish_progress_bar(pb, auditor::display_audit_proofs_info(&mut proofs)?); 74 | 75 | if args.audit_latest { 76 | // Just audit the latest epoch and exit 77 | let latest_epoch_summary = proofs.last().expect("No epochs found"); 78 | do_epoch_audit(latest_epoch_summary).await?; 79 | return Ok(()); 80 | } 81 | 82 | let items: Vec = vec![ 83 | CliOption { 84 | cli_type: CliType::Audit, 85 | text: "Audit".to_string(), 86 | }, 87 | CliOption { 88 | cli_type: CliType::Quit, 89 | text: "Quit".to_string(), 90 | }, 91 | ]; 92 | 93 | loop { 94 | let selection = Select::with_theme(&ColorfulTheme::default()) 95 | .items( 96 | &items 97 | .iter() 98 | .map(|item| item.text.clone()) 99 | .collect::>(), 100 | ) 101 | .default(0) 102 | .interact_opt()?; 103 | 104 | match selection { 105 | Some(index) => match items[index].cli_type { 106 | CliType::Audit => { 107 | let epoch_input: String = Input::new() 108 | .with_prompt("Audit which epoch?".to_string()) 109 | .validate_with(|input: &String| -> Result<(), &str> { 110 | let int = input.parse::().map_err(|_| "Not a valid epoch")?; 111 | if 1 <= int && int <= proofs.len() { 112 | Ok(()) 113 | } else { 114 | Err("Epoch is out of available range") 115 | } 116 | }) 117 | .interact_text()?; 118 | let epoch = epoch_input.parse::()?; 119 | let maybe_proof = proofs.iter().find(|proof| proof.name.epoch == epoch); 120 | if let Some(epoch_summary) = maybe_proof { 121 | do_epoch_audit(epoch_summary).await?; 122 | } else { 123 | bail!("Could not find epoch {}", epoch); 124 | } 125 | } 126 | CliType::Quit => { 127 | break; 128 | } 129 | }, 130 | None => { 131 | break; 
132 | } 133 | } 134 | } 135 | 136 | Ok(()) 137 | } 138 | 139 | pub(crate) async fn do_epoch_audit(epoch_summary: &EpochSummary) -> Result<()> { 140 | let pb1 = start_progress_bar("Downloading proof..."); 141 | let proof = auditor::get_proof(WHATSAPP_KT_DOMAIN, epoch_summary).await?; 142 | finish_progress_bar( 143 | pb1, 144 | format!( 145 | "Successfully downloaded proof for epoch {}.", 146 | epoch_summary.name.epoch 147 | ), 148 | ); 149 | 150 | let pb2 = start_progress_bar("Auditing..."); 151 | let result = auditor::audit_epoch(proof).await?; 152 | finish_progress_bar(pb2, result); 153 | 154 | Ok(()) 155 | } 156 | 157 | pub(crate) fn start_progress_bar(input_msg: &'static str) -> ProgressBar { 158 | let pb = ProgressBar::new_spinner(); 159 | pb.enable_steady_tick(Duration::from_millis(80)); 160 | pb.set_message(input_msg); 161 | let waiting_style = ProgressStyle::default_spinner() 162 | .template("[{elapsed_precise}] {spinner:.cyan/blue} {msg:.yellow}") 163 | .unwrap() 164 | .tick_strings(&[ 165 | "[ ]", "[= ]", "[== ]", "[=== ]", "[ ===]", "[ ==]", "[ =]", "[ ]", 166 | "[ =]", "[ ==]", "[ ===]", "[====]", "[=== ]", "[== ]", "[= ]", 167 | ]); 168 | 169 | pb.set_style(waiting_style); 170 | pb 171 | } 172 | 173 | pub(crate) fn finish_progress_bar(pb: ProgressBar, message: String) { 174 | let done_style = ProgressStyle::default_spinner() 175 | .template("[{elapsed_precise}] {msg:.bold.green}") 176 | .unwrap(); 177 | pb.set_style(done_style); 178 | pb.finish_with_message(message); 179 | } 180 | -------------------------------------------------------------------------------- /xtask/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "xtask" 3 | version = "0.1.0" 4 | license = "MIT OR Apache-2.0" 5 | edition = "2021" 6 | 7 | [dependencies] 8 | xtaskops = "0.4" 9 | anyhow = "1" 10 | -------------------------------------------------------------------------------- /xtask/README.md: 
-------------------------------------------------------------------------------- 1 | This package is included here to support the automatic reporting of code coverage on 2 | Github. 3 | 4 | ## Current code coverage 5 | 6 | [![codecov](https://codecov.io/gh/facebook/akd/branch/main/graph/badge.svg?token=VFE82QWLTK)](https://codecov.io/gh/facebook/akd) 7 | 8 | 9 | 10 | ## Viewing code coverage locally 11 | 12 | Do this once to set it up: 13 | ``` 14 | rustup component add llvm-tools-preview 15 | cargo install grcov 16 | ``` 17 | 18 | Subsequently, run: 19 | ``` 20 | cargo xtask coverage --dev 21 | ``` -------------------------------------------------------------------------------- /xtask/src/main.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) Meta Platforms, Inc. and affiliates. 2 | // 3 | // This source code is dual-licensed under either the MIT license found in the 4 | // LICENSE-MIT file in the root directory of this source tree or the Apache 5 | // License, Version 2.0 found in the LICENSE-APACHE file in the root directory 6 | // of this source tree. You may select, at your option, one of the above-listed licenses. 7 | 8 | fn main() -> Result<(), anyhow::Error> { 9 | xtaskops::tasks::main() 10 | } 11 | --------------------------------------------------------------------------------