├── .cargo
└── config.toml
├── Cargo.toml
├── .codecov.yml
├── .vscode
├── settings.json
└── launch.json
├── xtask
├── Cargo.toml
├── src
│ └── main.rs
└── README.md
├── .github
├── dependabot.yml
└── workflows
│ ├── coverage.yml
│ ├── publish.yml
│ ├── mysql.yml
│ ├── wait-for-crate-dependency.sh
│ └── ci.yml
├── .gitignore
├── docker-compose.yml
├── akd_core
├── src
│ ├── proto
│ │ └── specs
│ │ │ ├── mod.rs
│ │ │ ├── types.rs
│ │ │ └── types.proto
│ ├── hash
│ │ ├── tests.rs
│ │ └── mod.rs
│ ├── configuration
│ │ ├── mod.rs
│ │ ├── traits.rs
│ │ ├── experimental.rs
│ │ └── whatsapp_v1.rs
│ ├── build.rs
│ ├── verify
│ │ ├── lookup.rs
│ │ └── mod.rs
│ └── ecvrf
│ │ └── mod.rs
├── Cargo.toml
└── benches
│ └── parallel_vrfs.rs
├── examples
├── src
│ ├── mysql_demo
│ │ ├── tests
│ │ │ ├── mod.rs
│ │ │ ├── memory_tests.rs
│ │ │ ├── mysql_db_tests.rs
│ │ │ └── mysql_tests.rs
│ │ ├── logs.rs
│ │ ├── commands.rs
│ │ └── directory_host.rs
│ ├── fixture_generator
│ │ ├── examples
│ │ │ ├── mod.rs
│ │ │ └── example_tests.rs
│ │ ├── writer
│ │ │ ├── mod.rs
│ │ │ └── yaml.rs
│ │ ├── mod.rs
│ │ ├── reader
│ │ │ ├── mod.rs
│ │ │ ├── tests.rs
│ │ │ └── yaml.rs
│ │ ├── parser.rs
│ │ └── generator.rs
│ ├── main.rs
│ └── whatsapp_kt_auditor
│ │ ├── auditor.rs
│ │ └── mod.rs
├── Cargo.toml
└── README.md
├── akd
├── src
│ ├── client.rs
│ ├── helper_structs.rs
│ ├── storage
│ │ ├── cache
│ │ │ ├── mod.rs
│ │ │ └── tests.rs
│ │ └── mod.rs
│ ├── utils.rs
│ ├── tests
│ │ ├── test_preloads.rs
│ │ └── mod.rs
│ ├── test_utils.rs
│ └── auditor.rs
├── benches
│ ├── common.rs
│ ├── directory.rs
│ └── azks.rs
└── Cargo.toml
├── LICENSE-MIT
├── CONTRIBUTING.md
├── README.md
├── CODE_OF_CONDUCT.md
└── TESTING.md
/.cargo/config.toml:
--------------------------------------------------------------------------------
1 | [alias]
2 | xtask = "run --package xtask --"
3 |
--------------------------------------------------------------------------------
/Cargo.toml:
--------------------------------------------------------------------------------
1 | [workspace]
2 |
3 | members = ["akd", "akd_core", "examples", "xtask"]
4 | resolver = "2"
5 |
--------------------------------------------------------------------------------
/.codecov.yml:
--------------------------------------------------------------------------------
1 | codecov:
2 | require_ci_to_pass: false
3 |
4 | ignore:
5 | - "examples"
6 | - "xtask"
7 |
--------------------------------------------------------------------------------
/.vscode/settings.json:
--------------------------------------------------------------------------------
1 | {
2 | "rust-analyzer.cargo.unsetTest": [
3 | "core",
4 | "ed25519-dalek"
5 | ]
6 | }
--------------------------------------------------------------------------------
/xtask/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "xtask"
3 | version = "0.1.0"
4 | license = "MIT OR Apache-2.0"
5 | edition = "2021"
6 |
7 | [dependencies]
8 | xtaskops = "0.4"
9 | anyhow = "1"
10 |
--------------------------------------------------------------------------------
/.github/dependabot.yml:
--------------------------------------------------------------------------------
1 | version: 2
2 |
3 | updates:
4 | - package-ecosystem: cargo
5 | directory: /
6 | schedule:
7 | interval: daily
8 |
9 | - package-ecosystem: github-actions
10 | directory: /
11 | schedule:
12 | interval: daily
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .DS_Store
2 | .vs
3 |
4 | debug/
5 | coverage/
6 |
7 | .vscode/**
8 | !.vscode/launch.json
9 | !.vscode/settings.json
10 |
11 |
12 | **/*/.DS_Store
13 | Cargo.lock
14 | **/Cargo.lock
15 | **/*.rs.bk
16 |
17 | **/target
18 |
19 | **/*.log
20 | **/*.profraw
21 |
22 | .idea/**
23 | *.iml
24 |
--------------------------------------------------------------------------------
/docker-compose.yml:
--------------------------------------------------------------------------------
1 | # Use root/example as user/password credentials
2 | services:
3 | db:
4 | container_name: akd-test-db
5 | platform: linux/x86_64
6 | image: mysql:8.4
7 | command: --mysql-native-password=ON
8 | restart: unless-stopped
9 | ports:
10 | - "8001:3306"
11 | environment:
12 | MYSQL_ROOT_PASSWORD: example
13 | MYSQL_DATABASE: default
14 |
--------------------------------------------------------------------------------
/xtask/src/main.rs:
--------------------------------------------------------------------------------
1 | // Copyright (c) Meta Platforms, Inc. and affiliates.
2 | //
3 | // This source code is dual-licensed under either the MIT license found in the
4 | // LICENSE-MIT file in the root directory of this source tree or the Apache
5 | // License, Version 2.0 found in the LICENSE-APACHE file in the root directory
6 | // of this source tree. You may select, at your option, one of the above-listed licenses.
7 |
8 | fn main() -> Result<(), anyhow::Error> {
9 | xtaskops::tasks::main()
10 | }
11 |
--------------------------------------------------------------------------------
/akd_core/src/proto/specs/mod.rs:
--------------------------------------------------------------------------------
1 | // Copyright (c) Meta Platforms, Inc. and affiliates.
2 | //
3 | // This source code is dual-licensed under either the MIT license found in the
4 | // LICENSE-MIT file in the root directory of this source tree or the Apache
5 | // License, Version 2.0 found in the LICENSE-APACHE file in the root directory
6 | // of this source tree. You may select, at your option, one of the above-listed licenses.
7 |
8 | //! @generated code
9 |
10 | include!(concat!(env!("OUT_DIR"), "/protos/mod.rs"));
11 |
--------------------------------------------------------------------------------
/akd_core/src/proto/specs/types.rs:
--------------------------------------------------------------------------------
1 | // Copyright (c) Meta Platforms, Inc. and affiliates.
2 | //
3 | // This source code is dual-licensed under either the MIT license found in the
4 | // LICENSE-MIT file in the root directory of this source tree or the Apache
5 | // License, Version 2.0 found in the LICENSE-APACHE file in the root directory
6 | // of this source tree. You may select, at your option, one of the above-listed licenses.
7 |
8 | //! @generated code
9 |
10 | include!(concat!(env!("OUT_DIR"), "/protos/types.rs"));
11 |
--------------------------------------------------------------------------------
/examples/src/mysql_demo/tests/mod.rs:
--------------------------------------------------------------------------------
1 | // Copyright (c) Meta Platforms, Inc. and affiliates.
2 | //
3 | // This source code is dual-licensed under either the MIT license found in the
4 | // LICENSE-MIT file in the root directory of this source tree or the Apache
5 | // License, Version 2.0 found in the LICENSE-APACHE file in the root directory
6 | // of this source tree. You may select, at your option, one of the above-listed licenses.
7 |
8 | mod memory_tests;
9 | mod mysql_db_tests;
10 | mod mysql_tests;
11 | mod test_util;
12 |
--------------------------------------------------------------------------------
/akd/src/client.rs:
--------------------------------------------------------------------------------
1 | // Copyright (c) Meta Platforms, Inc. and affiliates.
2 | //
3 | // This source code is dual-licensed under either the MIT license found in the
4 | // LICENSE-MIT file in the root directory of this source tree or the Apache
5 | // License, Version 2.0 found in the LICENSE-APACHE file in the root directory
6 | // of this source tree. You may select, at your option, one of the above-listed licenses.
7 |
8 | //! Code for a client of a auditable key directory
9 |
10 | // Just re-export the verification calls here
11 | pub use akd_core::verify::*;
12 |
--------------------------------------------------------------------------------
/examples/src/fixture_generator/examples/mod.rs:
--------------------------------------------------------------------------------
1 | // Copyright (c) Meta Platforms, Inc. and affiliates.
2 | //
3 | // This source code is dual-licensed under either the MIT license found in the
4 | // LICENSE-MIT file in the root directory of this source tree or the Apache
5 | // License, Version 2.0 found in the LICENSE-APACHE file in the root directory
6 | // of this source tree. You may select, at your option, one of the above-listed licenses.
7 |
8 | //! This module contains examples of how fixtures can be read and used in tests.
9 |
10 | #[cfg(test)]
11 | mod example_tests;
12 |
--------------------------------------------------------------------------------
/xtask/README.md:
--------------------------------------------------------------------------------
1 | This package is included here to support the automatic reporting of code coverage on
2 | Github.
3 |
4 | ## Current code coverage
5 |
6 | [![codecov](https://codecov.io/gh/facebook/akd/branch/main/graph/badge.svg)](https://codecov.io/gh/facebook/akd)
7 |
8 |
9 |
10 | ## Viewing code coverage locally
11 |
12 | Do this once to set it up:
13 | ```
14 | rustup component add llvm-tools-preview
15 | cargo install grcov
16 | ```
17 |
18 | Subsequently, run:
19 | ```
20 | cargo xtask coverage --dev
21 | ```
--------------------------------------------------------------------------------
/akd_core/src/hash/tests.rs:
--------------------------------------------------------------------------------
1 | // Copyright (c) Meta Platforms, Inc. and affiliates.
2 | //
3 | // This source code is dual-licensed under either the MIT license found in the
4 | // LICENSE-MIT file in the root directory of this source tree or the Apache
5 | // License, Version 2.0 found in the LICENSE-APACHE file in the root directory
6 | // of this source tree. You may select, at your option, one of the above-listed licenses.
7 |
8 | //! Tests for hashing
9 |
10 | use super::*;
11 |
12 | #[cfg(feature = "nostd")]
13 | use alloc::vec;
14 |
15 | #[test]
16 | fn test_try_parse_digest() {
17 | let mut data = EMPTY_DIGEST;
18 | let digest = try_parse_digest(&data).unwrap();
19 | assert_eq!(EMPTY_DIGEST, digest);
20 | data[0] = 1;
21 | let digest = try_parse_digest(&data).unwrap();
22 | assert_ne!(EMPTY_DIGEST, digest);
23 |
24 | let data_bad_length = vec![0u8; DIGEST_BYTES + 1];
25 | assert!(try_parse_digest(&data_bad_length).is_err());
26 | }
27 |
--------------------------------------------------------------------------------
/akd_core/src/configuration/mod.rs:
--------------------------------------------------------------------------------
1 | // Copyright (c) Meta Platforms, Inc. and affiliates.
2 | //
3 | // This source code is dual-licensed under either the MIT license found in the
4 | // LICENSE-MIT file in the root directory of this source tree or the Apache
5 | // License, Version 2.0 found in the LICENSE-APACHE file in the root directory
6 | // of this source tree. You may select, at your option, one of the above-listed licenses.
7 |
8 | //! Defines the configuration trait and implementations for various configurations
9 |
10 | mod traits;
11 | pub use traits::{Configuration, DomainLabel, ExampleLabel};
12 |
13 | #[cfg(feature = "public_tests")]
14 | pub use traits::NamedConfiguration;
15 |
16 | // Note(new_config): Update this when adding a new configuration
17 |
18 | #[cfg(feature = "whatsapp_v1")]
19 | pub(crate) mod whatsapp_v1;
20 | #[cfg(feature = "whatsapp_v1")]
21 | pub use whatsapp_v1::WhatsAppV1Configuration;
22 |
23 | #[cfg(feature = "experimental")]
24 | pub(crate) mod experimental;
25 | #[cfg(feature = "experimental")]
26 | pub use experimental::ExperimentalConfiguration;
27 |
--------------------------------------------------------------------------------
/LICENSE-MIT:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) Meta Platforms, Inc. and affiliates.
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/examples/src/fixture_generator/writer/mod.rs:
--------------------------------------------------------------------------------
1 | // Copyright (c) Meta Platforms, Inc. and affiliates.
2 | //
3 | // This source code is dual-licensed under either the MIT license found in the
4 | // LICENSE-MIT file in the root directory of this source tree or the Apache
5 | // License, Version 2.0 found in the LICENSE-APACHE file in the root directory
6 | // of this source tree. You may select, at your option, one of the above-listed licenses.
7 |
8 | //! This module contains the Writer trait to serialize the tool's serde-compatible
9 | //! objects to a format, as well as implementations of the trait.
10 |
11 | use serde::Serialize;
12 |
13 | /// Interface for writing output generated by the tool.
14 | pub trait Writer {
15 | /// Writes a serde serializable object.
16 | fn write_object(&mut self, object: impl Serialize);
17 |
18 | /// Writes a comment that should be ignored by parsers.
19 | fn write_comment(&mut self, comment: &str);
20 |
21 | /// Writes a newline.
22 | fn write_line(&mut self);
23 |
24 | /// Flushes the internal buffer.
25 | fn flush(&mut self);
26 | }
27 |
28 | /// YAML implementor of Writer trait.
29 | pub(crate) mod yaml;
30 |
--------------------------------------------------------------------------------
/examples/src/fixture_generator/mod.rs:
--------------------------------------------------------------------------------
1 | // Copyright (c) Meta Platforms, Inc. and affiliates.
2 | //
3 | // This source code is dual-licensed under either the MIT license found in the
4 | // LICENSE-MIT file in the root directory of this source tree or the Apache
5 | // License, Version 2.0 found in the LICENSE-APACHE file in the root directory
6 | // of this source tree. You may select, at your option, one of the above-listed licenses.
7 |
8 | //! A CLI tool for generating directory fixtures for debug and testing purposes.
9 | //! Run cargo run -p examples -- fixture-generator --help for options. Example command:
10 | //!
11 | //! cargo run -- fixture-generator \
12 | //! --user "User1: 1, (9, 'abc'), (10, 'def')" \
13 | //! --user "User2: 1, 2, 3, 4, 5, 6, 7, 8, 9, 10" \
14 | //! --epochs 10 \
15 | //! --max_updates 5 \
16 | //! --capture_states 9 10 \
17 | //! --capture_deltas 10
18 | //!
19 |
20 | mod examples;
21 | mod generator;
22 | mod parser;
23 | pub mod reader;
24 | pub mod writer;
25 |
26 | pub(crate) use parser::Args;
27 |
28 | /// Re-export generator run function.
29 | pub(crate) use generator::run;
30 |
31 | const YAML_SEPARATOR: &str = "---";
32 |
--------------------------------------------------------------------------------
/.github/workflows/coverage.yml:
--------------------------------------------------------------------------------
1 | name: Code Coverage
2 | on:
3 | push:
4 | branches:
5 | - main
6 | pull_request:
7 | types: [opened, reopened, synchronize]
8 |
9 | jobs:
10 | coverage:
11 | name: Coverage using xtask
12 | strategy:
13 | matrix:
14 | os: [ubuntu-latest]
15 | rust: [stable]
16 | runs-on: ${{ matrix.os }}
17 | steps:
18 | - name: Checkout sources
19 | uses: actions/checkout@v4
20 |
21 | - name: Install stable toolchain
22 | uses: actions-rs/toolchain@v1
23 | with:
24 | toolchain: ${{ matrix.rust }}
25 | override: true
26 | components: llvm-tools-preview
27 |
28 | - uses: Swatinem/rust-cache@v2
29 |
30 | - name: Download grcov
31 | run: |
32 | mkdir -p "${HOME}/.local/bin"
33 | curl -sL https://github.com/mozilla/grcov/releases/download/v0.8.10/grcov-x86_64-unknown-linux-gnu.tar.bz2 | tar jxf - -C "${HOME}/.local/bin"
34 | echo "$HOME/.local/bin" >> $GITHUB_PATH
35 | - name: Run xtask coverage
36 | uses: actions-rs/cargo@v1
37 | with:
38 | command: xtask
39 | args: coverage
40 |
41 |
42 | - name: Upload to codecov.io
43 | uses: codecov/codecov-action@v5
44 | with:
45 | files: coverage/*.lcov
--------------------------------------------------------------------------------
/.github/workflows/publish.yml:
--------------------------------------------------------------------------------
1 | name: Publish
2 |
3 | on:
4 | release:
5 | types: [published]
6 |
7 | jobs:
8 | publish:
9 | runs-on: ${{ matrix.os }}
10 | strategy:
11 | matrix:
12 | os: [ubuntu-latest]
13 | rust: [stable]
14 |
15 | steps:
16 | - uses: hecrj/setup-rust-action@v2
17 | with:
18 | rust-version: ${{ matrix.rust }}
19 |
20 | - uses: actions/checkout@main
21 |
22 | - name: Login to crates.io
23 | run: cargo login $CRATES_IO_TOKEN
24 | env:
25 | CRATES_IO_TOKEN: ${{ secrets.crates_io_token }}
26 |
27 | - name: Dry run publish akd_core
28 | run: cargo publish --dry-run --manifest-path Cargo.toml -p akd_core
29 |
30 | - name: Publish crate akd_core
31 | run: cargo publish --manifest-path Cargo.toml -p akd_core
32 | env:
33 | CARGO_REGISTRY_TOKEN: ${{ secrets.crates_io_token }}
34 |
35 | - name: Wait for necessary akd_core version to be available
36 | run: bash ./.github/workflows/wait-for-crate-dependency.sh akd akd_core
37 |
38 | - name: Dry run publish AKD
39 | run: cargo publish --dry-run --manifest-path Cargo.toml -p akd
40 |
41 | - name: Publish crate AKD
42 | run: cargo publish --manifest-path Cargo.toml -p akd
43 | env:
44 | CARGO_REGISTRY_TOKEN: ${{ secrets.crates_io_token }}
45 |
--------------------------------------------------------------------------------
/akd/src/helper_structs.rs:
--------------------------------------------------------------------------------
1 | // Copyright (c) Meta Platforms, Inc. and affiliates.
2 | //
3 | // This source code is dual-licensed under either the MIT license found in the
4 | // LICENSE-MIT file in the root directory of this source tree or the Apache
5 | // License, Version 2.0 found in the LICENSE-APACHE file in the root directory
6 | // of this source tree. You may select, at your option, one of the above-listed licenses.
7 |
8 | //! Helper structs that are used for various data structures,
9 | //! to make it easier to pass arguments around.
10 |
11 | use crate::Digest;
12 | use crate::{storage::types::ValueState, NodeLabel};
13 |
14 | /// Root hash of the tree and its associated epoch
15 | #[derive(Debug, Clone, Hash, PartialEq, Eq)]
16 | pub struct EpochHash(pub u64, pub Digest);
17 |
18 | impl EpochHash {
19 | /// Get the contained epoch
20 | pub fn epoch(&self) -> u64 {
21 | self.0
22 | }
23 | /// Get the contained hash
24 | pub fn hash(&self) -> Digest {
25 | self.1
26 | }
27 | }
28 |
29 | #[derive(Clone, Debug)]
30 | /// Info needed for a lookup of a user for an epoch
31 | pub struct LookupInfo {
32 | pub(crate) value_state: ValueState,
33 | pub(crate) marker_version: u64,
34 | pub(crate) existent_label: NodeLabel,
35 | pub(crate) marker_label: NodeLabel,
36 | pub(crate) non_existent_label: NodeLabel,
37 | }
38 |
--------------------------------------------------------------------------------
/examples/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "examples"
3 | version = "0.12.0-pre.12"
4 | authors = ["akd contributors"]
5 | license = "MIT OR Apache-2.0"
6 | edition = "2021"
7 | publish = false
8 |
9 |
10 | [[bin]]
11 | name = "akd-examples"
12 | path = "src/main.rs"
13 | bench = false
14 | doc = false
15 |
16 | [features]
17 | # Collect runtime metrics on db access calls + timing
18 | runtime_metrics = []
19 |
20 | [dependencies]
21 | anyhow = "1"
22 | async-trait = "0.1"
23 | bytesize = "1"
24 | colored = "2"
25 | clap = { version = "4", features = ["derive"] }
26 | dialoguer = "0.11"
27 | hex = "0.4"
28 | indicatif = "0.17"
29 | log = { version = "0.4", features = ["kv_unstable"] }
30 | multi_log = "0.1"
31 | mysql_async = "0.32"
32 | mysql_common = "0.31"
33 | once_cell = "1"
34 | protobuf = "3"
35 | rand = "0.8"
36 | serde = { version = "1", features = ["derive"] }
37 | serde_json = "1"
38 | thread-id = "4"
39 | tokio = { version = "1", features = ["full"] }
40 | xml-rs = "0.8"
41 | reqwest = "0.11"
42 | regex = "1"
43 | serde_yaml = "0.9"
44 | wasm-bindgen = "0.2"
45 |
46 | akd = { path = "../akd", features = [
47 | "public_tests",
48 | "public_auditing",
49 | "whatsapp_v1",
50 | "experimental",
51 | ] }
52 | akd_core = { path = "../akd_core" }
53 |
54 | [dev-dependencies]
55 | serial_test = "2"
56 | assert_fs = "1"
57 | paste = "1"
58 | wasm-bindgen-test = "0.3"
59 |
--------------------------------------------------------------------------------
/akd/src/storage/cache/mod.rs:
--------------------------------------------------------------------------------
1 | // Copyright (c) Meta Platforms, Inc. and affiliates.
2 | //
3 | // This source code is dual-licensed under either the MIT license found in the
4 | // LICENSE-MIT file in the root directory of this source tree or the Apache
5 | // License, Version 2.0 found in the LICENSE-APACHE file in the root directory
6 | // of this source tree. You may select, at your option, one of the above-listed licenses.
7 |
8 | //! This module handles the caching implementation and testing for a time-based cache
9 | //! which supports memory pressure shedding
10 |
11 | use crate::storage::DbRecord;
12 | use std::time::Instant;
13 |
14 | #[cfg(test)]
15 | mod tests;
16 |
17 | /// items live for 30s by default
18 | pub(crate) const DEFAULT_ITEM_LIFETIME_MS: u64 = 30000;
19 | /// clean the cache every 15s by default
20 | pub(crate) const DEFAULT_CACHE_CLEAN_FREQUENCY_MS: u64 = 15000;
21 |
22 | pub(crate) struct CachedItem {
23 | pub(crate) expiration: Instant,
24 | pub(crate) data: DbRecord,
25 | }
26 |
27 | impl akd_core::SizeOf for CachedItem {
28 | fn size_of(&self) -> usize {
29 | // the size of an "Instant" varies based on the underlying implementation, so
30 | // we assume the largest which is 16 bytes on linux
31 | 16 + self.data.size_of()
32 | }
33 | }
34 |
35 | // -------- sub modules -------- //
36 |
37 | pub mod high_parallelism;
38 |
39 | // -------- cache exports -------- //
40 |
41 | pub use high_parallelism::TimedCache;
42 |
--------------------------------------------------------------------------------
/examples/src/fixture_generator/writer/yaml.rs:
--------------------------------------------------------------------------------
1 | // Copyright (c) Meta Platforms, Inc. and affiliates.
2 | //
3 | // This source code is dual-licensed under either the MIT license found in the
4 | // LICENSE-MIT file in the root directory of this source tree or the Apache
5 | // License, Version 2.0 found in the LICENSE-APACHE file in the root directory
6 | // of this source tree. You may select, at your option, one of the above-listed licenses.
7 |
8 | //! This module contains an implementor of the Writer trait for the YAML format.
9 |
10 | use std::io::Write;
11 |
12 | use serde::Serialize;
13 |
14 | use crate::fixture_generator::{writer::Writer, YAML_SEPARATOR};
15 |
16 | /// YAML format writer.
17 | pub(crate) struct YamlWriter<T: Write> {
18 | out: T,
19 | }
20 |
21 | impl<T: Write> YamlWriter<T> {
22 | pub fn new(out: T) -> Self {
23 | Self { out }
24 | }
25 | }
26 |
27 | impl<T: Write> Writer for YamlWriter<T> {
28 | fn write_object(&mut self, object: impl Serialize) {
29 | writeln!(self.out, "{YAML_SEPARATOR}").unwrap();
30 | serde_yaml::to_writer(&mut self.out, &object).unwrap();
31 | }
32 |
33 | fn write_comment(&mut self, comment: &str) {
34 | let lines = comment.split('\n');
35 | lines.for_each(|line| writeln!(self.out, "# {line}").unwrap());
36 | }
37 |
38 | fn write_line(&mut self) {
39 | writeln!(self.out).unwrap()
40 | }
41 |
42 | fn flush(&mut self) {
43 | self.out.flush().unwrap();
44 | }
45 | }
46 |
--------------------------------------------------------------------------------
/akd_core/src/hash/mod.rs:
--------------------------------------------------------------------------------
1 | // Copyright (c) Meta Platforms, Inc. and affiliates.
2 | //
3 | // This source code is dual-licensed under either the MIT license found in the
4 | // LICENSE-MIT file in the root directory of this source tree or the Apache
5 | // License, Version 2.0 found in the LICENSE-APACHE file in the root directory
6 | // of this source tree. You may select, at your option, one of the above-listed licenses.
7 |
8 | //! This module contains all the hashing utilities needed for the AKD directory
9 | //! and verification operations
10 |
11 | #[cfg(feature = "nostd")]
12 | use alloc::format;
13 | #[cfg(feature = "nostd")]
14 | use alloc::string::String;
15 |
16 | /// A hash digest of a specified number of bytes
17 | pub type Digest = [u8; DIGEST_BYTES];
18 | /// Represents an empty digest, with no data contained
19 | pub const EMPTY_DIGEST: [u8; DIGEST_BYTES] = [0u8; DIGEST_BYTES];
20 | /// The number of bytes in a digest
21 | pub const DIGEST_BYTES: usize = 32;
22 |
23 | #[cfg(test)]
24 | mod tests;
25 |
26 | /// Try and parse a digest from an unknown length of bytes. Helpful for converting a `Vec<u8>`
27 | /// to a [Digest]
28 | pub fn try_parse_digest(value: &[u8]) -> Result<Digest, String> {
29 | if value.len() != DIGEST_BYTES {
30 | Err(format!(
31 | "Failed to parse Digest. Expected {} bytes but the value has {} bytes",
32 | DIGEST_BYTES,
33 | value.len()
34 | ))
35 | } else {
36 | let mut arr = EMPTY_DIGEST;
37 | arr.copy_from_slice(value);
38 | Ok(arr)
39 | }
40 | }
41 |
--------------------------------------------------------------------------------
/akd/benches/common.rs:
--------------------------------------------------------------------------------
1 | // Copyright (c) Meta Platforms, Inc. and affiliates.
2 | //
3 | // This source code is dual-licensed under either the MIT license found in the
4 | // LICENSE-MIT file in the root directory of this source tree or the Apache
5 | // License, Version 2.0 found in the LICENSE-APACHE file in the root directory
6 | // of this source tree. You may select, at your option, one of the above-listed licenses.
7 |
8 | #[macro_export]
9 | macro_rules! bench_config {
10 | ( $x:ident ) => {
11 | paste::paste! {
12 | // NOTE(new_config): Add a new configuration here
13 |
14 | #[cfg(feature = "whatsapp_v1")]
15 | fn [<$x _ whatsapp_v1_config>](c: &mut Criterion) {
16 | $x::<WhatsAppV1Configuration>(c)
17 | }
18 |
19 | #[cfg(feature = "experimental")]
20 | fn [<$x _ experimental_config>](c: &mut Criterion) {
21 | $x::<ExperimentalConfiguration<ExampleLabel>>(c)
22 | }
23 | }
24 | };
25 | }
26 |
27 | #[macro_export]
28 | macro_rules! group_config {
29 | ( $( $group:path ),+ $(,)* ) => {
30 | paste::paste! {
31 | // NOTE(new_config): Add a new configuration here
32 |
33 | #[cfg(feature = "whatsapp_v1")]
34 | criterion_group!(
35 | $(
36 | [<$group _ whatsapp_v1_config>],
37 | )+
38 | );
39 |
40 | #[cfg(feature = "experimental")]
41 | criterion_group!(
42 | $(
43 | [<$group _ experimental_config>],
44 | )+
45 | );
46 | }
47 | };
48 | }
49 |
--------------------------------------------------------------------------------
/akd_core/src/build.rs:
--------------------------------------------------------------------------------
1 | // Copyright (c) Meta Platforms, Inc. and affiliates.
2 | //
3 | // This source code is dual-licensed under either the MIT license found in the
4 | // LICENSE-MIT file in the root directory of this source tree or the Apache
5 | // License, Version 2.0 found in the LICENSE-APACHE file in the root directory
6 | // of this source tree. You may select, at your option, one of the above-listed licenses.
7 |
8 | //! This is the pre-compilation build script for the crate `akd_core`. Mainly it's used to compile
9 | //! protobuf files into rust code prior to compilation.
10 |
11 | // NOTE: build.rs documentation = https://doc.rust-lang.org/cargo/reference/build-scripts.html
12 |
13 | /// The shared-path for all protobuf specifications
14 | const PROTOBUF_BASE_DIRECTORY: &str = "src/proto/specs";
15 | /// The list of protobuf files to generate inside PROBUF_BASE_DIRECTORY
16 | const PROTOBUF_FILES: [&str; 1] = ["types"];
17 | /// The output directory in the cargo build folder to emit the generated sources to
18 | const PROTOS_OUTPUT_DIR: &str = "protos";
19 |
20 | fn build_protobufs() {
21 | let mut protobuf_files = Vec::with_capacity(PROTOBUF_FILES.len());
22 |
23 | for file in PROTOBUF_FILES.iter() {
24 | let proto_file = format!("{PROTOBUF_BASE_DIRECTORY}/{file}.proto");
25 | println!("cargo:rerun-if-changed={proto_file}");
26 | protobuf_files.push(proto_file);
27 | }
28 |
29 | // Code generator writes to the output directory
30 | protobuf_codegen::Codegen::new()
31 | .pure()
32 | .includes([PROTOBUF_BASE_DIRECTORY])
33 | .inputs(&protobuf_files)
34 | .cargo_out_dir(PROTOS_OUTPUT_DIR)
35 | .run_from_script();
36 | }
37 |
38 | fn main() {
39 | // compile the spec files into Rust code
40 | build_protobufs();
41 | }
42 |
--------------------------------------------------------------------------------
/.github/workflows/mysql.yml:
--------------------------------------------------------------------------------
1 | name: MySQL and Integration Tests
2 | on:
3 | push:
4 | branches:
5 | - main
6 | pull_request:
7 | types: [opened, reopened, synchronize]
8 |
9 | jobs:
10 | run-tests:
11 | name: Run tests (Rust ${{matrix.toolchain}} on ${{matrix.os}})
12 | runs-on: ${{matrix.os}}-latest
13 | strategy:
14 | fail-fast: false
15 | matrix:
16 | toolchain: [stable]
17 | os: [ubuntu]
18 | steps:
19 | - uses: actions/checkout@main
20 |
21 | - name: Install rust
22 | uses: actions-rs/toolchain@v1
23 | with:
24 | toolchain: ${{matrix.toolchain}}
25 | override: true
26 |
27 | - name: Set up protoc
28 | uses: arduino/setup-protoc@v3.0.0
29 | with:
30 | repo-token: ${{ secrets.GITHUB_TOKEN }}
31 |
32 | - name: Cargo build
33 | uses: actions-rs/cargo@v1
34 | with:
35 | command: build
36 |
37 | - name: Build the docker-compose stack
38 | run: docker compose -f docker-compose.yml up -d
39 |
40 | - name: Check running containers
41 | run: docker ps -a
42 |
43 | - name: Verify MySQL db connection
44 | run: |
45 | while ! docker exec akd-test-db mysql --user=root --password=example -e "SHOW DATABASES" >/dev/null 2>&1; do
46 | sleep 1
47 | done
48 | echo "MySQL container is up"
49 |
50 | - name: Check container akd-test-db logs
51 | run: docker logs akd-test-db
52 |
53 | - name: Run MySQL tests and integration tests in examples package
54 | uses: actions-rs/cargo@v1
55 | with:
56 | command: test
57 | args: --manifest-path Cargo.toml -p examples
58 |
59 | - name: Cleanup docker container
60 | run: docker compose -f docker-compose.yml down -v
61 |
62 | - name: Copy integration test logs for review
63 | run: cat examples/integration_test.log
64 |
--------------------------------------------------------------------------------
/examples/src/mysql_demo/tests/memory_tests.rs:
--------------------------------------------------------------------------------
1 | // Copyright (c) Meta Platforms, Inc. and affiliates.
2 | //
3 | // This source code is dual-licensed under either the MIT license found in the
4 | // LICENSE-MIT file in the root directory of this source tree or the Apache
5 | // License, Version 2.0 found in the LICENSE-APACHE file in the root directory
6 | // of this source tree. You may select, at your option, one of the above-listed licenses.
7 |
8 | use crate::{
9 | mysql_demo::tests::test_util::{directory_test_suite, log_init},
10 | test_config_serial,
11 | };
12 | use akd::{ecvrf::HardCodedAkdVRF, storage::StorageManager, Configuration};
13 | use log::info;
14 |
15 | type InMemoryDb = akd::storage::memory::AsyncInMemoryDatabase;
16 |
17 | test_config_serial!(test_directory_operations);
18 | async fn test_directory_operations<TC: Configuration>() {
19 | log_init(log::Level::Info);
20 |
21 | info!("\n\n******** Starting In-Memory Directory Operations Integration Test ********\n\n");
22 |
23 | let db = InMemoryDb::new();
24 |
25 | let vrf = HardCodedAkdVRF {};
26 | let storage_manager = StorageManager::new_no_cache(db);
27 | directory_test_suite::<TC, InMemoryDb, HardCodedAkdVRF>(&storage_manager, 500, &vrf).await;
28 |
29 | info!("\n\n******** Finished In-Memory Directory Operations Integration Test ********\n\n");
30 | }
31 |
32 | test_config_serial!(test_directory_operations_with_caching);
33 | async fn test_directory_operations_with_caching<TC: Configuration>() {
34 | log_init(log::Level::Info);
35 |
36 | info!("\n\n******** Starting In-Memory Directory Operations (w/caching) Integration Test ********\n\n");
37 |
38 | let db = InMemoryDb::new();
39 |
40 | let vrf = HardCodedAkdVRF {};
41 | let storage_manager = StorageManager::new(db, None, None, None);
42 | directory_test_suite::<TC, InMemoryDb, HardCodedAkdVRF>(&storage_manager, 500, &vrf).await;
43 |
44 | info!("\n\n******** Finished In-Memory Directory Operations (w/caching) Integration Test ********\n\n");
45 | }
46 |
--------------------------------------------------------------------------------
/examples/src/mysql_demo/tests/mysql_db_tests.rs:
--------------------------------------------------------------------------------
1 | // Copyright (c) Meta Platforms, Inc. and affiliates.
2 | //
3 | // This source code is dual-licensed under either the MIT license found in the
4 | // LICENSE-MIT file in the root directory of this source tree or the Apache
5 | // License, Version 2.0 found in the LICENSE-APACHE file in the root directory
6 | // of this source tree. You may select, at your option, one of the above-listed licenses.
7 |
8 | use super::test_util::log_init;
9 | use crate::mysql_demo::mysql::AsyncMySqlDatabase;
10 |
11 | // *** Tests *** //
12 |
13 | #[tokio::test]
14 | async fn test_mysql_db() {
15 | log_init(log::Level::Info);
16 | if AsyncMySqlDatabase::test_guard() {
17 | if let Err(error) = AsyncMySqlDatabase::create_test_db(
18 | "localhost",
19 | Option::from("root"),
20 | Option::from("example"),
21 | Option::from(8001),
22 | )
23 | .await
24 | {
25 | panic!("Error creating test database: {error}");
26 | }
27 |
28 | let mysql_db = AsyncMySqlDatabase::new(
29 | "localhost",
30 | "test_db",
31 | Option::from("root"),
32 | Option::from("example"),
33 | Option::from(8001),
34 | 200,
35 | )
36 | .await
37 | .expect("Failed to create async mysql db");
38 |
39 | if let Err(error) = mysql_db.delete_data().await {
40 | println!("Error cleaning mysql prior to test suite: {error}");
41 | }
42 |
43 | // The test cases
44 | let manager = akd::storage::tests::run_test_cases_for_storage_impl(mysql_db.clone()).await;
45 |
46 | // clean the test infra
47 | if let Err(mysql_async::Error::Server(error)) = manager.get_db().drop_tables().await {
48 | println!("ERROR: Failed to clean MySQL test database with error {error}");
49 | }
50 | } else {
51 | println!("WARN: Skipping MySQL test due to test guard noting that the docker container appears to not be running.");
52 | }
53 | }
54 |
--------------------------------------------------------------------------------
/examples/src/fixture_generator/reader/mod.rs:
--------------------------------------------------------------------------------
1 | // Copyright (c) Meta Platforms, Inc. and affiliates.
2 | //
3 | // This source code is dual-licensed under either the MIT license found in the
4 | // LICENSE-MIT file in the root directory of this source tree or the Apache
5 | // License, Version 2.0 found in the LICENSE-APACHE file in the root directory
6 | // of this source tree. You may select, at your option, one of the above-listed licenses.
7 |
8 | //! This module contains the Reader trait to deserialize the tool's serde-compatible
9 | //! objects from a formatted file, as well as implementations of the trait.
10 |
11 | use std::result::Result;
12 |
13 | use crate::fixture_generator::generator::{Delta, Metadata, State};
14 |
15 | /// Interface for reading output generated by the tool.
16 | pub trait Reader {
17 | /// Reads a metadata object.
18 | #[allow(dead_code)]
19 | fn read_metadata(&mut self) -> Result<Metadata, ReaderError>;
20 |
21 | /// Reads a state object for a given epoch.
22 | #[allow(dead_code)]
23 | fn read_state(&mut self, epoch: u32) -> Result<State, ReaderError>;
24 |
25 | /// Reads a delta object for a given epoch.
26 | #[allow(dead_code)]
27 | fn read_delta(&mut self, epoch: u32) -> Result<Delta, ReaderError>;
28 |
29 | /// Reads a String (freeform).
30 | #[allow(dead_code)]
31 | fn read_string(&mut self) -> Result<String, ReaderError>;
32 | }
33 |
34 | #[allow(dead_code)]
35 | #[derive(Debug, PartialEq, Eq)]
36 | pub enum ReaderError {
37 | NotFound,
38 | Format(String),
39 | Input(String),
40 | }
41 |
42 | impl std::error::Error for ReaderError {}
43 |
44 | impl std::fmt::Display for ReaderError {
45 | fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
46 | match self {
47 | ReaderError::NotFound => write!(f, "Object not found"),
48 | ReaderError::Format(message) => write!(f, "Unexpected format: {message}"),
49 | ReaderError::Input(message) => write!(f, "Input stream error: {message}"),
50 | }
51 | }
52 | }
53 |
54 | /// YAML implementor of Reader trait.
55 | pub mod yaml;
56 |
57 | #[cfg(test)]
58 | mod tests;
59 |
--------------------------------------------------------------------------------
/akd/src/utils.rs:
--------------------------------------------------------------------------------
1 | // Copyright (c) Meta Platforms, Inc. and affiliates.
2 | //
3 | // This source code is dual-licensed under either the MIT license found in the
4 | // LICENSE-MIT file in the root directory of this source tree or the Apache
5 | // License, Version 2.0 found in the LICENSE-APACHE file in the root directory
6 | // of this source tree. You may select, at your option, one of the above-listed licenses.
7 |
8 | // 1. Create a hashmap of all prefixes of all elements of the node set
9 | // 2. For each node in current_nodes set, check if each child is in prefix hashmap
10 | // 3. If so, add child label to batch set
11 |
12 | // Creates a byte array of 32 bytes from a u64
13 | // Note that this representation is big-endian, and
14 | // places the bits to the front of the output byte_array.
15 | #[cfg(any(test, feature = "public_tests"))]
16 | pub(crate) fn byte_arr_from_u64(input_int: u64) -> [u8; 32] {
17 | let mut output_arr = [0u8; 32];
18 | let input_arr = input_int.to_be_bytes();
19 | output_arr[..8].clone_from_slice(&input_arr[..8]);
20 | output_arr
21 | }
22 |
23 | #[allow(unused)]
24 | #[cfg(any(test, feature = "public_tests"))]
25 | pub(crate) fn random_label(rng: &mut impl rand::Rng) -> crate::NodeLabel {
26 | crate::NodeLabel {
27 | label_val: rng.gen::<[u8; 32]>(),
28 | label_len: 256,
29 | }
30 | }
31 |
32 | // NOTE(new_config): Add a new configuration here
33 |
34 | /// Macro used for running tests with different configurations
35 | #[cfg(any(test, feature = "public_tests"))]
36 | #[macro_export]
37 | macro_rules! test_config {
38 | ( $x:ident ) => {
39 | paste::paste! {
40 | #[cfg(feature = "whatsapp_v1")]
41 | #[tokio::test]
42 | async fn [<$x _ whatsapp_v1_config>]() -> Result<(), AkdError> {
43 | $x::<$crate::WhatsAppV1Configuration>().await
44 | }
45 |
46 | #[cfg(feature = "experimental")]
47 | #[tokio::test]
48 | async fn [<$x _ experimental_config>]() -> Result<(), AkdError> {
49 | $x::<$crate::ExperimentalConfiguration<$crate::ExampleLabel>>().await
50 | }
51 | }
52 | };
53 | }
54 |
--------------------------------------------------------------------------------
/akd_core/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "akd_core"
3 | version = "0.12.0-pre.12"
4 | authors = ["akd contributors"]
5 | description = "Core utilities for the akd crate"
6 | license = "MIT OR Apache-2.0"
7 | edition = "2021"
8 | keywords = ["key-transparency", "akd"]
9 | repository = "https://github.com/facebook/akd"
10 | readme = "../README.md"
11 | # Uncomment for automated building of the protobuf Rust sources. Necessary if the .proto specs change
12 | build = "src/build.rs"
13 |
14 | [build-dependencies]
15 | protobuf-codegen = "3"
16 | protobuf-parse = "3"
17 |
18 | [features]
19 | # Disable all STD for the crate
20 | nostd = []
21 | # Supported configurations
22 | whatsapp_v1 = ["dep:blake3"]
23 | experimental = ["dep:blake3"]
24 | # Include the VRF verification logic
25 | vrf = ["ed25519-dalek", "curve25519-dalek"]
26 | serde_serialization = ["dep:serde", "dep:serde_bytes", "ed25519-dalek/serde"]
27 | # Parallelize VRF calculations during publish
28 | parallel_vrf = ["tokio"]
29 |
30 | bench = ["parallel_vrf", "experimental", "vrf", "tokio/rt-multi-thread"]
31 | public_tests = ["dep:paste"]
32 | protobuf = ["dep:protobuf"]
33 |
34 | # Default features mix
35 | default = ["vrf", "experimental"]
36 |
37 | [dependencies]
38 | ## Required dependencies ##
39 | async-trait = "0.1"
40 | curve25519-dalek = { version = "4", optional = true }
41 | ed25519-dalek = { version = "2", features = [
42 | "digest",
43 | "legacy_compatibility",
44 | ], optional = true }
45 | hex = "0.4"
46 | zeroize = "1"
47 |
48 | ## Optional dependencies ##
49 | blake3 = { version = "1", optional = true, default-features = false }
50 | protobuf = { version = "3", optional = true }
51 | rand = { version = "0.8", optional = true }
52 | serde = { version = "1", features = ["derive"], optional = true }
53 | serde_bytes = { version = "0.11", optional = true }
54 | tokio = { version = "1", features = ["rt"], optional = true }
55 | paste = { version = "1", optional = true }
56 |
57 | [dev-dependencies]
58 | bincode = "1"
59 | itertools = "0.13"
60 | proptest = "1"
61 | proptest-derive = "0.6"
62 | rand = "0.8"
63 | serde = { version = "1", features = ["derive"] }
64 | criterion = "0.5"
65 |
66 | # To enable the public-tests feature in tests
67 | akd_core = { path = ".", features = ["public_tests"] }
68 |
69 | [[bench]]
70 | name = "parallel_vrfs"
71 | harness = false
72 | required-features = ["bench"]
73 |
--------------------------------------------------------------------------------
/akd_core/src/verify/lookup.rs:
--------------------------------------------------------------------------------
1 | // Copyright (c) Meta Platforms, Inc. and affiliates.
2 | //
3 | // This source code is dual-licensed under either the MIT license found in the
4 | // LICENSE-MIT file in the root directory of this source tree or the Apache
5 | // License, Version 2.0 found in the LICENSE-APACHE file in the root directory
6 | // of this source tree. You may select, at your option, one of the above-listed licenses.
7 |
8 | //! Verification of lookup proofs
9 |
10 | use super::base::{verify_existence, verify_existence_with_val, verify_nonexistence};
11 | use super::VerificationError;
12 |
13 | use crate::configuration::Configuration;
14 | use crate::hash::Digest;
15 | use crate::{AkdLabel, LookupProof, VerifyResult, VersionFreshness};
16 |
17 | /// Verifies a lookup with respect to the root_hash
18 | pub fn lookup_verify<TC: Configuration>(
19 | vrf_public_key: &[u8],
20 | root_hash: Digest,
21 | current_epoch: u64,
22 | akd_label: AkdLabel,
23 | proof: LookupProof,
24 | ) -> Result<VerifyResult, VerificationError> {
25 | if proof.version > current_epoch {
26 | return Err(VerificationError::LookupProof(alloc::format!(
27 | "Proof version {} is greater than current epoch {}",
28 | proof.version,
29 | current_epoch
30 | )));
31 | }
32 |
33 | verify_existence_with_val::<TC>(
34 | vrf_public_key,
35 | root_hash,
36 | &akd_label,
37 | &proof.value,
38 | proof.epoch,
39 | &proof.commitment_nonce,
40 | VersionFreshness::Fresh,
41 | proof.version,
42 | &proof.existence_vrf_proof,
43 | &proof.existence_proof,
44 | )?;
45 |
46 | let marker_version = 1 << crate::utils::get_marker_version_log2(proof.version);
47 | verify_existence::<TC>(
48 | vrf_public_key,
49 | root_hash,
50 | &akd_label,
51 | VersionFreshness::Fresh,
52 | marker_version,
53 | &proof.marker_vrf_proof,
54 | &proof.marker_proof,
55 | )?;
56 |
57 | verify_nonexistence::<TC>(
58 | vrf_public_key,
59 | root_hash,
60 | &akd_label,
61 | VersionFreshness::Stale,
62 | proof.version,
63 | &proof.freshness_vrf_proof,
64 | &proof.freshness_proof,
65 | )?;
66 |
67 | Ok(VerifyResult {
68 | epoch: proof.epoch,
69 | version: proof.version,
70 | value: proof.value,
71 | })
72 | }
73 |
--------------------------------------------------------------------------------
/examples/src/fixture_generator/examples/example_tests.rs:
--------------------------------------------------------------------------------
1 | // Copyright (c) Meta Platforms, Inc. and affiliates.
2 | //
3 | // This source code is dual-licensed under either the MIT license found in the
4 | // LICENSE-MIT file in the root directory of this source tree or the Apache
5 | // License, Version 2.0 found in the LICENSE-APACHE file in the root directory
6 | // of this source tree. You may select, at your option, one of the above-listed licenses.
7 |
8 | //! Example test utilizing a fixture file.
9 |
10 | use std::fs::File;
11 |
12 | use akd::{
13 | append_only_zks::AzksParallelismConfig,
14 | directory::Directory,
15 | ecvrf::HardCodedAkdVRF,
16 | storage::{memory::AsyncInMemoryDatabase, Database, StorageManager, StorageUtil},
17 | NamedConfiguration,
18 | };
19 |
20 | use crate::fixture_generator::reader::Reader;
21 | use crate::{fixture_generator::reader::yaml::YamlFileReader, test_config};
22 |
23 | // Contains two consecutive states and the delta between them
24 | const FILE_PATH: &str = "src/fixture_generator/examples";
25 |
26 | test_config!(test_use_fixture);
27 | async fn test_use_fixture<TC: NamedConfiguration>() {
28 | // load fixture
29 | let mut reader =
30 | YamlFileReader::new(File::open(format!("{}/{}.yaml", FILE_PATH, TC::name())).unwrap())
31 | .unwrap();
32 | let metadata = reader.read_metadata().unwrap();
33 | let epochs = metadata.args.capture_states.unwrap();
34 |
35 | // prepare directory with initial state
36 | let initial_state = reader.read_state(epochs[0]).unwrap();
37 | let db = AsyncInMemoryDatabase::new();
38 | db.batch_set(initial_state.records, akd::storage::DbSetState::General)
39 | .await
40 | .unwrap();
41 | let vrf = HardCodedAkdVRF {};
42 | let storage_manager = StorageManager::new_no_cache(db);
43 | let akd = Directory::<TC, _, _>::new(
44 | storage_manager.clone(),
45 | vrf,
46 | AzksParallelismConfig::default(),
47 | )
48 | .await
49 | .unwrap();
50 |
51 | // publish delta updates
52 | let delta = reader.read_delta(epochs[1]).unwrap();
53 | akd.publish(delta.updates).await.unwrap();
54 |
55 | // assert final directory state
56 | let final_state = reader.read_state(epochs[1]).unwrap();
57 | let records = storage_manager
58 | .get_db()
59 | .batch_get_all_direct()
60 | .await
61 | .unwrap();
62 | assert_eq!(final_state.records.len(), records.len());
63 | assert!(records.iter().all(|r| final_state.records.contains(r)));
64 | }
65 |
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contributing to this library
2 | We want to make contributing to this project as easy and transparent as
3 | possible.
4 |
5 | ## Pull Requests
6 | We actively welcome your pull requests.
7 |
8 | 1. Fork the repo and create your branch from `main`.
9 | 2. If you've added code that should be tested, add tests.
10 | 3. If you've changed APIs, update the documentation.
11 | 4. Ensure the test suite passes.
12 | 5. If you haven't already, complete the Contributor License Agreement ("CLA").
13 |
14 | ### Special note regarding MySQL based tests
15 | We support MySQL directly within this repository. In order to utilize a MySQL database you may utilize the supplied [```docker-compose.yml```](docker-compose.yml) specification. It will create a basic database (named ```default```) and configure a container with the appropriate ports opened and mapped to the MySQL port. A valid [docker](https://www.docker.com/products/docker-desktop) instance is a dependency for this tool.
16 |
17 | You can instantiate the container with
18 | ```bash
19 | cd <path to the cloned akd repository>
20 |
21 | docker compose up [-d]
22 | ```
23 | where the ```-d``` flag indicates to background the process. If you want to run the container interactively, don't add this flag.
24 |
25 | When finished, you can terminate the container with ```CTRL-C``` if you ran it interactively, or with ```docker compose down``` if you ran it with the ```-d``` flag.
26 |
27 | The MySQL connection info for this test container is
28 | ```
29 | MySQL port opened on local machine: 8001
30 | User: "root"
31 | Password: "example"
32 | Default database: "default"
33 | ```
34 |
35 | You can see an example configured connection in the code [here](examples/src/mysql_demo/tests/mysql_db_tests.rs).
36 |
37 | ## Contributor License Agreement ("CLA")
38 | In order to accept your pull request, we need you to submit a CLA. You only need
39 | to do this once to work on any of Facebook's open source projects.
40 |
41 | Complete your CLA here:
42 |
43 | ## Issues
44 | We use GitHub issues to track public bugs. Please ensure your description is
45 | clear and has sufficient instructions to be able to reproduce the issue.
46 |
47 | Facebook has a [bounty program](https://www.facebook.com/whitehat/) for the safe
48 | disclosure of security bugs. In those cases, please go through the process
49 | outlined on that page and do not file a public issue.
50 |
51 | ## License
52 |
53 | By contributing to akd, you agree that your contributions will be
54 | licensed under both the LICENSE-MIT and LICENSE-APACHE files in the root
55 | directory of this source tree.
--------------------------------------------------------------------------------
/examples/src/fixture_generator/reader/tests.rs:
--------------------------------------------------------------------------------
1 | // Copyright (c) Meta Platforms, Inc. and affiliates.
2 | //
3 | // This source code is dual-licensed under either the MIT license found in the
4 | // LICENSE-MIT file in the root directory of this source tree or the Apache
5 | // License, Version 2.0 found in the LICENSE-APACHE file in the root directory
6 | // of this source tree. You may select, at your option, one of the above-listed licenses.
7 |
8 | //! Tests basic reader behavior.
9 |
10 | use std::env;
11 | use std::fs::File;
12 |
13 | use akd::NamedConfiguration;
14 | use assert_fs::fixture::{FileWriteStr, NamedTempFile, TempDir};
15 | use clap::Parser;
16 |
17 | use crate::fixture_generator::generator;
18 | use crate::fixture_generator::parser::Args;
19 | use crate::fixture_generator::reader::yaml::YamlFileReader;
20 | use crate::fixture_generator::reader::{Reader, ReaderError};
21 | use crate::test_config;
22 |
23 | type L = akd::ExampleLabel;
24 |
25 | test_config!(test_read);
26 | async fn test_read<TC: NamedConfiguration>() {
27 | // generate a temp fixture file
28 | let file = TempDir::new()
29 | .unwrap()
30 | .with_file_name(format!("{}.yaml", TC::name()));
31 | let args = Args::parse_from(vec![
32 | env!("CARGO_CRATE_NAME"),
33 | "--epochs",
34 | "10",
35 | "--capture_deltas",
36 | "10",
37 | "--capture_states",
38 | "9",
39 | "10",
40 | "--out",
41 | &format!("{}", file.parent().unwrap().display()),
42 | ]);
43 | generator::generate::<TC>(&args).await;
44 |
45 | // initialize reader
46 | let mut reader = YamlFileReader::new(File::open(file).unwrap()).unwrap();
47 |
48 | // objects can be read in any order
49 | assert!(reader.read_state(10).is_ok());
50 | assert!(reader.read_delta(10).is_ok());
51 | assert!(reader.read_state(9).is_ok());
52 | assert!(reader.read_metadata().is_ok());
53 |
54 | // reading a non-existent object will return a NotFound error
55 | assert_eq!(Err(ReaderError::NotFound), reader.read_delta(9));
56 | assert_eq!(Err(ReaderError::NotFound), reader.read_state(11));
57 |
58 | // reading an already read object is OK
59 | assert!(reader.read_metadata().is_ok());
60 | }
61 |
62 | #[tokio::test]
63 | async fn test_read_invalid_format() {
64 | // create an invalid file with no YAML separators
65 | let file = NamedTempFile::new("invalid.yaml").unwrap();
66 | file.write_str("a\nb\nc\n").unwrap();
67 |
68 | // initialize reader
69 | let mut reader = YamlFileReader::new(File::open(file).unwrap()).unwrap();
70 |
71 | // reading any object will return a Format error
72 | assert!(matches!(
73 | reader.read_metadata(),
74 | Err(ReaderError::Format(_))
75 | ));
76 | assert!(matches!(reader.read_state(0), Err(ReaderError::Format(_))));
77 | }
78 |
--------------------------------------------------------------------------------
/examples/src/main.rs:
--------------------------------------------------------------------------------
1 | // Copyright (c) Meta Platforms, Inc. and affiliates.
2 | //
3 | // This source code is dual-licensed under either the MIT license found in the
4 | // LICENSE-MIT file in the root directory of this source tree or the Apache
5 | // License, Version 2.0 found in the LICENSE-APACHE file in the root directory
6 | // of this source tree. You may select, at your option, one of the above-listed licenses.
7 |
8 | //! A set of example applications and utilities for AKD
9 |
10 | mod fixture_generator;
11 | mod mysql_demo;
12 | mod test_vectors;
13 | mod wasm_client;
14 | mod whatsapp_kt_auditor;
15 |
16 | use anyhow::Result;
17 | use clap::{Parser, Subcommand};
18 |
19 | /// AKD examples
20 | #[derive(Parser, Debug)]
21 | #[clap(author, about, long_about = None)]
22 | pub struct Arguments {
23 | /// The type of example to run
24 | #[clap(subcommand)]
25 | example: ExampleType,
26 | }
27 |
28 | #[derive(Subcommand, Debug, Clone)]
29 | enum ExampleType {
30 | /// WhatsApp Key Transparency Auditor
31 | WhatsappKtAuditor(whatsapp_kt_auditor::CliArgs),
32 | /// MySQL Demo
33 | MysqlDemo(mysql_demo::CliArgs),
34 | /// Fixture Generator
35 | FixtureGenerator(fixture_generator::Args),
36 | /// Test vectors generator
37 | TestVectors(test_vectors::Args),
38 | }
39 |
40 | // MAIN //
41 | #[tokio::main]
42 | async fn main() -> Result<()> {
43 | let args = Arguments::parse();
44 |
45 | match args.example {
46 | ExampleType::WhatsappKtAuditor(args) => whatsapp_kt_auditor::render_cli(args).await?,
47 | ExampleType::MysqlDemo(args) => mysql_demo::render_cli(args).await?,
48 | ExampleType::FixtureGenerator(args) => fixture_generator::run(args).await,
49 | ExampleType::TestVectors(args) => test_vectors::run(args).await,
50 | }
51 |
52 | Ok(())
53 | }
54 |
55 | // Test macros
56 |
57 | #[cfg(test)]
58 | #[macro_export]
59 | // NOTE(new_config): Add new configurations here
60 | macro_rules! test_config {
61 | ( $x:ident ) => {
62 | paste::paste! {
63 | #[tokio::test]
64 | async fn [<$x _ whatsapp_v1_config>]() {
65 | $x::<akd::WhatsAppV1Configuration>().await
66 | }
67 |
68 | #[tokio::test]
69 | async fn [<$x _ experimental_config>]() {
70 | $x::<akd::ExperimentalConfiguration<akd::ExampleLabel>>().await
71 | }
72 | }
73 | };
74 | }
75 |
76 | #[cfg(test)]
77 | #[macro_export]
78 | // NOTE(new_config): Add new configurations here
79 | macro_rules! test_config_serial {
80 | ( $x:ident ) => {
81 | paste::paste! {
82 | #[serial_test::serial]
83 | #[tokio::test]
84 | async fn [<$x _ whatsapp_v1_config>]() {
85 | $x::<akd::WhatsAppV1Configuration>().await
86 | }
87 |
88 | #[serial_test::serial]
89 | #[tokio::test]
90 | async fn [<$x _ experimental_config>]() {
91 | $x::<akd::ExperimentalConfiguration<akd::ExampleLabel>>().await
92 | }
93 | }
94 | };
95 | }
96 |
--------------------------------------------------------------------------------
/akd_core/src/verify/mod.rs:
--------------------------------------------------------------------------------
1 | // Copyright (c) Meta Platforms, Inc. and affiliates.
2 | //
3 | // This source code is dual-licensed under either the MIT license found in the
4 | // LICENSE-MIT file in the root directory of this source tree or the Apache
5 | // License, Version 2.0 found in the LICENSE-APACHE file in the root directory
6 | // of this source tree. You may select, at your option, one of the above-listed licenses.
7 |
8 | //! This module contains verification calls for different proofs contained in the AKD crate
9 |
10 | pub mod base;
11 | pub mod history;
12 | pub mod lookup;
13 |
14 | #[cfg(feature = "nostd")]
15 | use alloc::format;
16 | #[cfg(feature = "nostd")]
17 | use alloc::string::String;
18 | #[cfg(feature = "nostd")]
19 | use alloc::string::ToString;
20 |
21 | /// Proof verification error types
22 | #[derive(Debug, Eq, PartialEq)]
23 | pub enum VerificationError {
24 | /// Error verifying a membership proof
25 | MembershipProof(String),
26 | /// Error verifying a non-membership proof
27 | NonMembershipProof(String),
28 | /// Error verifying a lookup proof
29 | LookupProof(String),
30 | /// Error verifying a history proof
31 | HistoryProof(String),
32 | /// Error verifying a VRF proof
33 | #[cfg(feature = "vrf")]
34 | Vrf(crate::ecvrf::VrfError),
35 | /// Error converting protobuf types during verification
36 | #[cfg(feature = "protobuf")]
37 | Serialization(crate::proto::ConversionError),
38 | }
39 |
40 | impl core::fmt::Display for VerificationError {
41 | fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
42 | let code = match &self {
43 | VerificationError::MembershipProof(err) => format!("(Membership proof) - {err}"),
44 | VerificationError::NonMembershipProof(err) => {
45 | format!("(Non-membership proof) - {err}")
46 | }
47 | VerificationError::LookupProof(err) => format!("(Lookup proof) - {err}"),
48 | VerificationError::HistoryProof(err) => format!("(History proof) - {err}"),
49 | #[cfg(feature = "vrf")]
50 | VerificationError::Vrf(vrf) => vrf.to_string(),
51 | #[cfg(feature = "protobuf")]
52 | VerificationError::Serialization(proto) => proto.to_string(),
53 | };
54 | write!(f, "Verification error {code}")
55 | }
56 | }
57 |
58 | #[cfg(feature = "vrf")]
59 | impl From<crate::ecvrf::VrfError> for VerificationError {
60 | fn from(input: crate::ecvrf::VrfError) -> Self {
61 | VerificationError::Vrf(input)
62 | }
63 | }
64 |
65 | #[cfg(feature = "protobuf")]
66 | impl From<crate::proto::ConversionError> for VerificationError {
67 | fn from(input: crate::proto::ConversionError) -> Self {
68 | VerificationError::Serialization(input)
69 | }
70 | }
71 |
72 | #[cfg(feature = "protobuf")]
73 | impl From<protobuf::Error> for VerificationError {
74 | fn from(input: protobuf::Error) -> Self {
75 | let conv: crate::proto::ConversionError = input.into();
76 | conv.into()
77 | }
78 | }
79 |
80 | // Re-export the necessary verification functions
81 |
82 | #[cfg(feature = "public_tests")]
83 | pub use base::{verify_membership_for_tests_only, verify_nonmembership_for_tests_only};
84 |
85 | pub use history::{key_history_verify, HistoryVerificationParams};
86 | pub use lookup::lookup_verify;
87 |
--------------------------------------------------------------------------------
/akd/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "akd"
3 | version = "0.12.0-pre.12"
4 | authors = ["akd contributors"]
5 | description = "An implementation of an auditable key directory"
6 | license = "MIT OR Apache-2.0"
7 | edition = "2021"
8 | keywords = ["key-transparency", "akd"]
9 | repository = "https://github.com/facebook/akd"
10 | readme = "../README.md"
11 |
12 | [features]
13 | # Supported configurations
14 | whatsapp_v1 = ["akd_core/whatsapp_v1"]
15 | experimental = ["akd_core/experimental"]
16 |
17 | # Default features mix (experimental + audit-proof protobuf mgmt support)
18 | default = [
19 | "public_auditing",
20 | "parallel_vrf",
21 | "preload_history",
22 | "greedy_lookup_preload",
23 | "experimental",
24 | ]
25 |
26 | bench = ["experimental", "public_tests", "tokio/rt-multi-thread"]
27 | # Greedy loading of lookup proof nodes
28 | greedy_lookup_preload = []
29 | public_auditing = ["dep:protobuf", "akd_core/protobuf"]
30 | # Parallelize VRF calculations during publish
31 | parallel_vrf = ["akd_core/parallel_vrf"]
32 | # Enable pre-loading of the nodes when generating history proofs
33 | preload_history = []
34 | public_tests = [
35 | "rand",
36 | "dep:colored",
37 | "dep:once_cell",
38 | "serde_serialization",
39 | "akd_core/public_tests",
40 | "akd_core/rand",
41 | "dep:paste",
42 | ]
43 | rand = ["dep:rand"]
44 | # Collect runtime metrics on db access calls + timing
45 | runtime_metrics = []
46 | serde_serialization = ["dep:serde", "akd_core/serde_serialization"]
47 | # TESTING ONLY: Artifically slow the in-memory database (for benchmarking)
48 | slow_internal_db = []
49 | # Tracing instrumentation
50 | tracing = ["dep:tracing"]
51 | # Tracing-based instrumentation
52 | tracing_instrument = ["tracing/attributes"]
53 |
54 | [dependencies]
55 | ## Required dependencies ##
56 | akd_core = { version = "0.12.0-pre.12", path = "../akd_core", default-features = false, features = [
57 | "vrf",
58 | ] }
59 | async-recursion = "1"
60 | async-trait = "0.1"
61 | dashmap = "5"
62 | hex = "0.4"
63 | log = { version = "0.4", features = ["kv_unstable"] }
64 | tokio = { version = "1", features = ["sync", "time", "rt"] }
65 |
66 | ## Optional dependencies ##
67 | colored = { version = "2", optional = true }
68 | once_cell = { version = "1", optional = true }
69 | paste = { version = "1", optional = true }
70 | protobuf = { version = "3", optional = true }
71 | rand = { version = "0.8", optional = true }
72 | serde = { version = "1", features = ["derive"], optional = true }
73 | tracing = { version = "0.1.40", optional = true }
74 |
75 | [dev-dependencies]
76 | criterion = "0.5"
77 | serial_test = "2"
78 | proptest = "1"
79 | proptest-derive = "0.4"
80 | colored = "2"
81 | once_cell = "1"
82 | ctor = "0.2"
83 | tokio-test = "0.4"
84 | tokio = { version = "1", features = ["rt", "sync", "time", "macros"] }
85 | mockall = "0.11"
86 | futures = "0.3"
87 | itertools = "0.11"
88 |
89 | # To enable the public_tests feature in tests
90 | akd = { path = ".", features = [
91 | "public_tests",
92 | "whatsapp_v1",
93 | "experimental",
94 | ], default-features = false }
95 |
96 | [[bench]]
97 | name = "azks"
98 | harness = false
99 | required-features = ["bench"]
100 |
101 | [[bench]]
102 | name = "directory"
103 | harness = false
104 | required-features = ["bench"]
105 |
--------------------------------------------------------------------------------
/akd/src/tests/test_preloads.rs:
--------------------------------------------------------------------------------
1 | // Copyright (c) Meta Platforms, Inc. and affiliates.
2 | //
3 | // This source code is dual-licensed under either the MIT license found in the
4 | // LICENSE-MIT file in the root directory of this source tree or the Apache
5 | // License, Version 2.0 found in the LICENSE-APACHE file in the root directory
6 | // of this source tree. You may select, at your option, one of the above-listed licenses.
7 |
8 | //! Contains the tests for ensuring that preloading of nodes works as intended
9 |
10 | use akd_core::configuration::Configuration;
11 |
12 | use crate::{
13 | append_only_zks::AzksParallelismConfig,
14 | directory::Directory,
15 | ecvrf::HardCodedAkdVRF,
16 | errors::{AkdError, StorageError},
17 | storage::{manager::StorageManager, memory::AsyncInMemoryDatabase},
18 | test_config,
19 | tests::{setup_mocked_db, MockLocalDatabase},
20 | tree_node::TreeNodeWithPreviousValue,
21 | AkdLabel, AkdValue,
22 | };
23 |
24 | test_config!(test_publish_op_makes_no_get_requests);
25 | async fn test_publish_op_makes_no_get_requests<TC: Configuration>() -> Result<(), AkdError> {
26 | let test_db = AsyncInMemoryDatabase::new();
27 |
28 | let mut db = MockLocalDatabase {
29 | ..Default::default()
30 | };
31 | setup_mocked_db(&mut db, &test_db);
32 |
33 | let storage = StorageManager::new_no_cache(db);
34 | let vrf = HardCodedAkdVRF {};
35 | let akd = Directory::::new(storage, vrf, AzksParallelismConfig::default())
36 | .await
37 | .expect("Failed to create directory");
38 |
39 | // Create a set with 2 updates, (label, value) pairs
40 | // ("hello10", "hello10")
41 | // ("hello11", "hello11")
42 | let mut updates = vec![];
43 | for i in 0..2 {
44 | updates.push((
45 | AkdLabel(format!("hello1{i}").as_bytes().to_vec()),
46 | AkdValue(format!("hello1{i}").as_bytes().to_vec()),
47 | ));
48 | }
49 | // Publish the updates. Now the akd's epoch will be 1.
50 | akd.publish(updates)
51 | .await
52 | .expect("Failed to do initial publish");
53 |
54 | // create a new mock, this time which explodes on any "get" of tree-nodes (shouldn't happen). It is still backed by the same
55 | // async in-mem db so all previous data should be there
56 | let mut db2 = MockLocalDatabase {
57 | ..Default::default()
58 | };
59 | setup_mocked_db(&mut db2, &test_db);
60 | db2.expect_get::()
61 | .returning(|_| Err(StorageError::Other("Boom!".to_string())));
62 |
63 | let storage = StorageManager::new_no_cache(db2);
64 | let vrf = HardCodedAkdVRF {};
65 | let akd = Directory::::new(storage, vrf, AzksParallelismConfig::default())
66 | .await
67 | .expect("Failed to create directory");
68 |
69 | // create more updates
70 | let mut updates = vec![];
71 | for i in 0..2 {
72 | updates.push((
73 | AkdLabel(format!("hello1{i}").as_bytes().to_vec()),
74 | AkdValue(format!("hello1{}", i + 1).as_bytes().to_vec()),
75 | ));
76 | }
77 |
78 | // try to publish again, this time with the "boom" returning from any mocked get-calls
79 | // on tree nodes
80 | akd.publish(updates)
81 | .await
82 | .expect("Failed to do subsequent publish");
83 |
84 | Ok(())
85 | }
86 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | ## akd 
2 |
3 | An implementation of an auditable key directory (also known as a verifiable registry or authenticated dictionary).
4 |
5 | Auditable key directories can be used to help provide key transparency for end-to-end encrypted
6 | messaging.
7 |
This implementation is based on the protocols described in
9 | [SEEMless](https://eprint.iacr.org/2018/607), with ideas incorporated from [Parakeet](https://eprint.iacr.org/2023/081).
10 |
11 | This library provides a stateless API for an auditable key directory, meaning that a consumer of this library must provide their own solution for the storage of the entries of the directory.
12 |
13 | Documentation
14 | -------------
15 |
16 | The API can be found [here](https://docs.rs/akd/) along with an example for usage. To learn more about the technical details
17 | behind how the directory is constructed, see [here](https://docs.rs/akd_core/).
18 |
19 | Installation
20 | ------------
21 |
22 | Add the following line to the dependencies of your `Cargo.toml`:
23 |
24 | ```
25 | akd = "0.12.0-pre.11"
26 | ```
27 |
28 | ### Minimum Supported Rust Version
29 |
30 | Rust **1.51** or higher.
31 |
32 | Top-Level Directory Organization
33 | --------------------------------
34 |
35 | | Subfolder | On crates.io? | Description |
36 | | :--- | :---: | :--- |
37 | | `akd` | ✓ | Main implementation of AKD which a service provider that manages the underlying directory would need to run. A good starting point for diving into this implementation. |
38 | | `akd_core` | ✓ | Minimal library consisting of core operations in AKD. |
39 | | `examples` | | Contains various examples for using AKD, along with utilities such as locally verifying audit proofs that are produced by WhatsApp's key transparency deployment. More details are contained [here](examples/README.md). |
40 | | `xtask` | | Used for running the code coverage pipeline. |
41 |
42 |
43 | Audit
44 | -----
45 |
46 | This library was audited by NCC Group in August of 2023. The audit was sponsored by Meta for its use in [WhatsApp's key transparency deployment](https://engineering.fb.com/2023/04/13/security/whatsapp-key-transparency/).
47 |
48 | The audit found issues in release `v0.9.0`, and the fixes were subsequently incorporated into release `v0.11.0`. See the [full audit report here](https://research.nccgroup.com/2023/11/14/public-report-whatsapp-auditable-key-directory-akd-implementation-review/).
49 |
50 | Contributors
51 | ------------
52 |
53 | The original authors of this code are
54 | Evan Au ([@afterdusk](https://github.com/afterdusk)),
55 | Alex Chernyak ([@alexme22](https://github.com/alexme22)),
56 | Dillon George ([@dillonrg](https://github.com/dillonrg)),
57 | Sean Lawlor ([@slawlor](https://github.com/slawlor)),
58 | Kevin Lewi ([@kevinlewi](https://github.com/kevinlewi)),
59 | Jasleen Malvai ([@jasleen1](https://github.com/jasleen1)), and
60 | Ercan Ozturk ([@eozturk1](https://github.com/eozturk1)).
61 | To learn more about contributing to this project, [see this document](https://github.com/facebook/akd/blob/main/CONTRIBUTING.md).
62 |
63 | License
64 | -------
65 |
66 | This project is dual-licensed under either the [MIT license](https://github.com/facebook/akd/blob/main/LICENSE-MIT)
67 | or the [Apache License, Version 2.0](https://github.com/facebook/akd/blob/main/LICENSE-APACHE).
68 | You may select, at your option, one of the above-listed licenses.
69 |
--------------------------------------------------------------------------------
/.github/workflows/wait-for-crate-dependency.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
# Finds expected version of a crate in another crate's Cargo.toml file
# Args: $1 = crate whose Cargo.toml is read, $2 = dependency crate to look up.
# Prints the first matching version string (possibly empty if none found).
get_crate_expected_version_number()
{
    local INDEX_CRATE=$1
    local TARGET_CRATE=$2

    local INDEX_TOML_FILE="$INDEX_CRATE/Cargo.toml"
    # Issue #174. The script is looking for multiple entries if the dependency is listed multiple times
    # Additionally this regex with grep works for both the notations
    # 1. crate = { some_other_options ... version = "x.y.z" ... other_options }
    # 2. crate = "x.y.z"
    # It also supports optional pre-release suffixes in the form of "-pre.x"
    # 3. crate = "w.x.y-pre.z"
    # NOTE(review): $INDEX_TOML_FILE is unquoted; fine while repo paths contain
    # no whitespace — confirm before reusing elsewhere.
    local EXPECTED_VERSION=$(grep "$TARGET_CRATE" $INDEX_TOML_FILE | grep -o '[0-9]\.[0-9\.]\+\(-pre\.[0-9]\+\)\?'| head -n 1)
    # head -n 1 already reduced this to a single token, so the unquoted echo is safe.
    echo $EXPECTED_VERSION
}
18 |
# Get published versions of a crate from https://github.com/rust-lang/crates.io-index/
# Args: $1 = raw index-file URL for the crate.
# Prints one quoted JSON version string per published release (the "vers" fields).
get_crate_published_versions()
{
    local CRATE_INDEX_URL=$1
    curl -sS "$CRATE_INDEX_URL" | jq .vers
}
27 |
# Retrieve the raw github url for a given crate based on the crate name following
# crates.io's indexing strategy (see the Cargo registry index format):
#   1 char   -> 1/{name}
#   2 chars  -> 2/{name}
#   3 chars  -> 3/{first-char}/{name}
#   4+ chars -> {first-two-chars}/{next-two-chars}/{name}
get_crate_raw_github_url() {
    local CRATE=$1
    local BASE="https://raw.githubusercontent.com/rust-lang/crates.io-index/master"

    # ${#CRATE} gives the name length directly (replaces the old wc -c dance).
    local STR_LEN=${#CRATE}
    if (($STR_LEN >= 4)); then
        echo "$BASE/${CRATE:0:2}/${CRATE:2:2}/$CRATE"
    elif (($STR_LEN == 3)); then
        echo "$BASE/3/${CRATE:0:1}/$CRATE"
    else
        # Bug fix: 1- and 2-character crate names live directly under "1/" or
        # "2/" in the index, with no per-letter subdirectory. The previous code
        # emitted e.g. "2/i/io" instead of "2/io".
        echo "$BASE/$STR_LEN/$CRATE"
    fi
}
44 |
# Wait for a specific crate version to be published to crates.io.
# See https://github.com/novifinancial/akd/issues/116.
# Must be run in the project root folder.
INDEX_CRATE=$1
TARGET_CRATE=$2

if [ "$INDEX_CRATE" == "" ] || [ "$TARGET_CRATE" == "" ]
then
    echo "Both the target crate and index crate are required arguments."
    echo "Usage:"
    echo "bash ./.github/workflows/wait-for-crate-dependency.sh INDEX_CRATE TARGET_CRATE"
    echo " - INDEX_CRATE : The crate which contains the dependency version specification"
    echo " - TARGET_CRATE : The crate which version needs to be published to build the INDEX_CRATE"
    exit 1
fi

# Resolve the version the index crate expects and the index URL to poll.
# NOTE(review): "|| exit 1" inside $() only exits the command-substitution
# subshell; a lookup failure does not abort the script — confirm intended.
EXPECTED_VERSION=$(get_crate_expected_version_number "$INDEX_CRATE" "$TARGET_CRATE" || exit 1)
echo "Expecting $TARGET_CRATE = { version = $EXPECTED_VERSION } for $INDEX_CRATE"
TARGET_URL=$(get_crate_raw_github_url "$TARGET_CRATE" || exit 1)
echo "Target URL for $TARGET_CRATE is $TARGET_URL"
# Poll with a linearly growing wait: 1s, 2s, ... up to 41s (about 14 minutes
# total) before giving up.
WAIT_TIME=1
while sleep $WAIT_TIME;
do
    PUBLISHED_VERSIONS=$(get_crate_published_versions "$TARGET_URL" | tr '\n' " ")
    echo "Available $TARGET_CRATE versions: $PUBLISHED_VERSIONS"
    # Match the version with surrounding quotes so e.g. "0.1.10" does not
    # satisfy an expected "0.1.1".
    EXISTS=$(echo $PUBLISHED_VERSIONS | grep "\"$EXPECTED_VERSION\"")
    if [[ $EXISTS != "" ]]; then
        echo "Expected version of $TARGET_CRATE ($EXPECTED_VERSION) has been published"
        break
    fi
    echo "Expected version of $TARGET_CRATE ($EXPECTED_VERSION) is not yet published. Retrying after a wait"
    WAIT_TIME=$((WAIT_TIME+1))
    if [[ $WAIT_TIME == 42 ]]; then
        echo "Giving up after 42 wait periods"
        exit 1
    fi
done
82 |
--------------------------------------------------------------------------------
/CODE_OF_CONDUCT.md:
--------------------------------------------------------------------------------
1 | # Code of Conduct
2 |
3 | ## Our Pledge
4 |
5 | In the interest of fostering an open and welcoming environment, we as
6 | contributors and maintainers pledge to make participation in our project and
7 | our community a harassment-free experience for everyone, regardless of age, body
8 | size, disability, ethnicity, sex characteristics, gender identity and expression,
9 | level of experience, education, socio-economic status, nationality, personal
10 | appearance, race, religion, or sexual identity and orientation.
11 |
12 | ## Our Standards
13 |
14 | Examples of behavior that contributes to creating a positive environment
15 | include:
16 |
17 | * Using welcoming and inclusive language
18 | * Being respectful of differing viewpoints and experiences
19 | * Gracefully accepting constructive criticism
20 | * Focusing on what is best for the community
21 | * Showing empathy towards other community members
22 |
23 | Examples of unacceptable behavior by participants include:
24 |
25 | * The use of sexualized language or imagery and unwelcome sexual attention or
26 | advances
27 | * Trolling, insulting/derogatory comments, and personal or political attacks
28 | * Public or private harassment
29 | * Publishing others' private information, such as a physical or electronic
30 | address, without explicit permission
31 | * Other conduct which could reasonably be considered inappropriate in a
32 | professional setting
33 |
34 | ## Our Responsibilities
35 |
36 | Project maintainers are responsible for clarifying the standards of acceptable
37 | behavior and are expected to take appropriate and fair corrective action in
38 | response to any instances of unacceptable behavior.
39 |
40 | Project maintainers have the right and responsibility to remove, edit, or
41 | reject comments, commits, code, wiki edits, issues, and other contributions
42 | that are not aligned to this Code of Conduct, or to ban temporarily or
43 | permanently any contributor for other behaviors that they deem inappropriate,
44 | threatening, offensive, or harmful.
45 |
46 | ## Scope
47 |
48 | This Code of Conduct applies within all project spaces, and it also applies when
49 | an individual is representing the project or its community in public spaces.
50 | Examples of representing a project or community include using an official
51 | project e-mail address, posting via an official social media account, or acting
52 | as an appointed representative at an online or offline event. Representation of
53 | a project may be further defined and clarified by project maintainers.
54 |
55 | ## Enforcement
56 |
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported by contacting the project team at <opensource-conduct@fb.com>. All
59 | complaints will be reviewed and investigated and will result in a response that
60 | is deemed necessary and appropriate to the circumstances. The project team is
61 | obligated to maintain confidentiality with regard to the reporter of an incident.
62 | Further details of specific enforcement policies may be posted separately.
63 |
64 | Project maintainers who do not follow or enforce the Code of Conduct in good
65 | faith may face temporary or permanent repercussions as determined by other
66 | members of the project's leadership.
67 |
68 | ## Attribution
69 |
70 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
71 | available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
72 |
73 | [homepage]: https://www.contributor-covenant.org
74 |
75 | For answers to common questions about this code of conduct, see
76 | https://www.contributor-covenant.org/faq
77 |
--------------------------------------------------------------------------------
/akd_core/src/ecvrf/mod.rs:
--------------------------------------------------------------------------------
1 | // Copyright (c) Meta Platforms, Inc. and affiliates.
2 | //
3 | // This source code is dual-licensed under either the MIT license found in the
4 | // LICENSE-MIT file in the root directory of this source tree or the Apache
5 | // License, Version 2.0 found in the LICENSE-APACHE file in the root directory
6 | // of this source tree. You may select, at your option, one of the above-listed licenses.
7 |
8 | //! This module contains implementations of a
9 | //! [verifiable random function](https://en.wikipedia.org/wiki/Verifiable_random_function)
10 | //! (currently only ECVRF). VRFs are used, in the case of this crate, to anonymize the
//! user id <-> node label mapping into a 1-way hash, which is verifiable without being
12 | //! regeneratable without the secret key.
13 | //!
14 | //! VRFs allow us to have the server generate a constant mapping from a user id to a node label
15 | //! but the client cannot themselves generate the mapping, only verify it. They can confirm
16 | //! a user id matches the label, but don't have the ability to determine the labels of other
17 | //! users in the directory.
18 | //!
19 | //! This module implements an instantiation of a verifiable random function known as
20 | //! [ECVRF-EDWARDS25519-SHA512-TAI from RFC9381](https://www.ietf.org/rfc/rfc9381.html).
21 | //!
22 | //!
23 | //! Adapted from Diem's NextGen Crypto module available [here](https://github.com/diem/diem/blob/502936fbd59e35276e2cf455532b143796d68a16/crypto/nextgen_crypto/src/vrf/ecvrf.rs)
24 |
25 | mod ecvrf_impl;
26 | mod traits;
27 | // export the functionality we want visible
28 | pub use crate::ecvrf::ecvrf_impl::{
29 | Output, Proof, VRFExpandedPrivateKey, VRFPrivateKey, VRFPublicKey,
30 | };
31 | pub use crate::ecvrf::traits::VRFKeyStorage;
32 | #[cfg(feature = "nostd")]
33 | use alloc::boxed::Box;
34 | #[cfg(feature = "nostd")]
35 | use alloc::format;
36 | #[cfg(feature = "nostd")]
37 | use alloc::string::String;
38 | #[cfg(feature = "nostd")]
39 | use alloc::string::ToString;
40 | #[cfg(feature = "nostd")]
41 | use alloc::vec::Vec;
42 |
43 | #[cfg(test)]
44 | mod tests;
45 |
/// An error related to verifiable random functions
#[derive(Debug, Eq, PartialEq)]
pub enum VrfError {
    /// A problem retrieving or decoding the VRF public key
    PublicKey(String),
    /// A problem retrieving or decoding the VRF signing key
    SigningKey(String),
    /// A problem verifying the VRF proof
    Verification(String),
}

impl core::fmt::Display for VrfError {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        // Map each variant to its human-readable category, then emit one
        // uniformly-formatted line.
        let (kind, msg) = match self {
            VrfError::PublicKey(msg) => ("Public Key", msg),
            VrfError::SigningKey(msg) => ("Signing Key", msg),
            VrfError::Verification(msg) => ("Verification", msg),
        };
        write!(f, "Verifiable random function error ({kind}) - {msg}")
    }
}
67 |
/// This is a version of VRFKeyStorage for testing purposes, which uses the example from the VRF crate.
///
/// const KEY_MATERIAL: &str = "c9afa9d845ba75166b5c215767b1d6934e50c3db36e89b127b8a622b120f6721";
// `HardCodedAkdVRF` is a field-less unit struct, so `Send` and `Sync` hold
// automatically via auto traits; the previous manual `unsafe impl Send/Sync`
// blocks were redundant and have been removed to eliminate unnecessary `unsafe`.
#[derive(Clone)]
pub struct HardCodedAkdVRF;
76 |
#[async_trait::async_trait]
impl VRFKeyStorage for HardCodedAkdVRF {
    /// Returns the fixed, hex-decoded test key material (see the constant in
    /// the struct's doc comment).
    async fn retrieve(&self) -> Result, VrfError> {
        // NOTE(review): a decode failure of this hard-coded constant would be a
        // bug; the error is surfaced as `VrfError::PublicKey` even though this
        // retrieves signing-key material — confirm the variant choice is intentional.
        hex::decode("c9afa9d845ba75166b5c215767b1d6934e50c3db36e89b127b8a622b120f6721")
            .map_err(|hex_err| VrfError::PublicKey(hex_err.to_string()))
    }
}
84 |
--------------------------------------------------------------------------------
/akd/src/test_utils.rs:
--------------------------------------------------------------------------------
1 | // Copyright (c) Meta Platforms, Inc. and affiliates.
2 | //
3 | // This source code is dual-licensed under either the MIT license found in the
4 | // LICENSE-MIT file in the root directory of this source tree or the Apache
5 | // License, Version 2.0 found in the LICENSE-APACHE file in the root directory
6 | // of this source tree. You may select, at your option, one of the above-listed licenses.
7 |
8 | //! This module contains common test utilities for crates generating tests utilizing the
9 | //! AKD crate
10 |
11 | use colored::*;
12 | use log::{Level, Metadata, Record};
13 | use once_cell::sync::OnceCell;
14 | use std::sync::Once;
15 | use std::time::{Duration, Instant};
16 |
// Instant captured on first logger initialization; log lines print elapsed
// time relative to this epoch.
static EPOCH: OnceCell = OnceCell::new();
// The single global logger instance registered with the `log` facade.
static LOGGER: TestConsoleLogger = TestConsoleLogger {};
// Guards the one-time global logger registration performed in `init_logger`.
static INIT_ONCE: Once = Once::new();

// Console logger used by tests; registered via `init_logger`.
pub(crate) struct TestConsoleLogger;
22 |
23 | impl TestConsoleLogger {
24 | pub(crate) fn format_log_record(record: &Record) {
25 | let target = {
26 | if let Some(target_str) = record.target().split(':').next_back() {
27 | if let Some(line) = record.line() {
28 | format!(" ({target_str}:{line})")
29 | } else {
30 | format!(" ({target_str})")
31 | }
32 | } else {
33 | "".to_string()
34 | }
35 | };
36 |
37 | let toc = if let Some(epoch) = EPOCH.get() {
38 | Instant::now() - *epoch
39 | } else {
40 | Duration::from_millis(0)
41 | };
42 |
43 | let seconds = toc.as_secs();
44 | let hours = seconds / 3600;
45 | let minutes = (seconds / 60) % 60;
46 | let seconds = seconds % 60;
47 | let miliseconds = toc.subsec_millis();
48 |
49 | let msg = format!(
50 | "[{:02}:{:02}:{:02}.{:03}] {:6} {}{}",
51 | hours,
52 | minutes,
53 | seconds,
54 | miliseconds,
55 | record.level(),
56 | record.args(),
57 | target
58 | );
59 | let msg = match record.level() {
60 | Level::Trace | Level::Debug => msg.white(),
61 | Level::Info => msg.blue(),
62 | Level::Warn => msg.yellow(),
63 | Level::Error => msg.red(),
64 | };
65 | println!("{msg}");
66 | }
67 | }
68 |
69 | impl log::Log for TestConsoleLogger {
70 | fn enabled(&self, _metadata: &Metadata) -> bool {
71 | true
72 | }
73 |
74 | fn log(&self, record: &Record) {
75 | if !self.enabled(record.metadata()) {
76 | return;
77 | }
78 | TestConsoleLogger::format_log_record(record);
79 | }
80 |
81 | fn flush(&self) {}
82 | }
83 |
/// Initialize the logger for console logging within test environments.
/// This is safe to call multiple times, but it will only initialize the logger
/// to the log-level _first_ set. If you want a specific log-level (e.g. Debug)
/// for a specific test, make sure to only run that single test after editing that
/// test's log-level.
///
/// The default level applied everywhere is Info
pub fn init_logger(level: Level) {
    // Capture the epoch exactly once so all log timestamps are relative to the
    // first initialization.
    EPOCH.get_or_init(Instant::now);

    INIT_ONCE.call_once(|| {
        // `set_logger` fails if a global logger is already registered; the
        // `Once` guard makes a repeat registration from this path impossible,
        // hence the unwrap.
        // NOTE(review): this still panics if some other crate registered a
        // global logger first — confirm tests never do that.
        log::set_logger(&LOGGER)
            .map(|()| log::set_max_level(level.to_level_filter()))
            .unwrap();
    });
}
100 |
/// Global test startup constructor. Only runs in the TEST profile. Each
/// crate which wants logging enabled in tests being run should make this call
/// itself.
#[cfg(test)]
#[ctor::ctor]
fn test_start() {
    // Default to Info; the first `init_logger` call wins (see its docs).
    init_logger(Level::Info);
}
109 |
--------------------------------------------------------------------------------
/akd/benches/directory.rs:
--------------------------------------------------------------------------------
1 | // Copyright (c) Meta Platforms, Inc. and affiliates.
2 | //
3 | // This source code is dual-licensed under either the MIT license found in the
4 | // LICENSE-MIT file in the root directory of this source tree or the Apache
5 | // License, Version 2.0 found in the LICENSE-APACHE file in the root directory
6 | // of this source tree. You may select, at your option, one of the above-listed licenses.
7 |
8 | #[macro_use]
9 | extern crate criterion;
10 |
11 | mod common;
12 |
13 | use akd::append_only_zks::AzksParallelismConfig;
14 | use akd::ecvrf::HardCodedAkdVRF;
15 | use akd::storage::manager::StorageManager;
16 | use akd::storage::memory::AsyncInMemoryDatabase;
17 | use akd::NamedConfiguration;
18 | use akd::{AkdLabel, AkdValue, Directory};
19 | use criterion::{BatchSize, Criterion};
20 | use rand::distributions::Alphanumeric;
21 | use rand::rngs::StdRng;
22 | use rand::{Rng, SeedableRng};
23 |
bench_config!(history_generation);
// Benchmarks key-history proof generation: builds a directory of ~1000 users
// updated over several epochs (setup, untimed), then times `key_history` for a
// single user against a freshly flushed storage cache each iteration.
fn history_generation(c: &mut Criterion) {
    let num_users = 1000;
    let num_updates = 10;
    let runtime = tokio::runtime::Builder::new_multi_thread()
        .enable_time()
        .build()
        .unwrap();

    // Labels "User 1" .. "User 999" (range starts at 1, so num_users - 1 users).
    let idata = (1..num_users)
        .into_iter()
        .map(|i| {
            let user = format!("User {}", i);
            AkdLabel::from(&user)
        })
        .collect::>();

    let id = format!(
        "Benchmark key history proof generation on a small tree ({})",
        TC::name()
    );

    c.bench_function(&id, move |b| {
        b.iter_batched(
            || {
                // Setup (untimed): fixed RNG seed so every batch builds an
                // identical tree.
                let mut rng = StdRng::seed_from_u64(42);
                let database = AsyncInMemoryDatabase::new();
                let vrf = HardCodedAkdVRF {};
                let db = StorageManager::new(
                    database,
                    Some(std::time::Duration::from_secs(60)),
                    None,
                    Some(std::time::Duration::from_secs(60)),
                );
                let db_clone = db.clone();
                let directory = runtime
                    .block_on(async move {
                        Directory::::new(db, vrf, AzksParallelismConfig::default()).await
                    })
                    .unwrap();

                // Publish one random value for every user in each epoch.
                for _epoch in 1..num_updates {
                    let value: String = (0..rng.gen_range(10..20))
                        .map(|_| rng.sample(&Alphanumeric))
                        .map(char::from)
                        .collect();
                    let data = idata
                        .iter()
                        .map(|k| (k.clone(), AkdValue::from(&value)))
                        .collect::>();
                    runtime.block_on(directory.publish(data)).unwrap();
                }

                (directory, db_clone)
            },
            |(directory, db)| {
                // flush the cache prior to each generation to get fresh results
                runtime.block_on(db.flush_cache());

                // generate for the most recent 5 updates
                let label = AkdLabel::from("User 1");
                let params = akd::HistoryParams::MostRecent(5);
                runtime
                    .block_on(directory.key_history(&label, params))
                    .unwrap();
            },
            BatchSize::PerIteration,
        );
    });
}
94 |
group_config!(directory_benches, history_generation);

// Entry point: runs the benchmark group once per enabled AKD configuration
// (the `*_config` functions are generated by the `group_config!` macro above).
fn main() {
    // NOTE(new_config): Add a new configuration here

    #[cfg(feature = "whatsapp_v1")]
    directory_benches_whatsapp_v1_config();
    #[cfg(feature = "experimental")]
    directory_benches_experimental_config();

    Criterion::default().configure_from_args().final_summary();
}
107 |
--------------------------------------------------------------------------------
/examples/src/mysql_demo/logs.rs:
--------------------------------------------------------------------------------
1 | // Copyright (c) Meta Platforms, Inc. and affiliates.
2 | //
3 | // This source code is dual-licensed under either the MIT license found in the
4 | // LICENSE-MIT file in the root directory of this source tree or the Apache
5 | // License, Version 2.0 found in the LICENSE-APACHE file in the root directory
6 | // of this source tree. You may select, at your option, one of the above-listed licenses.
7 |
8 | extern crate thread_id;
9 |
10 | use colored::*;
11 | use log::{Level, Metadata, Record};
12 | use once_cell::sync::OnceCell;
13 | use tokio::time::{Duration, Instant};
14 |
15 | use std::fs::File;
16 | use std::io;
17 | use std::io::Write;
18 | use std::path::Path;
19 | use std::sync::Mutex;
20 |
// Instant captured on the first `ConsoleLogger::touch` call; log lines print
// elapsed time relative to this epoch.
static EPOCH: OnceCell = OnceCell::new();

// Logger writing colored output to stdout, filtered by `level`.
pub(crate) struct ConsoleLogger {
    pub(crate) level: Level,
}
26 |
27 | impl ConsoleLogger {
28 | pub(crate) fn touch() {
29 | EPOCH.get_or_init(Instant::now);
30 | }
31 |
32 | pub(crate) fn format_log_record(io: &mut (dyn Write + Send), record: &Record, no_color: bool) {
33 | let target = {
34 | if let Some(target_str) = record.target().split(':').next_back() {
35 | if let Some(line) = record.line() {
36 | format!(" ({target_str}:{line})")
37 | } else {
38 | format!(" ({target_str})")
39 | }
40 | } else {
41 | "".to_string()
42 | }
43 | };
44 |
45 | let toc = if let Some(epoch) = EPOCH.get() {
46 | Instant::now() - *epoch
47 | } else {
48 | Duration::from_millis(0)
49 | };
50 |
51 | let seconds = toc.as_secs();
52 | let hours = seconds / 3600;
53 | let minutes = (seconds / 60) % 60;
54 | let seconds = seconds % 60;
55 | let miliseconds = toc.subsec_millis();
56 |
57 | let msg = format!(
58 | "[{:02}:{:02}:{:02}.{:03}] ({:x}) {:6} {}{}",
59 | hours,
60 | minutes,
61 | seconds,
62 | miliseconds,
63 | thread_id::get(),
64 | record.level(),
65 | record.args(),
66 | target
67 | );
68 | if no_color {
69 | let _ = writeln!(io, "{msg}");
70 | } else {
71 | let msg = match record.level() {
72 | Level::Trace | Level::Debug => msg.white(),
73 | Level::Info => msg.blue(),
74 | Level::Warn => msg.yellow(),
75 | Level::Error => msg.red(),
76 | };
77 | let _ = writeln!(io, "{msg}");
78 | }
79 | }
80 | }
81 |
82 | impl log::Log for ConsoleLogger {
83 | fn enabled(&self, metadata: &Metadata) -> bool {
84 | metadata.level() <= self.level
85 | }
86 |
87 | fn log(&self, record: &Record) {
88 | if !self.enabled(record.metadata()) {
89 | return;
90 | }
91 | let mut io = std::io::stdout();
92 | ConsoleLogger::format_log_record(&mut io, record, false);
93 | }
94 |
95 | fn flush(&self) {
96 | let _ = std::io::stdout().flush();
97 | }
98 | }
99 |
// Logger appending plain (uncolored) log lines to a file.
pub(crate) struct FileLogger {
    // Mutex-guarded file sink; `log` may be called from multiple threads.
    sink: Mutex,
}
103 |
impl FileLogger {
    /// Create a logger writing to the file at `path`, truncating any existing
    /// contents (via `File::create`). Fails if the file cannot be created.
    pub(crate) fn new>(path: T) -> io::Result {
        let file = File::create(path)?;
        Ok(Self {
            sink: Mutex::new(file),
        })
    }
}
112 |
113 | impl log::Log for FileLogger {
114 | fn enabled(&self, _metadata: &Metadata) -> bool {
115 | // use the global log-level
116 | true
117 | }
118 |
119 | fn log(&self, record: &Record) {
120 | if !self.enabled(record.metadata()) {
121 | return;
122 | }
123 | let mut sink = &*self.sink.lock().unwrap();
124 | ConsoleLogger::format_log_record(&mut sink, record, true);
125 | }
126 |
127 | fn flush(&self) {
128 | let _ = std::io::stdout().flush();
129 | }
130 | }
131 |
--------------------------------------------------------------------------------
/.github/workflows/ci.yml:
--------------------------------------------------------------------------------
1 | name: CI
2 | on:
3 | push:
4 | branches:
5 | - main
6 | pull_request:
7 | types: [opened, reopened, synchronize]
8 |
9 | jobs:
10 | test:
11 | name: ${{matrix.name}}
12 | runs-on: ubuntu-latest
13 | strategy:
14 | fail-fast: false
15 | matrix:
16 | include:
17 | - name: Test the core crate (akd_core)
18 | package: akd_core
19 | flags:
20 |
21 | - name: Test the core crate (akd_core) with no standard library
22 | package: akd_core
23 | flags: --features nostd
24 |
25 | - name: Test the core crate (akd_core) with protobuf and serde enabled
26 | package: akd_core
27 | flags: --features protobuf,serde_serialization
28 |
29 | - name: Test the base library, with default features
30 | package: akd
31 |
32 | - name: Test the base library, enabling runtime metrics processing
33 | package: akd
34 | flags: --features runtime_metrics
35 |
36 | steps:
37 | - uses: actions/checkout@main
38 |
39 | - name: Install rust
40 | uses: actions-rs/toolchain@v1
41 | with:
42 | toolchain: stable
43 | override: true
44 |
45 | - name: Run test
46 | uses: actions-rs/cargo@v1
47 | with:
48 | command: test
49 | args: --package ${{matrix.package}} ${{matrix.flags}}
50 |
51 | clippy:
52 | name: Clippy
53 | runs-on: ubuntu-latest
54 | steps:
55 | - uses: actions/checkout@main
56 | - name: Install minimal stable with clippy
57 | uses: actions-rs/toolchain@v1
58 | with:
59 | profile: minimal
60 | toolchain: stable
61 | components: clippy
62 | override: true
63 |
64 | - name: Run Clippy
65 | uses: actions-rs/cargo@v1
66 | with:
67 | command: clippy
68 | args: --all -- -D clippy::all -D warnings
69 |
70 | rustfmt:
71 | name: rustfmt
72 | runs-on: ubuntu-latest
73 | steps:
74 | - uses: actions/checkout@main
75 | - name: Install minimal stable with rustfmt
76 | uses: actions-rs/toolchain@v1
77 | with:
78 | profile: minimal
79 | toolchain: stable
80 | components: rustfmt
81 | override: true
82 |
83 | - name: Build library (make sure all build.rs files have generated necessary code)
84 | uses: actions-rs/cargo@v1
85 | with:
86 | command: build
87 | args: --package akd_core
88 |
89 | - name: Run rustfmt
90 | uses: actions-rs/cargo@v1
91 | with:
92 | command: fmt
93 | args: --all -- --check
94 |
95 | benches:
96 | name: benches
97 | runs-on: ubuntu-latest
98 | strategy:
99 | matrix:
100 | include:
101 | - name: Build the akd_core benches
102 | package: akd_core
103 | flags: -F bench
104 | - name: Build the akd benches
105 | package: akd
106 | flags: -F bench
107 | steps:
108 | - uses: actions/checkout@main
109 | - name: Install rust
110 | uses: actions-rs/toolchain@v1
111 | with:
112 | toolchain: stable
113 | override: true
114 | - name: Run test
115 | uses: actions-rs/cargo@v1
116 | with:
117 | command: bench
118 | args: --package ${{matrix.package}} ${{matrix.flags}}
119 |
120 | docs:
121 | name: docs
122 | runs-on: ubuntu-latest
123 | env:
124 | RUSTDOCFLAGS: -Dwarnings
125 | strategy:
126 | fail-fast: false
127 | matrix:
128 | include:
129 | - package: akd
130 | - package: akd_core
131 | steps:
132 | - uses: actions/checkout@main
133 | - name: Install rust
134 | uses: actions-rs/toolchain@v1
135 | with:
136 | toolchain: stable
137 | override: true
138 |
139 | - name: Run rustdoc for ${{matrix.package}}
140 | uses: actions-rs/cargo@v1
141 | with:
142 | command: doc
143 | args: --package ${{matrix.package}}
144 |
--------------------------------------------------------------------------------
/examples/README.md:
--------------------------------------------------------------------------------
1 | # Examples
2 | This crate contains a set of examples for using AKD.
3 |
4 | ## Running Examples
5 |
6 | There are currently three examples supported in this library:
7 | - `whatsapp-kt-auditor`: An auditor for WhatsApp key transparency audit proofs
8 | - `mysql-demo`: An interactive application that demonstrates the use of AKD with a MySQL storage layer
9 | - `fixture-generator`: A utility for producing test fixtures which can be used to measure when the underlying byte
10 | format for the AKD operations change
11 |
12 | ### WhatsApp Key Transparency Auditor
13 |
14 | To run this example:
15 | ```
16 | cargo run -p examples --release -- whatsapp-kt-auditor
17 | ```
18 | and this will bring up an interactive interface which allows you to load the current epochs, and choose which epochs to audit.
19 |
20 | You can also automatically audit the latest epoch with the `-l` parameter (for "latest"), by running:
21 | ```
22 | cargo run -p examples --release -- whatsapp-kt-auditor -l
23 | ```
24 | or if you want to audit a specific epoch:
25 | ```
26 | cargo run -p examples --release -- whatsapp-kt-auditor -e 42
27 | ```
28 |
29 |
30 | ### MySQL Demo
31 |
32 | This example requires setting up [Docker](https://docs.docker.com/get-docker/) (which will host the MySQL instance). Once Docker
33 | is up and running, you can simply run:
34 | ```bash
35 | docker compose up [-d]
36 | ```
37 | in the root of the repository to spin up the MySQL instance, and then run:
38 | ```bash
39 | cargo run -p examples --release -- mysql-demo
40 | ```
41 | to run the demo. You can also pass the `--help` argument to view various options for running benchmarks and auto-populating the instance.
42 | For example, you can try:
43 | ```
44 | cargo run -p examples --release -- mysql-demo bench-publish 1000 3
45 | ```
46 | which will create a publish with 1000 users each with 3 updates (across 3 epochs).
47 |
48 | Note that if you are encountering the error:
49 | ```
50 | Failed 1 reconnection attempt(s) to MySQL database
51 | ```
52 | then this means that establishing a connection with the Docker instance failed, and you will need to double-check your Docker setup.
53 |
54 | ### Fixture Generator
55 |
56 | This is primarily used for testing and compatibility purposes when the AKD library updates. For example, say that you as a developer
57 | make a change to an existing AKD configuration which affects the byte format (either by replacing the hash function,
58 | adjusting how hashing is done, or modifying the VRF computation). Then, when re-running the tests, the fixture tests will fail, to
59 | indicate that a non-backwards-compatible change has been made.
60 |
61 | If making a non-backwards-compatible change is intended, then this can be addressed by re-running the fixture generation code with
62 | the following command:
63 | ```
64 | cargo run -p examples -- fixture-generator \
65 | --epochs 10 \
66 | --max_updates 5 \
67 | --capture_states 9 10 \
68 | --capture_deltas 10 \
69 | --out examples/src/fixture_generator/examples/
70 | ```
71 | This will automatically write the new fixtures to the appropriate files under `examples/src/fixture_generator/examples/`, and
72 | the tests should now pass.
73 |
74 | ### Test Vectors
75 |
76 | Similarly to the fixture generator, this is also used for testing and compatibility purposes, but specifically for generating test
77 | vectors that can be matched against on a separate client implementation. Note that the serialization of these structs is done
78 | through protobuf, so this can be used to double-check that a compatible client implementation can indeed parse the proof bytes
79 | that are generated by the server-side API. The resulting output files under `examples/src/test_vectors/`
80 | contain hex-encoded values for the inputs to lookup and history proof verification.
81 |
82 | The test vector generation code can be run with the following command:
83 | ```
84 | cargo run -p examples --release -- test-vectors \
85 | --out examples/src/test_vectors
86 | ```
87 |
88 | ### WASM Client
89 |
90 | This example, unlike the others, is not executable and is mainly intended to demonstrate how an application can build the WASM bindings
91 | for the client operations. Since the client operations only depend on the `akd_core` crate, which has fewer dependencies than the full
92 | `akd` crate, the resulting WASM library will be much more condensed than simply building directly from `akd`. You can take a look in the
93 | `wasm_client/` sub-directory for a simple example set of bindings for a client that wishes to verify proofs generated by the server.
94 |
--------------------------------------------------------------------------------
/akd_core/src/configuration/traits.rs:
--------------------------------------------------------------------------------
1 | // Copyright (c) Meta Platforms, Inc. and affiliates.
2 | //
3 | // This source code is dual-licensed under either the MIT license found in the
4 | // LICENSE-MIT file in the root directory of this source tree or the Apache
5 | // License, Version 2.0 found in the LICENSE-APACHE file in the root directory
6 | // of this source tree. You may select, at your option, one of the above-listed licenses.
7 |
8 | //! Defines the configuration trait for customizing the directory's cryptographic operations
9 |
10 | use crate::hash::Digest;
11 | use crate::{AkdLabel, AkdValue, AzksValue, AzksValueWithEpoch, NodeLabel, VersionFreshness};
12 |
13 | #[cfg(feature = "nostd")]
14 | use alloc::vec::Vec;
15 |
/// Trait for specifying a domain separation label that should be specific to the
/// application
pub trait DomainLabel: Clone + 'static {
    /// Returns a label, which is used as a domain separator when computing hashes
    fn domain_label() -> &'static [u8];
}

/// An example domain separation label (this should not be used in a production setting!)
#[derive(Clone)]
pub struct ExampleLabel;

impl DomainLabel for ExampleLabel {
    fn domain_label() -> &'static [u8] {
        // Byte-string literal; identical bytes to "ExampleLabel".as_bytes().
        b"ExampleLabel"
    }
}
32 |
33 | /// Trait for customizing the directory's cryptographic operations
34 | pub trait Configuration: Clone + Send + Sync + 'static {
35 | /// Hash a single byte array
36 | fn hash(item: &[u8]) -> crate::hash::Digest;
37 |
38 | /// The value stored in the root node upon initialization, with no children
39 | fn empty_root_value() -> AzksValue;
40 |
41 | /// AZKS value corresponding to an empty node
42 | fn empty_node_hash() -> AzksValue;
43 |
44 | /// Hash a leaf epoch and nonce with a given [AkdValue]
45 | fn hash_leaf_with_value(
46 | value: &crate::AkdValue,
47 | epoch: u64,
48 | nonce: &[u8],
49 | ) -> AzksValueWithEpoch;
50 |
51 | /// Hash a commit and epoch together to get the leaf's hash value
52 | fn hash_leaf_with_commitment(commitment: AzksValue, epoch: u64) -> AzksValueWithEpoch;
53 |
54 | /// Used by the server to produce a commitment nonce for an AkdLabel, version, and AkdValue.
55 | fn get_commitment_nonce(
56 | commitment_key: &[u8],
57 | label: &NodeLabel,
58 | version: u64,
59 | value: &AkdValue,
60 | ) -> Digest;
61 |
62 | /// Used by the server to produce a commitment for an AkdLabel, version, and AkdValue
63 | fn compute_fresh_azks_value(
64 | commitment_key: &[u8],
65 | label: &NodeLabel,
66 | version: u64,
67 | value: &AkdValue,
68 | ) -> AzksValue;
69 |
70 | /// To convert a regular label (arbitrary string of bytes) into a [NodeLabel], we compute the
71 | /// output as: H(label || freshness || version)
72 | ///
73 | /// Specifically, we concatenate the following together:
74 | /// - I2OSP(len(label) as u64, label)
75 | /// - A single byte encoded as 0u8 if "stale", 1u8 if "fresh"
76 | /// - A u64 representing the version
77 | ///
78 | /// These are all interpreted as a single byte array and hashed together, with the output
79 | /// of the hash returned.
80 | fn get_hash_from_label_input(
81 | label: &AkdLabel,
82 | freshness: VersionFreshness,
83 | version: u64,
84 | ) -> Vec;
85 |
86 | /// Computes the parent hash from the children hashes and labels
87 | fn compute_parent_hash_from_children(
88 | left_val: &AzksValue,
89 | left_label: &[u8],
90 | right_val: &AzksValue,
91 | right_label: &[u8],
92 | ) -> AzksValue;
93 |
94 | /// Given the top-level hash, compute the "actual" root hash that is published
95 | /// by the directory maintainer
96 | fn compute_root_hash_from_val(root_val: &AzksValue) -> Digest;
97 |
98 | /// Similar to commit_fresh_value, but used for stale values.
99 | fn stale_azks_value() -> AzksValue;
100 |
101 | /// Computes the node label value from the bytes of the label
102 | fn compute_node_label_value(bytes: &[u8]) -> Vec;
103 |
104 | /// Returns the representation of the empty label
105 | fn empty_label() -> NodeLabel;
106 | }
107 |
/// For fixture generation / testing purposes only
///
/// Extends [Configuration] with a stable, human-readable name so that generated
/// fixtures can record which configuration produced them.
#[cfg(feature = "public_tests")]
pub trait NamedConfiguration: Configuration {
    /// The name of the configuration
    fn name() -> &'static str;
}
114 |
--------------------------------------------------------------------------------
/akd_core/src/proto/specs/types.proto:
--------------------------------------------------------------------------------
1 | // Copyright (c) Meta Platforms, Inc. and affiliates.
2 | //
3 | // This source code is dual-licensed under either the MIT license found in the
4 | // LICENSE-MIT file in the root directory of this source tree or the Apache
5 | // License, Version 2.0 found in the LICENSE-APACHE file in the root directory
6 | // of this source tree. You may select, at your option, one of the above-listed licenses.
7 |
8 | // This contains the protobuf definition for inter-node messaging structures
9 |
10 | // To re-generate the protobuf specifications, utilize the build.rs script in this
11 | // crate (See Cargo.toml file)
12 |
13 | syntax = "proto2";
14 |
15 | /* NodeLabel represents the label of a history tree node in the AKD tree with a
16 | * supplied label-length and label value (location) */
17 | message NodeLabel {
18 | optional bytes label_val = 1;
19 | optional uint32 label_len = 2;
20 | }
21 |
/* Element of an AZKS which contains a label and value */
message AzksElement {
optional NodeLabel label = 1;
optional bytes value = 2;
}
27 |
28 | /* Represents a specific level of the tree with the parental sibling and the direction
29 | of the parent for use in tree hash calculations */
30 | message SiblingProof {
31 | optional NodeLabel label = 1;
32 | repeated AzksElement siblings = 2;
33 | optional uint32 direction = 3;
34 | }
35 |
36 | /* Merkle proof of membership of a [`NodeLabel`] with a particular hash
37 | value in the tree at a given epoch */
38 | message MembershipProof {
39 | optional NodeLabel label = 1;
40 | optional bytes hash_val = 2;
41 | repeated SiblingProof sibling_proofs = 3;
42 | }
43 |
44 | /* Merkle Patricia proof of non-membership for a [`NodeLabel`] in the tree
45 | at a given epoch. */
46 | message NonMembershipProof {
47 | optional NodeLabel label = 1;
48 | optional NodeLabel longest_prefix = 2;
49 | repeated AzksElement longest_prefix_children = 3;
50 | optional MembershipProof longest_prefix_membership_proof = 4;
51 | }
52 |
53 | /* Proof that a given label was at a particular state at the given epoch.
54 | This means we need to show that the state and version we are claiming for this node must have been:
55 | * committed in the tree,
56 | * not too far ahead of the most recent marker version,
57 | * not stale when served.
58 | This proof is sent in response to a lookup query for a particular key. */
59 | message LookupProof {
60 | optional uint64 epoch = 1;
61 | optional bytes value = 2;
62 | optional uint64 version = 3;
63 | optional bytes existence_vrf_proof = 4;
64 | optional MembershipProof existence_proof = 5;
65 | optional bytes marker_vrf_proof = 6;
66 | optional MembershipProof marker_proof = 7;
67 | optional bytes freshness_vrf_proof = 8;
68 | optional NonMembershipProof freshness_proof = 9;
69 | optional bytes commitment_nonce = 10;
70 | }
71 |
72 | /* A vector of UpdateProofs are sent as the proof to a history query for a particular key.
73 | For each version of the value associated with the key, the verifier must check that:
74 | * the version was included in the claimed epoch,
75 | * the previous version was retired at this epoch,
76 | * the version did not exist prior to this epoch,
77 | * the next few versions (up until the next marker), did not exist at this epoch,
78 | * the future marker versions did not exist at this epoch. */
79 | message UpdateProof {
80 | optional uint64 epoch = 1;
81 | optional bytes value = 2;
82 | optional uint64 version = 3;
83 | optional bytes existence_vrf_proof = 4;
84 | optional MembershipProof existence_proof = 5;
85 | optional bytes previous_version_vrf_proof = 6;
86 | optional MembershipProof previous_version_proof = 7;
87 | optional bytes commitment_nonce = 8;
88 | }
89 |
90 | /* This proof consists of an array of [`UpdateProof`]s, membership proofs for
91 | existence versions at past markers, and non-membership proofs for future markers
92 | up until the current epoch. */
93 | message HistoryProof {
94 | repeated UpdateProof update_proofs = 1;
95 | repeated bytes past_marker_vrf_proofs = 2;
96 | repeated MembershipProof existence_of_past_marker_proofs = 3;
97 | repeated bytes future_marker_vrf_proofs = 4;
98 | repeated NonMembershipProof non_existence_of_future_marker_proofs = 5;
99 | }
100 |
/* SingleAppendOnlyProof represents a proof that no leaves were changed or removed between epoch t and t + 1 */
message SingleAppendOnlyProof {
repeated AzksElement inserted = 1;
repeated AzksElement unchanged_nodes = 2;
}
106 |
/* An append-only proof is a proof that no nodes were changed from epochs[0] to epochs[end], epoch-by-epoch */
message AppendOnlyProof {
repeated SingleAppendOnlyProof proofs = 1;
repeated uint64 epochs = 2;
}
112 |
--------------------------------------------------------------------------------
/akd/src/tests/mod.rs:
--------------------------------------------------------------------------------
1 | // Copyright (c) Meta Platforms, Inc. and affiliates.
2 | //
3 | // This source code is dual-licensed under either the MIT license found in the
4 | // LICENSE-MIT file in the root directory of this source tree or the Apache
5 | // License, Version 2.0 found in the LICENSE-APACHE file in the root directory
6 | // of this source tree. You may select, at your option, one of the above-listed licenses.
7 |
8 | //! Contains the tests for the high-level API (directory, auditor, client)
9 |
10 | mod test_core_protocol;
11 | mod test_errors;
12 | mod test_preloads;
13 |
14 | use std::collections::HashMap;
15 |
16 | use crate::{
17 | errors::StorageError,
18 | storage::{
19 | memory::AsyncInMemoryDatabase,
20 | types::{DbRecord, KeyData, ValueState, ValueStateRetrievalFlag},
21 | Database, DbSetState, Storable,
22 | },
23 | tree_node::TreeNodeWithPreviousValue,
24 | AkdLabel, AkdValue, Azks,
25 | };
26 |
27 | // Below contains the mock code for constructing a `MockLocalDatabase`
28 |
// Placeholder type used only as the target of the `mockall::mock!` invocation
// below; the generated `MockLocalDatabase` is what the tests actually use.
#[allow(dead_code)]
#[derive(Clone)]
pub struct LocalDatabase;

// SAFETY: `LocalDatabase` is a zero-sized unit struct with no interior state,
// so sending it across threads cannot cause data races.
unsafe impl Send for LocalDatabase {}

// SAFETY: as above — there is no data to synchronize, so shared references
// across threads are trivially safe.
unsafe impl Sync for LocalDatabase {}
36 |
37 | // Note that this macro produces a `MockLocalDatabase` struct
38 | mockall::mock! {
39 | pub LocalDatabase {
40 |
41 | }
42 | impl Clone for LocalDatabase {
43 | fn clone(&self) -> Self;
44 | }
45 | #[async_trait::async_trait]
46 | impl Database for LocalDatabase {
47 | async fn set(&self, record: DbRecord) -> Result<(), StorageError>;
48 | async fn batch_set(
49 | &self,
50 | records: Vec,
51 | state: DbSetState,
52 | ) -> Result<(), StorageError>;
53 | async fn get(&self, id: &St::StorageKey) -> Result;
54 | async fn batch_get(
55 | &self,
56 | ids: &[St::StorageKey],
57 | ) -> Result, StorageError>;
58 | async fn get_user_data(&self, username: &AkdLabel) -> Result;
59 | async fn get_user_state(
60 | &self,
61 | username: &AkdLabel,
62 | flag: ValueStateRetrievalFlag,
63 | ) -> Result;
64 | async fn get_user_state_versions(
65 | &self,
66 | usernames: &[AkdLabel],
67 | flag: ValueStateRetrievalFlag,
68 | ) -> Result, StorageError>;
69 | }
70 | }
71 |
72 | fn setup_mocked_db(db: &mut MockLocalDatabase, test_db: &AsyncInMemoryDatabase) {
73 | // ===== Set ===== //
74 | let tmp_db = test_db.clone();
75 | db.expect_set()
76 | .returning(move |record| futures::executor::block_on(tmp_db.set(record)));
77 |
78 | // ===== Batch Set ===== //
79 | let tmp_db = test_db.clone();
80 | db.expect_batch_set().returning(move |record, other| {
81 | futures::executor::block_on(tmp_db.batch_set(record, other))
82 | });
83 |
84 | // ===== Get ===== //
85 | let tmp_db = test_db.clone();
86 | db.expect_get::()
87 | .returning(move |key| futures::executor::block_on(tmp_db.get::(key)));
88 |
89 | let tmp_db = test_db.clone();
90 | db.expect_get::()
91 | .returning(move |key| {
92 | futures::executor::block_on(tmp_db.get::(key))
93 | });
94 |
95 | let tmp_db = test_db.clone();
96 | db.expect_get::()
97 | .returning(move |key| futures::executor::block_on(tmp_db.get::(key)));
98 |
99 | // ===== Batch Get ===== //
100 | let tmp_db = test_db.clone();
101 | db.expect_batch_get::()
102 | .returning(move |key| futures::executor::block_on(tmp_db.batch_get::(key)));
103 |
104 | let tmp_db = test_db.clone();
105 | db.expect_batch_get::()
106 | .returning(move |key| {
107 | futures::executor::block_on(tmp_db.batch_get::(key))
108 | });
109 |
110 | // ===== Get User Data ===== //
111 | let tmp_db = test_db.clone();
112 | db.expect_get_user_data()
113 | .returning(move |arg| futures::executor::block_on(tmp_db.get_user_data(arg)));
114 |
115 | // ===== Get User State ===== //
116 | let tmp_db = test_db.clone();
117 | db.expect_get_user_state()
118 | .returning(move |arg, flag| futures::executor::block_on(tmp_db.get_user_state(arg, flag)));
119 |
120 | // ===== Get User State Versions ===== //
121 | let tmp_db = test_db.clone();
122 | db.expect_get_user_state_versions()
123 | .returning(move |arg, flag| {
124 | futures::executor::block_on(tmp_db.get_user_state_versions(arg, flag))
125 | });
126 | }
127 |
--------------------------------------------------------------------------------
/.vscode/launch.json:
--------------------------------------------------------------------------------
1 | {
2 | // Use IntelliSense to learn about possible attributes.
3 | // Hover to view descriptions of existing attributes.
4 | // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
5 | "version": "0.2.0",
6 | "configurations": [
7 | {
8 | "type": "lldb",
9 | "request": "launch",
10 | "name": "Debug AKD Auditor User Interface",
11 | "cargo": {
12 | "args": [
13 | "build",
14 | "--bin=akd_local_auditor",
15 | "--package=akd_local_auditor"
16 | ],
17 | },
18 | "args": [
19 | "ui"
20 | ],
21 | "cwd": "${workspaceFolder}"
22 | },
23 | {
24 | "type": "lldb",
25 | "request": "launch",
26 | "name": "Debug integration tests in library 'akd_integration_tests'",
27 | "cargo": {
28 | "args": [
29 | "test",
30 | "--no-run",
31 | "--lib",
32 | "--package=akd_integration_tests"
33 | ],
34 | },
35 | "args": [],
36 | "cwd": "${workspaceFolder}"
37 | },
38 | {
39 | "type": "lldb",
40 | "request": "launch",
41 | "name": "Debug unit tests in library 'akd_core'",
42 | "cargo": {
43 | "args": [
44 | "test",
45 | "--no-run",
46 | "--lib",
47 | "--package=akd_core",
48 | "--features=protobuf,blake3"
49 | ],
50 | },
51 | "args": ["test_minimum_encoding_label_bytes"],
52 | "cwd": "${workspaceFolder}"
53 | },
54 | {
55 | "type": "lldb",
56 | "request": "launch",
57 | "name": "Debug benchmark 'azks'",
58 | "cargo": {
59 | "args": [
60 | "test",
61 | "--no-run",
62 | "--bench=azks",
63 | "--package=akd"
64 | ],
65 | "filter": {
66 | "name": "azks",
67 | "kind": "bench"
68 | }
69 | },
70 | "args": [],
71 | "cwd": "${workspaceFolder}"
72 | },
73 | {
74 | "type": "lldb",
75 | "request": "launch",
76 | "name": "Debug MySQL unit tests",
77 | "cargo": {
78 | "args": [
79 | "test",
80 | "--no-run",
81 | "--lib",
82 | "--package=akd_mysql"
83 | ],
84 | "filter": {
85 | "name": "akd_mysql",
86 | "kind": "lib"
87 | }
88 | },
89 | "args": [],
90 | "cwd": "${workspaceFolder}"
91 | },
92 | {
93 | "type": "lldb",
94 | "request": "launch",
95 | "name": "Debug POC Repl",
96 | "cargo": {
97 | "args": [
98 | "build",
99 | "--bin=akd_app",
100 | "--package=akd_app"
101 | ],
102 | "filter": {
103 | "name": "akd_app",
104 | "kind": "bin"
105 | }
106 | },
107 | "args": ["-d", "--memory"],
108 | "cwd": "${workspaceFolder}"
109 | },
110 | {
111 | "type": "lldb",
112 | "request": "launch",
113 | "name": "Debug POC Publish",
114 | "cargo": {
115 | "args": [
116 | "build",
117 | "--bin=akd_app",
118 | "--package=akd_app"
119 | ],
120 | "filter": {
121 | "name": "akd_app",
122 | "kind": "bin"
123 | }
124 | },
125 | "args": ["-d", "--memory", "bench-publish", "100000", "2", "true"],
126 | "cwd": "${workspaceFolder}"
127 | },
128 | {
129 | "type": "lldb",
130 | "request": "launch",
131 | "name": "Debug POC Db Flush",
132 | "cargo": {
133 | "args": [
134 | "build",
135 | "--bin=akd_app",
136 | "--package=akd_app"
137 | ],
138 | "filter": {
139 | "name": "akd_app",
140 | "kind": "bin"
141 | }
142 | },
143 | "args": ["-d", "flush"],
144 | "cwd": "${workspaceFolder}"
145 | }
146 | ]
147 | }
148 |
--------------------------------------------------------------------------------
/akd/src/auditor.rs:
--------------------------------------------------------------------------------
1 | // Copyright (c) Meta Platforms, Inc. and affiliates.
2 | //
3 | // This source code is dual-licensed under either the MIT license found in the
4 | // LICENSE-MIT file in the root directory of this source tree or the Apache
5 | // License, Version 2.0 found in the LICENSE-APACHE file in the root directory
6 | // of this source tree. You may select, at your option, one of the above-listed licenses.
7 |
8 | //! Code for an auditor of a authenticated key directory
9 |
10 | use akd_core::configuration::Configuration;
11 | use akd_core::AzksElement;
12 |
13 | use crate::append_only_zks::AzksParallelismConfig;
14 | use crate::AzksValue;
15 | use crate::{
16 | append_only_zks::InsertMode,
17 | errors::{AkdError, AuditorError, AzksError},
18 | storage::{manager::StorageManager, memory::AsyncInMemoryDatabase},
19 | AppendOnlyProof, Azks, Digest, SingleAppendOnlyProof,
20 | };
21 |
22 | /// Verifies an audit proof, given start and end hashes for a merkle patricia tree.
23 | #[cfg_attr(feature = "tracing_instrument", tracing::instrument(skip_all))]
24 | pub async fn audit_verify(
25 | hashes: Vec,
26 | proof: AppendOnlyProof,
27 | ) -> Result<(), AkdError> {
28 | if proof.epochs.len() + 1 != hashes.len() {
29 | return Err(AkdError::AuditErr(AuditorError::VerifyAuditProof(format!(
30 | "The proof has a different number of epochs than needed for hashes.
31 | The number of hashes you provide should be one more than the number of epochs!
32 | Number of epochs = {}, number of hashes = {}",
33 | proof.epochs.len(),
34 | hashes.len()
35 | ))));
36 | }
37 | if proof.epochs.len() != proof.proofs.len() {
38 | return Err(AkdError::AuditErr(AuditorError::VerifyAuditProof(format!(
39 | "The proof has {} epochs and {} proofs. These should be equal!",
40 | proof.epochs.len(),
41 | proof.proofs.len()
42 | ))));
43 | }
44 | for i in 0..hashes.len() - 1 {
45 | let start_hash = hashes[i];
46 | let end_hash = hashes[i + 1];
47 | verify_consecutive_append_only::(
48 | &proof.proofs[i],
49 | start_hash,
50 | end_hash,
51 | proof.epochs[i] + 1,
52 | )
53 | .await?;
54 | }
55 | Ok(())
56 | }
57 |
58 | /// Helper for audit, verifies an append-only proof.
59 | ///
60 | /// This function first creates a new AZKS instance with the unchanged nodes from the proof,
61 | /// then it verifies the start hash against the root hash of this AZKS instance.
62 | /// Next, it creates another AZKS instance with the unchanged nodes and inserted nodes,
63 | /// and verifies the end hash against the root hash of this second AZKS instance.
64 | #[cfg_attr(feature = "tracing_instrument", tracing::instrument(skip_all))]
65 | pub async fn verify_consecutive_append_only(
66 | proof: &SingleAppendOnlyProof,
67 | start_hash: Digest,
68 | end_hash: Digest,
69 | end_epoch: u64,
70 | ) -> Result<(), AkdError> {
71 | verify_append_only_hash::(proof.unchanged_nodes.clone(), start_hash, None).await?;
72 |
73 | let mut unchanged_with_inserted_nodes = proof.unchanged_nodes.clone();
74 | unchanged_with_inserted_nodes.extend(proof.inserted.iter().map(|x| {
75 | let mut y = *x;
76 | y.value = AzksValue(TC::hash_leaf_with_commitment(x.value, end_epoch).0);
77 | y
78 | }));
79 |
80 | verify_append_only_hash::(unchanged_with_inserted_nodes, end_hash, Some(end_epoch - 1))
81 | .await?;
82 | Ok(())
83 | }
84 |
85 | /// This function verifies the root hash of an AZKS instance against an expected hash.
86 | /// It creates an AZKS instance from a set of nodes, and checks if the computed root
87 | /// hash matches the expected hash. The optional latest_epoch parameter allows for
88 | /// specifying the latest epoch for the AZKS instance.
89 | async fn verify_append_only_hash(
90 | nodes: Vec,
91 | expected_hash: Digest,
92 | latest_epoch: Option,
93 | ) -> Result<(), AkdError> {
94 | let manager = StorageManager::new_no_cache(
95 | AsyncInMemoryDatabase::new_with_remove_child_nodes_on_insertion(),
96 | );
97 | let mut azks = Azks::new::(&manager).await?;
98 | if let Some(epoch) = latest_epoch {
99 | azks.latest_epoch = epoch;
100 | }
101 | azks.batch_insert_nodes::(
102 | &manager,
103 | nodes,
104 | InsertMode::Auditor,
105 | AzksParallelismConfig::default(),
106 | )
107 | .await?;
108 | let computed_hash: Digest = azks.get_root_hash::(&manager).await?;
109 | if computed_hash != expected_hash {
110 | return Err(AkdError::AzksErr(AzksError::VerifyAppendOnlyProof(
111 | format!(
112 | "Expected hash {} does not match computed root hash {}",
113 | hex::encode(expected_hash),
114 | hex::encode(computed_hash)
115 | ),
116 | )));
117 | }
118 | Ok(())
119 | }
120 |
--------------------------------------------------------------------------------
/akd/src/storage/cache/tests.rs:
--------------------------------------------------------------------------------
1 | // Copyright (c) Meta Platforms, Inc. and affiliates.
2 | //
3 | // This source code is dual-licensed under either the MIT license found in the
4 | // LICENSE-MIT file in the root directory of this source tree or the Apache
5 | // License, Version 2.0 found in the LICENSE-APACHE file in the root directory
6 | // of this source tree. You may select, at your option, one of the above-listed licenses.
7 |
8 | //! Caching tests
9 |
10 | use super::*;
11 | use std::time::Duration;
12 |
13 | use crate::storage::types::{ValueState, ValueStateKey};
14 | use crate::storage::DbRecord;
15 | use crate::{AkdLabel, AkdValue, NodeLabel};
16 |
17 | #[tokio::test]
18 | async fn test_cache_put_and_expires() {
19 | let cache = TimedCache::new(
20 | Some(Duration::from_millis(10)),
21 | None,
22 | Some(Duration::from_millis(50)),
23 | );
24 |
25 | let value_state = DbRecord::ValueState(ValueState {
26 | epoch: 1,
27 | version: 1,
28 | label: NodeLabel {
29 | label_len: 1,
30 | label_val: [0u8; 32],
31 | },
32 | value: AkdValue::from("some value"),
33 | username: AkdLabel::from("user"),
34 | });
35 | let key = ValueStateKey(AkdLabel::from("user").0.to_vec(), 1);
36 | cache.put(&value_state).await;
37 |
38 | let got = cache.hit_test::(&key).await;
39 | assert!(got.is_some());
40 | assert_eq!(Some(value_state), got);
41 |
42 | tokio::time::sleep(tokio::time::Duration::from_millis(100)).await;
43 | let got = cache.hit_test::(&key).await;
44 | assert_eq!(None, got);
45 | }
46 |
47 | #[tokio::test]
48 | async fn test_cache_overwrite() {
49 | let cache = TimedCache::new(Some(Duration::from_millis(1000)), None, None);
50 |
51 | let value_state = ValueState {
52 | epoch: 1,
53 | version: 1,
54 | label: NodeLabel {
55 | label_len: 1,
56 | label_val: [0u8; 32],
57 | },
58 | value: AkdValue::from("some value"),
59 | username: AkdLabel::from("user"),
60 | };
61 | let key = ValueStateKey(AkdLabel::from("user").0.to_vec(), 1);
62 |
63 | let value_state_2 = ValueState {
64 | epoch: 1,
65 | version: 2,
66 | label: NodeLabel {
67 | label_len: 2,
68 | label_val: [0u8; 32],
69 | },
70 | value: AkdValue::from("some value"),
71 | username: AkdLabel::from("user"),
72 | };
73 | cache.put(&DbRecord::ValueState(value_state)).await;
74 | cache
75 | .put(&DbRecord::ValueState(value_state_2.clone()))
76 | .await;
77 |
78 | let got = cache.hit_test::(&key).await;
79 | assert_eq!(Some(DbRecord::ValueState(value_state_2)), got);
80 | }
81 |
82 | #[tokio::test]
83 | async fn test_cache_memory_pressure() {
84 | let cache = TimedCache::new(
85 | Some(Duration::from_millis(1000)),
86 | Some(10),
87 | Some(Duration::from_millis(50)),
88 | );
89 |
90 | let value_state = DbRecord::ValueState(ValueState {
91 | epoch: 1,
92 | version: 1,
93 | label: NodeLabel {
94 | label_len: 1,
95 | label_val: [0u8; 32],
96 | },
97 | value: AkdValue::from("some value"),
98 | username: AkdLabel::from("user"),
99 | });
100 | let key = ValueStateKey(AkdLabel::from("user").0.to_vec(), 1);
101 | cache.put(&value_state).await;
102 |
103 | // we only do an "automated" clean every 50ms in test, which is when memory pressure is evaluated.
104 | // 100ms will make sure the clean op will run on the next `hit_test` op
105 | tokio::time::sleep(tokio::time::Duration::from_millis(100)).await;
106 | // This get should return none, even though the cache expiration time is 1s. This is because
107 | // we should exceed 10 bytes of storage utilization so the cache should clean the item.
108 | let got = cache.hit_test::(&key).await;
109 | assert_eq!(None, got);
110 | }
111 |
112 | #[tokio::test]
113 | async fn test_many_memory_pressure() {
114 | let cache = TimedCache::new(
115 | Some(Duration::from_millis(1000)),
116 | Some(1024 * 5),
117 | Some(Duration::from_millis(50)),
118 | );
119 |
120 | let value_states = (1..100)
121 | .map(|i| ValueState {
122 | epoch: i as u64,
123 | version: i as u64,
124 | label: NodeLabel {
125 | label_len: 1,
126 | label_val: [0u8; 32],
127 | },
128 | value: AkdValue::from("test"),
129 | username: AkdLabel::from("user"),
130 | })
131 | .map(DbRecord::ValueState)
132 | .collect::>();
133 |
134 | cache.batch_put(&value_states).await;
135 |
136 | // we only do an "automated" clean every 50ms in test, which is when memory pressure is evaluated.
137 | // 100ms will make sure the clean op will run on the next `hit_test` op
138 | tokio::time::sleep(tokio::time::Duration::from_millis(100)).await;
139 |
140 | let all = cache.get_all().await;
141 | assert!(all.len() < 99);
142 | }
143 |
--------------------------------------------------------------------------------
/examples/src/fixture_generator/reader/yaml.rs:
--------------------------------------------------------------------------------
1 | // Copyright (c) Meta Platforms, Inc. and affiliates.
2 | //
3 | // This source code is dual-licensed under either the MIT license found in the
4 | // LICENSE-MIT file in the root directory of this source tree or the Apache
5 | // License, Version 2.0 found in the LICENSE-APACHE file in the root directory
6 | // of this source tree. You may select, at your option, one of the above-listed licenses.
7 |
8 | //! This module contains an implementor of the Reader trait for the YAML format.
9 |
10 | use std::fmt::Write as _;
11 | use std::fs::File;
12 | use std::io::{BufRead, BufReader, Lines, Seek};
13 | use std::iter::Peekable;
14 | use std::result::Result; // import without risk of name clashing
15 |
16 | use serde::de::DeserializeOwned;
17 |
18 | use crate::fixture_generator::generator::{Delta, Metadata, State};
19 | use crate::fixture_generator::reader::{Reader, ReaderError};
20 | use crate::fixture_generator::YAML_SEPARATOR;
21 |
22 | impl From for ReaderError {
23 | fn from(error: std::io::Error) -> Self {
24 | ReaderError::Input(error.to_string())
25 | }
26 | }
27 |
28 | #[allow(dead_code)]
29 | /// YAML format file reader.
30 | pub struct YamlFileReader {
31 | file: File,
32 | index: u32,
33 | buffer: Peekable>>,
34 | }
35 |
36 | #[allow(dead_code)]
37 | impl YamlFileReader {
38 | #[cfg(test)]
39 | pub fn new(file: File) -> Result {
40 | let index = 0;
41 | let buffer = Self::buffer(&file)?;
42 |
43 | Ok(Self {
44 | file,
45 | index,
46 | buffer,
47 | })
48 | }
49 |
50 | // Instantiates a new buffer for a given file.
51 | fn buffer(file: &File) -> Result>>, ReaderError> {
52 | let mut file_ref_copy = file.try_clone()?;
53 | file_ref_copy.rewind()?;
54 |
55 | Ok(BufReader::new(file_ref_copy).lines().peekable())
56 | }
57 |
58 | // Returns the next YAML "doc" in the file, looping back to the start of the
59 | // file if EOF is encountered.
60 | fn next_doc(&mut self) -> Result {
61 | // find start of doc
62 | loop {
63 | match self.buffer.peek() {
64 | Some(Ok(sep)) if sep.trim_end() == YAML_SEPARATOR => {
65 | self.buffer.next();
66 | break;
67 | }
68 | Some(Ok(_)) => {
69 | self.buffer.next();
70 | }
71 | None => {
72 | return Err(ReaderError::Format(
73 | "EOF encountered while looking for start of YAML doc".to_string(),
74 | ))
75 | }
76 | Some(Err(err)) => return Err(ReaderError::Input(err.to_string())),
77 | }
78 | }
79 |
80 | // collect lines until end of doc
81 | let mut doc = String::new();
82 | loop {
83 | match self.buffer.peek() {
84 | Some(Ok(sep)) if sep.trim_end() == YAML_SEPARATOR => {
85 | self.index += 1;
86 | return Ok(doc);
87 | }
88 | Some(Ok(line)) => {
89 | // avoid the extra allocation call with a format!
90 | let _ = writeln!(doc, "{line}");
91 | self.buffer.next();
92 | }
93 | None => {
94 | // EOF encountered, reset buffer before returning
95 | self.index = 0;
96 | self.buffer = Self::buffer(&self.file)?;
97 | return Ok(doc);
98 | }
99 | Some(Err(err)) => return Err(ReaderError::Input(err.to_string())),
100 | }
101 | }
102 | }
103 |
104 | // Reads an object from the YAML file, utilizing validate_fun to validate
105 | // the object before returning it.
106 | fn read_impl bool>(
107 | &mut self,
108 | validate_fun: F,
109 | ) -> Result {
110 | let start = self.index;
111 | loop {
112 | if let Ok(object) = serde_yaml::from_str::(&self.next_doc()?) {
113 | if validate_fun(&object) {
114 | return Ok(object);
115 | }
116 | }
117 | // exit if all docs have been checked
118 | if self.index == start {
119 | return Err(ReaderError::NotFound);
120 | }
121 | }
122 | }
123 | }
124 |
125 | impl Reader for YamlFileReader {
126 | fn read_metadata(&mut self) -> Result {
127 | self.read_impl(|_: &Metadata| true)
128 | }
129 |
130 | fn read_state(&mut self, epoch: u32) -> Result {
131 | self.read_impl(|state: &State| state.epoch == epoch)
132 | }
133 |
134 | fn read_delta(&mut self, epoch: u32) -> Result {
135 | self.read_impl(|delta: &Delta| delta.epoch == epoch)
136 | }
137 |
138 | fn read_string(&mut self) -> Result {
139 | self.read_impl(|_: &String| true)
140 | }
141 | }
142 |
--------------------------------------------------------------------------------
/akd/src/storage/mod.rs:
--------------------------------------------------------------------------------
1 | // Copyright (c) Meta Platforms, Inc. and affiliates.
2 | //
3 | // This source code is dual-licensed under either the MIT license found in the
4 | // LICENSE-MIT file in the root directory of this source tree or the Apache
5 | // License, Version 2.0 found in the LICENSE-APACHE file in the root directory
6 | // of this source tree. You may select, at your option, one of the above-listed licenses.
7 |
8 | //! Storage module for a auditable key directory
9 |
10 | use crate::errors::StorageError;
11 | use crate::storage::types::{DbRecord, StorageType};
12 | use crate::{AkdLabel, AkdValue};
13 |
14 | use async_trait::async_trait;
15 | #[cfg(feature = "serde_serialization")]
16 | use serde::{de::DeserializeOwned, Serialize};
17 | use std::collections::HashMap;
18 | use std::hash::Hash;
19 | use std::marker::{Send, Sync};
20 |
21 | pub mod cache;
22 | pub mod transaction;
23 | pub mod types;
24 |
25 | /*
26 | Various implementations supported by the library are imported here and usable at various checkpoints
27 | */
28 | pub mod manager;
29 | pub mod memory;
30 |
31 | pub use manager::StorageManager;
32 |
33 | #[cfg(any(test, feature = "public_tests"))]
34 | pub mod tests;
35 |
/// Denotes the "state" when a batch_set is being called in the data layer
///
/// Passed to [`Database::batch_set`]; implementations can inspect it to
/// distinguish commit-time flushes from direct writes.
pub enum DbSetState {
    /// Being called as part of a transaction commit operation
    TransactionCommit,
    /// Being called as a general, in-line operation
    General,
}
43 |
/// Storable represents an _item_ which can be stored in the storage layer
#[cfg(feature = "serde_serialization")]
pub trait Storable: Clone + Serialize + DeserializeOwned + Sync + 'static {
    /// This particular storage will have a key type
    type StorageKey: Clone + Serialize + Eq + Hash + Send + Sync + std::fmt::Debug;

    /// Must return a valid storage type
    fn data_type() -> StorageType;

    /// Retrieve an instance of the id of this storable. The combination of the
    /// storable's StorageType and this id are _globally_ unique
    fn get_id(&self) -> Self::StorageKey;

    /// Retrieve the full binary version of a key (for comparisons)
    fn get_full_binary_id(&self) -> Vec<u8> {
        Self::get_full_binary_key_id(&self.get_id())
    }

    /// Retrieve the full binary version of a key (for comparisons)
    fn get_full_binary_key_id(key: &Self::StorageKey) -> Vec<u8>;

    /// Reformat a key from the full-binary specification
    fn key_from_full_binary(bin: &[u8]) -> Result<Self::StorageKey, String>;
}
68 |
69 | /// Storable represents an _item_ which can be stored in the storage layer
70 | #[cfg(not(feature = "serde_serialization"))]
71 | pub trait Storable: Clone + Sync + 'static {
72 | /// This particular storage will have a key type
73 | type StorageKey: Clone + Eq + Hash + Send + Sync + std::fmt::Debug;
74 |
75 | /// Must return a valid storage type
76 | fn data_type() -> StorageType;
77 |
78 | /// Retrieve an instance of the id of this storable. The combination of the
79 | /// storable's StorageType and this id are _globally_ unique
80 | fn get_id(&self) -> Self::StorageKey;
81 |
82 | /// Retrieve the full binary version of a key (for comparisons)
83 | fn get_full_binary_id(&self) -> Vec {
84 | Self::get_full_binary_key_id(&self.get_id())
85 | }
86 |
87 | /// Retrieve the full binary version of a key (for comparisons)
88 | fn get_full_binary_key_id(key: &Self::StorageKey) -> Vec;
89 |
90 | /// Reformat a key from the full-binary specification
91 | fn key_from_full_binary(bin: &[u8]) -> Result;
92 | }
93 |
94 | /// A database implementation backing storage for the AKD
95 | #[async_trait]
96 | pub trait Database: Send + Sync {
97 | /// Set a record in the database
98 | async fn set(&self, record: DbRecord) -> Result<(), StorageError>;
99 |
100 | /// Set multiple records in the database with a minimal set of operations
101 | async fn batch_set(
102 | &self,
103 | records: Vec,
104 | state: DbSetState,
105 | ) -> Result<(), StorageError>;
106 |
107 | /// Retrieve a stored record from the database
108 | async fn get(&self, id: &St::StorageKey) -> Result;
109 |
110 | /// Retrieve a batch of records by id from the database
111 | async fn batch_get(
112 | &self,
113 | ids: &[St::StorageKey],
114 | ) -> Result, StorageError>;
115 |
116 | /* User data searching */
117 |
118 | /// Retrieve the user data for a given user
119 | async fn get_user_data(&self, username: &AkdLabel) -> Result;
120 |
121 | /// Retrieve a specific state for a given user
122 | async fn get_user_state(
123 | &self,
124 | username: &AkdLabel,
125 | flag: types::ValueStateRetrievalFlag,
126 | ) -> Result;
127 |
128 | /// Retrieve the user -> state version mapping in bulk. This is the same as get_user_states but with less data retrieved from the storage layer
129 | async fn get_user_state_versions(
130 | &self,
131 | usernames: &[AkdLabel],
132 | flag: types::ValueStateRetrievalFlag,
133 | ) -> Result, StorageError>;
134 | }
135 |
136 | /// Optional storage layer utility functions for debug and test purposes
137 | #[async_trait]
138 | pub trait StorageUtil: Database {
139 | /// Retrieves all stored records of a given type from the data layer, ignoring any caching or transaction pending
140 | async fn batch_get_type_direct(&self) -> Result, StorageError>;
141 |
142 | /// Retrieves all stored records from the data layer, ignoring any caching or transaction pending
143 | async fn batch_get_all_direct(&self) -> Result, StorageError>;
144 | }
145 |
--------------------------------------------------------------------------------
/examples/src/fixture_generator/parser.rs:
--------------------------------------------------------------------------------
1 | // Copyright (c) Meta Platforms, Inc. and affiliates.
2 | //
3 | // This source code is dual-licensed under either the MIT license found in the
4 | // LICENSE-MIT file in the root directory of this source tree or the Apache
5 | // License, Version 2.0 found in the LICENSE-APACHE file in the root directory
6 | // of this source tree. You may select, at your option, one of the above-listed licenses.
7 |
8 | //! This module contains the CLI argument definitions and parser.
9 |
10 | use akd::{AkdLabel, AkdValue};
11 | use clap::Parser;
12 | use regex::Regex;
13 | use serde::{Deserialize, Serialize};
14 |
15 | /// Any alphanumeric string - spaces are allowed e.g. "User123" or "User 123"
16 | const USER_PATTERN: &str = r"[\w\s]+";
17 |
18 | /// A solo string of digits e.g. "10" or a tuple of digits and a string
19 | /// e.g."(10, 'abc')"
20 | const EVENT_PATTERN: &str = r"\d+|(\(\s*(\d+)\s*,\s*'(\w*)'\s*\))";
21 |
22 | /// A key update the tool should include in the tree at the given epoch.
23 | /// If "value" is None, the tool will randomly generate a value for the epoch.
24 | #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
25 | pub struct UserEvent {
26 | pub epoch: u32,
27 | pub value: Option,
28 | }
29 |
30 | /// A user whose key update events should be included in the tree.
31 | #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
32 | pub struct User {
33 | pub label: AkdLabel,
34 | pub events: Vec,
35 | }
36 |
37 | /// This tool allows a directory to be created with specified and random
38 | /// contents, capturing the directory state and epoch-to-epoch delta in
39 | /// an output file for use in debugging and as test fixtures.
40 | #[derive(Parser, Clone, Debug, Eq, PartialEq, Serialize, Deserialize)]
41 | pub struct Args {
42 | /// Users and their associated key update events.
43 | /// A username is expected, followed by a colon and a list of epochs OR
44 | /// (epoch, value). Usernames are expected to be utf-8 strings, which will
45 | /// be internally interpreted as bytes.
46 | /// The following are valid examples of user arguments:
47 | /// --user "username: 1, 3, (5, 'xyz')"
48 | /// --user="username: [(1,'abc'), 2]"
49 | /// -u "some username: 1"
50 | #[arg(
51 | long = "user",
52 | short = 'u',
53 | num_args = 0..,
54 | value_parser = parse_user_events,
55 | )]
56 | pub users: Vec,
57 |
58 | /// Number of epochs to advance the tree by
59 | /// e.g. a value of 3 will perform 3 publishes on an empty directory.
60 | #[arg(long = "epochs", short = 'e')]
61 | pub epochs: u32,
62 |
63 | /// Maximum number of key updates **per epoch** the tool should perform.
64 | /// Note that all user events explicitly passed for an epoch will be
65 | /// included even if the number exceeds this value.
66 | #[arg(long = "max_updates", default_value = "10")]
67 | pub max_updates: u32,
68 |
69 | /// Minimum number of key updates **per epoch** the tool should perform.
70 | /// The tool will generate random labels and values to include in an epoch
71 | /// if the user events explicitly passed for an epoch are not sufficients.
72 | #[arg(long = "min_updates", default_value = "0")]
73 | pub min_updates: u32,
74 |
75 | /// Epochs where the state of the directory should be captured in the output
76 | /// e.g. the value 3 will output all db records after epoch 3 is performed.
77 | /// Multiple values are accepted e.g. --capture_states 9 10
78 | #[arg(long = "capture_states", short = 's', num_args = 0..)]
79 | pub capture_states: Option>,
80 |
81 | /// Epochs where the key updates required to bring the directory to the
82 | /// epoch should be captured in the output.
83 | /// e.g. the value 3 will output all key updates that were performed to
84 | /// advance the directory from epoch 2 to 3.
85 | /// Multiple values are accepted e.g. --capture_deltas 9 10
86 | #[arg(long = "capture_deltas", short = 'd', num_args = 0..)]
87 | pub capture_deltas: Option>,
88 |
89 | /// Name of output path.
90 | /// If omitted, output will be printed to stdout.
91 | #[arg(long = "out", short = 'o')]
92 | pub out: Option,
93 |
94 | /// Stops tool from generating random key updates in publishes.
95 | /// Use this if you want the tool to only use explicitly passed key updates.
96 | /// Explicilty passed key updates without values would still use randomly
97 | /// generated values.
98 | #[arg(long = "no_generated_updates", short = 'n')]
99 | pub no_generated_updates: bool,
100 | }
101 |
102 | fn parse_user_events(s: &str) -> Result {
103 | let mut split = s.split(':');
104 | let username_text = split.next().unwrap();
105 | let maybe_events_text = split.next();
106 |
107 | let username = Regex::new(USER_PATTERN)
108 | .unwrap()
109 | .captures(username_text)
110 | .unwrap()
111 | .get(0)
112 | .unwrap()
113 | .as_str();
114 |
115 | let events = if let Some(events_text) = maybe_events_text {
116 | Regex::new(EVENT_PATTERN)
117 | .unwrap()
118 | .captures_iter(events_text)
119 | .map(|event| {
120 | let epoch: u32;
121 | let value: Option;
122 | if event.get(1).is_some() {
123 | epoch = event.get(2).unwrap().as_str().parse().unwrap();
124 | value = Some(AkdValue::from(event.get(3).unwrap().as_str()));
125 | } else {
126 | epoch = event.get(0).unwrap().as_str().parse().unwrap();
127 | value = None;
128 | }
129 | UserEvent { epoch, value }
130 | })
131 | .collect::>()
132 | } else {
133 | vec![]
134 | };
135 |
136 | Ok(User {
137 | label: AkdLabel::from(username),
138 | events,
139 | })
140 | }
141 |
--------------------------------------------------------------------------------
/examples/src/mysql_demo/commands.rs:
--------------------------------------------------------------------------------
1 | // Copyright (c) Meta Platforms, Inc. and affiliates.
2 | //
3 | // This source code is dual-licensed under either the MIT license found in the
4 | // LICENSE-MIT file in the root directory of this source tree or the Apache
5 | // License, Version 2.0 found in the LICENSE-APACHE file in the root directory
6 | // of this source tree. You may select, at your option, one of the above-listed licenses.
7 |
8 | use super::directory_host::DirectoryCommand;
9 | use colored::*;
10 |
// The set of commands the interactive demo CLI understands.
pub(crate) enum Command {
    // Print the help menu.
    Help,
    // Exit the application.
    Exit,
    // Flush the database entries.
    Flush,
    // Print information about the running instance.
    Info,
    // A command forwarded to the directory host.
    Directory(DirectoryCommand),
    // Recognized command with bad arguments; payload presumably carries the
    // error/usage text (construction happens in handle_dir_cmd — verify).
    InvalidArgs(String),
    // Unrecognized input; payload presumably carries the raw text — verify.
    Unknown(String),
}
20 |
21 | impl Command {
22 | pub(crate) fn parse(text: &mut String) -> Command {
23 | trim_newline(text);
24 | let parts: Vec<&str> = text.split(' ').collect();
25 |
26 | let mut command = String::new();
27 | if let Some(head) = parts.first() {
28 | command = String::from(*head);
29 | }
30 |
31 | match command.to_lowercase().as_ref() {
32 | "exit" | "x" => Command::Exit,
33 | "help" | "?" => Command::Help,
34 | "flush" => Command::Flush,
35 | "info" => Command::Info,
36 | cmd => Command::handle_dir_cmd(cmd, parts, text),
37 | }
38 | }
39 |
/// Prints the color-coded help menu for the interactive CLI to stdout.
/// Legend (stated by the menu itself): green = commands, blue = mandatory
/// args, magenta = optional args.
pub(crate) fn print_help_menu() {
    println!(
        "{}",
        "*************************** Help menu ***************************".red()
    );
    println!(
        "{} are commands, {} are mandatory args, {} are optional args",
        "green".green(),
        "blue".blue(),
        "magenta".magenta()
    );
    println!("=============================================================");
    // Built-in commands handled directly by Command::parse
    println!(" {}|{}:\t\t\tprint this menu", "help".green(), "?".green());
    println!(
        " {}|{}:\t\t\texit the application",
        "exit".green(),
        "x".green()
    );
    println!(" {}\t\t\t\tflush the database entries", "flush".green());
    println!(
        " {}\t\t\t\tprints information about the running instance",
        "info".green()
    );
    // Directory commands forwarded to the directory host
    println!(
        " {} {} {}:\t\tpublish key material (value) for user",
        "publish".green(),
        "user".blue(),
        "value".blue()
    );
    println!(
        " {} {}:\t\t\tlookup a proof for user",
        "lookup".green(),
        "user".blue()
    );
    println!(
        " {} {}:\t\t\tlookup key history for user",
        "history".green(),
        "user".blue()
    );
    println!(
        " {} {} {}:\t\tretrieve audit proof between start and end epochs",
        "audit".green(),
        "start".blue(),
        "end".blue()
    );
    println!(
        " {}|{}\t\tretrieve the root hash at the latest epoch",
        "root".green(),
        "root_hash".green(),
    );
}
91 |
92 | // ==== Helpers for managing directory commands ==== //
93 | fn handle_dir_cmd(command: &str, parts: Vec<&str>, full_text: &str) -> Command {
94 | let dir_cmd: Option