├── .github └── workflows │ ├── daphneci.yml │ └── semgrep.yml ├── .gitignore ├── CODEOWNERS ├── Cargo.lock ├── Cargo.toml ├── LICENSE ├── Makefile ├── README.md ├── SECURITY.md ├── crates ├── dapf │ ├── Cargo.toml │ ├── README.md │ ├── dapf_test.sh │ └── src │ │ ├── acceptance │ │ ├── load_testing.rs │ │ └── mod.rs │ │ ├── cli_parsers.rs │ │ ├── functions │ │ ├── decrypt.rs │ │ ├── helper.rs │ │ ├── hpke.rs │ │ ├── leader.rs │ │ ├── mod.rs │ │ └── test_routes.rs │ │ ├── http_client.rs │ │ ├── lib.rs │ │ ├── main.rs │ │ └── test_durations.rs ├── daphne-server │ ├── Cargo.toml │ ├── README.md │ ├── docker-compose-e2e.yaml │ ├── docker │ │ └── example-service.Dockerfile │ ├── examples │ │ ├── configuration-cpu-offload.toml │ │ ├── configuration-helper.toml │ │ ├── configuration-leader.toml │ │ └── service.rs │ ├── src │ │ ├── config.rs │ │ ├── lib.rs │ │ ├── metrics.rs │ │ ├── roles │ │ │ ├── aggregator.rs │ │ │ ├── helper.rs │ │ │ ├── leader.rs │ │ │ └── mod.rs │ │ ├── router │ │ │ ├── aggregator.rs │ │ │ ├── compute_offload.rs │ │ │ ├── extractor.rs │ │ │ ├── helper.rs │ │ │ ├── leader.rs │ │ │ ├── mod.rs │ │ │ └── test_routes.rs │ │ └── storage_proxy_connection │ │ │ ├── kv │ │ │ ├── cache.rs │ │ │ ├── mod.rs │ │ │ └── request_coalescer.rs │ │ │ └── mod.rs │ └── tests │ │ └── e2e │ │ ├── e2e.rs │ │ ├── main.rs │ │ └── test_runner.rs ├── daphne-service-utils │ ├── Cargo.toml │ ├── build.rs │ └── src │ │ ├── bearer_token.rs │ │ ├── capnproto │ │ ├── base.capnp │ │ └── mod.rs │ │ ├── compute_offload │ │ ├── compute_offload.capnp │ │ └── mod.rs │ │ ├── durable_requests │ │ ├── bindings │ │ │ ├── agg_job_response_store.capnp │ │ │ ├── agg_job_response_store.rs │ │ │ ├── aggregate_store.rs │ │ │ ├── aggregate_store_v2.capnp │ │ │ ├── aggregate_store_v2.rs │ │ │ ├── aggregation_job_store.capnp │ │ │ ├── aggregation_job_store.rs │ │ │ ├── mod.rs │ │ │ ├── replay_checker.capnp │ │ │ ├── replay_checker.rs │ │ │ └── test_state_cleaner.rs │ │ ├── durable_request.capnp │ │ └── mod.rs │ │ ├── http_headers.rs │ │ ├── lib.rs │ │ └── test_route_types.rs ├── daphne-worker-test │ ├── .gitignore │ ├── Cargo.toml │ ├── README.md │ ├── docker-compose-storage-proxy.yaml │ ├── docker │ │ ├── aggregator.Dockerfile │ │ ├── docker-compose-e2e.yaml │ │ ├── runtests.Dockerfile │ │ └── storage-proxy.Dockerfile │ ├── src │ │ ├── durable.rs │ │ ├── lib.rs │ │ └── utils.rs │ ├── wrangler.aggregator.toml │ └── wrangler.storage-proxy.toml ├── daphne-worker │ ├── Cargo.toml │ ├── build.rs │ ├── clippy.toml │ └── src │ │ ├── aggregator │ │ ├── config.rs │ │ ├── metrics.rs │ │ ├── mod.rs │ │ ├── queues │ │ │ ├── async_aggregator.rs │ │ │ ├── mod.rs │ │ │ └── queue_messages.capnp │ │ ├── roles │ │ │ ├── aggregator.rs │ │ │ ├── helper.rs │ │ │ ├── leader.rs │ │ │ └── mod.rs │ │ └── router │ │ │ ├── aggregator.rs │ │ │ ├── extractor.rs │ │ │ ├── helper.rs │ │ │ ├── leader.rs │ │ │ ├── mod.rs │ │ │ └── test_routes.rs │ │ ├── durable │ │ ├── agg_job_response_store.rs │ │ ├── aggregate_store.rs │ │ ├── aggregate_store_v2.rs │ │ ├── aggregation_job_store.rs │ │ ├── mod.rs │ │ ├── replay_checker.rs │ │ └── test_state_cleaner.rs │ │ ├── lib.rs │ │ ├── storage │ │ ├── kv │ │ │ ├── cache.rs │ │ │ └── mod.rs │ │ └── mod.rs │ │ ├── storage_proxy │ │ ├── metrics.rs │ │ ├── middleware.rs │ │ └── mod.rs │ │ └── tracing_utils │ │ ├── fields_recording_layer.rs │ │ ├── mod.rs │ │ └── workers_json_layer.rs └── daphne │ ├── Cargo.toml │ ├── benches │ ├── aggregation.rs │ ├── pine.rs │ └── vdaf.rs │ └── src │ ├── audit_log.rs │ ├── constants.rs │ 
├── error │ ├── aborts.rs │ └── mod.rs │ ├── hpke.rs │ ├── lib.rs │ ├── messages │ ├── mod.rs │ ├── request.rs │ └── taskprov.rs │ ├── metrics.rs │ ├── pine │ ├── flp.rs │ ├── mod.rs │ ├── msg.rs │ ├── test_vec │ │ ├── 01 │ │ │ ├── Pine128_0.json │ │ │ ├── Pine128_1.json │ │ │ ├── Pine128_2.json │ │ │ ├── Pine128_3.json │ │ │ ├── Pine32HmacSha256Aes128_0.json │ │ │ ├── Pine32HmacSha256Aes128_1.json │ │ │ ├── Pine32HmacSha256Aes128_2.json │ │ │ ├── Pine32HmacSha256Aes128_3.json │ │ │ ├── Pine32HmacSha256Aes128_4.json │ │ │ ├── Pine32HmacSha256Aes128_5.json │ │ │ ├── Pine32HmacSha256Aes128_6.json │ │ │ ├── Pine32HmacSha256Aes128_7.json │ │ │ ├── Pine64HmacSha256Aes128_0.json │ │ │ ├── Pine64HmacSha256Aes128_1.json │ │ │ ├── Pine64HmacSha256Aes128_2.json │ │ │ ├── Pine64HmacSha256Aes128_3.json │ │ │ ├── Pine64HmacSha256Aes128_4.json │ │ │ ├── Pine64HmacSha256Aes128_5.json │ │ │ ├── Pine64HmacSha256Aes128_6.json │ │ │ ├── Pine64HmacSha256Aes128_7.json │ │ │ ├── Pine64_0.json │ │ │ ├── Pine64_1.json │ │ │ ├── Pine64_2.json │ │ │ └── Pine64_3.json │ │ └── mod.rs │ └── vdaf.rs │ ├── protocol │ ├── aggregator.rs │ ├── client.rs │ ├── collector.rs │ ├── mod.rs │ └── report_init.rs │ ├── roles │ ├── aggregator.rs │ ├── helper │ │ ├── handle_agg_job.rs │ │ └── mod.rs │ ├── leader │ │ ├── in_memory_leader.rs │ │ └── mod.rs │ └── mod.rs │ ├── taskprov.rs │ ├── testing │ ├── mod.rs │ └── report_generator.rs │ └── vdaf │ ├── draft09.rs │ ├── mastic.rs │ ├── mod.rs │ ├── pine.rs │ ├── prio2.rs │ ├── prio3.rs │ └── test_vec │ ├── Prio3SumVecField64MultiproofHmacSha256Aes128_0.json │ ├── Prio3SumVecField64MultiproofHmacSha256Aes128_1.json │ └── Prio3SumVecField64MultiproofHmacSha256Aes128_2.json └── interop ├── Dockerfile.interop_helper └── run_interop_helper.sh /.github/workflows/daphneci.yml: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2025 Cloudflare, Inc. All rights reserved. 
2 | # SPDX-License-Identifier: BSD-3-Clause 3 | --- 4 | name: DaphneCI 5 | 6 | on: 7 | push: 8 | branches: 9 | - main 10 | pull_request: 11 | 12 | jobs: 13 | Testing: 14 | runs-on: ubuntu-latest 15 | steps: 16 | - name: Checking out 17 | uses: actions/checkout@v3 18 | - name: Setting up Rust 19 | uses: actions-rs/toolchain@v1 20 | with: 21 | profile: minimal 22 | toolchain: 1.85.0 23 | components: clippy, rustfmt 24 | override: true 25 | - name: Machete 26 | uses: bnjbvr/cargo-machete@main 27 | - name: Cargo hack 28 | uses: taiki-e/install-action@cargo-hack 29 | - name: Cap'n'proto 30 | run: sudo apt install capnproto 31 | - name: Rust cache 32 | uses: Swatinem/rust-cache@v1 33 | - name: Format 34 | run: cargo fmt --all --check 35 | - name: Documentation 36 | run: RUSTDOCFLAGS='-D warnings' cargo doc --locked --no-deps --all-features --workspace 37 | - name: Linting 38 | run: cargo hack clippy --locked --each-feature -- -D warnings 39 | - name: Linting Tests 40 | run: cargo hack clippy --tests --locked --each-feature -- -D warnings 41 | - name: Testing 42 | run: cargo test 43 | - name: Doc Testing 44 | run: cargo test --doc 45 | 46 | e2e: 47 | runs-on: ubuntu-latest 48 | steps: 49 | - name: Checking out 50 | uses: actions/checkout@v3 51 | 52 | - name: Generate HPKE Signing Key 53 | id: hpke_signing_key 54 | run: | 55 | key=$(openssl ecparam -name prime256v1 -genkey -noout -out -) 56 | { 57 | echo "hpke_signing_key<> "$GITHUB_OUTPUT" 61 | 62 | - name: Generate HPKE Signing Certificate 63 | id: hpke_signing_cert 64 | run: | 65 | echo "${{ steps.hpke_signing_key.outputs.hpke_signing_key }}" > private-key 66 | cert=$(openssl req -key private-key \ 67 | -new -x509 -days 365 -out - \ 68 | -subj '/C=PT/L=Braga/O=Cloudflare Lda/CN=helper.dap.cloudflare.com') 69 | { 70 | echo "hpke_signing_cert<> "$GITHUB_OUTPUT" 74 | 75 | - name: Run integration tests 76 | uses: hoverkraft-tech/compose-action@v2.0.1 77 | with: 78 | compose-file: "./crates/daphne-server/docker-compose-e2e.yaml" 79 | up-flags: "--build --abort-on-container-exit --exit-code-from test" 80 | env: 81 | HPKE_SIGNING_KEY: ${{ steps.hpke_signing_key.outputs.hpke_signing_key }} 82 | E2E_TEST_HPKE_SIGNING_CERTIFICATE: ${{ steps.hpke_signing_cert.outputs.hpke_signing_cert }} 83 | 84 | e2e-worker-aggregator: 85 | runs-on: ubuntu-latest 86 | steps: 87 | - name: Checking out 88 | uses: actions/checkout@v3 89 | 90 | - name: Run integration tests 91 | uses: hoverkraft-tech/compose-action@v2.0.1 92 | with: 93 | compose-file: "./crates/daphne-worker-test/docker/docker-compose-e2e.yaml" 94 | up-flags: "--build --abort-on-container-exit --exit-code-from test" 95 | -------------------------------------------------------------------------------- /.github/workflows/semgrep.yml: -------------------------------------------------------------------------------- 1 | on: 2 | pull_request: {} 3 | workflow_dispatch: {} 4 | push: 5 | branches: 6 | - main 7 | - master 8 | schedule: 9 | - cron: '0 0 * * *' 10 | name: Semgrep config 11 | jobs: 12 | semgrep: 13 | name: semgrep/ci 14 | runs-on: ubuntu-latest 15 | env: 16 | SEMGREP_APP_TOKEN: ${{ secrets.SEMGREP_APP_TOKEN }} 17 | SEMGREP_URL: https://cloudflare.semgrep.dev 18 | SEMGREP_APP_URL: https://cloudflare.semgrep.dev 19 | SEMGREP_VERSION_CHECK_URL: https://cloudflare.semgrep.dev/api/check-version 20 | container: 21 | image: semgrep/semgrep 22 | steps: 23 | - uses: actions/checkout@v4 24 | - run: semgrep ci 25 | -------------------------------------------------------------------------------- /.gitignore: 
-------------------------------------------------------------------------------- 1 | target 2 | node_modules 3 | .DS_Store 4 | **/*.rs.bk 5 | wasm-pack.log 6 | build/ 7 | dist 8 | .wrangler 9 | .idea 10 | .secrets 11 | .vscode 12 | .rusty-hook.toml 13 | package-lock.json 14 | -------------------------------------------------------------------------------- /CODEOWNERS: -------------------------------------------------------------------------------- 1 | * @armfazh @bhalleycf @cjpatton @fisherdarling @cbranch @lbaquerofierro @mendess @RageKnify @jhoyla 2 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2025 Cloudflare, Inc. All rights reserved. 2 | # SPDX-License-Identifier: BSD-3-Clause 3 | 4 | [workspace] 5 | # This is needed to avoid pulling in tokio features in wasm targets, due to new features in version 0.0.18 of the `worker` crate 6 | # See: https://doc.rust-lang.org/edition-guide/rust-2021/default-cargo-resolver.html#details 7 | resolver = "2" 8 | members = ["crates/*"] 9 | 10 | [workspace.package] 11 | version = "0.3.0" 12 | authors = [ 13 | "Christopher Patton ", 14 | "Armando Faz Hernandez ", 15 | "Pedro Mendes ", 16 | ] 17 | edition = "2021" 18 | license = "BSD-3-Clause" 19 | readme = "README.md" 20 | homepage = "https://github.com/cloudflare/daphne" 21 | repository = "https://github.com/cloudflare/daphne" 22 | rust-version = "1.80" 23 | 24 | [profile.release] 25 | opt-level = "s" 26 | 27 | [profile.release-symbols] 28 | inherits = "release" 29 | opt-level = 3 30 | debug = 1 31 | 32 | [workspace.dependencies] 33 | anyhow = "1.0.86" 34 | assert_matches = "1.5.0" 35 | async-trait = "0.1.80" 36 | axum = { version = "0.7.5", default-features = false } 37 | axum-extra = "0.9" 38 | base64 = "0.21.7" 39 | bytes = "1" 40 | cap = "0.1.2" 41 | capnp = "0.18.13" 42 | capnpc = "0.18.1" 43 | cfg-if = "1.0.0" 44 | chrono = { version = "0.4.38", default-features = false, features = ["clock", "wasmbind"] } 45 | clap = { version = "4.5.7", features = ["derive"] } 46 | config = "0.13.4" 47 | constcat = "0.5.0" 48 | criterion = { version = "0.5.1", features = ["async_tokio"] } 49 | deepsize = { version = "0.2.0" } 50 | dhat = "0.3.3" 51 | either = "1.13.0" 52 | futures = "0.3.30" 53 | getrandom = "0.2.15" 54 | headers = "0.4" 55 | hex = { version = "0.4.3", features = ["serde"] } 56 | hpke-rs = "0.2.0" 57 | hpke-rs-crypto = "0.2.0" 58 | hpke-rs-rust-crypto = "0.2.0" 59 | http = "1" 60 | http-body-util = "0.1.2" 61 | mappable-rc = "0.1.1" 62 | matchit = "0.7.3" 63 | p256 = { version = "0.13.2", features = ["ecdsa-core", "ecdsa", "pem"] } 64 | paste = "1.0.15" 65 | prio_draft09 = { package = "prio", version = "0.16.7" } 66 | # TODO Point to version `0.17.0` once release. This revision is one commit ahead of `0.17.0-alpha.0`. 
67 | prio = { git = "https://github.com/divviup/libprio-rs.git", rev = "e5e8a47ee4567f7588d0b5c8d20f75dde4061b2f" } 68 | prometheus = "0.13.4" 69 | rand = "0.8.5" 70 | rayon = "1.10.0" 71 | rcgen = "0.12.1" 72 | regex = "1.10.5" 73 | reqwest = { version = "0.12.5", default-features = false, features = ["rustls-tls-native-roots"] } 74 | ring = "0.17.13" 75 | rustls = "0.23.10" 76 | rustls-native-certs = "0.7" 77 | rustls-pemfile = "2.1.3" 78 | serde = { version = "1.0.203", features = ["derive"] } 79 | serde-wasm-bindgen = "0.6.5" 80 | serde_json = "1.0.118" 81 | serde_yaml = "0.9.33" 82 | static_assertions = "1" 83 | strum = { version = "0.26.3", features = ["derive"] } 84 | subtle = "2.6.1" 85 | thiserror = "1.0.61" 86 | tokio = { version = "1.44.2", features = ["macros", "rt-multi-thread"] } 87 | tower = "0.4.13" 88 | tower-service = "0.3" 89 | tracing = "0.1.40" 90 | tracing-core = "0.1.32" 91 | tracing-subscriber = "0.3.18" 92 | url = { version = "2.5.4", features = ["serde"] } 93 | wasm-bindgen = "0.2.99" 94 | webpki = "0.22.4" 95 | worker = "0.5" 96 | x509-parser = "0.15.1" 97 | 98 | [workspace.dependencies.sentry] 99 | version = "0.32.3" 100 | default-features = false 101 | features = [ 102 | "backtrace", 103 | "contexts", 104 | "debug-images", 105 | "panic", 106 | "reqwest", 107 | "rustls", 108 | ] 109 | 110 | [workspace.lints.rustdoc] 111 | broken_intra_doc_links = "warn" 112 | 113 | [workspace.lints.clippy] 114 | pedantic = { level = "warn", priority = -1 } 115 | module_name_repetitions = "allow" 116 | must_use_candidate = "allow" 117 | missing_panics_doc = "allow" 118 | missing_errors_doc = "allow" 119 | cast_precision_loss = "allow" 120 | too_many_lines = "allow" 121 | needless_pass_by_value = "allow" 122 | if_not_else = "allow" 123 | default_trait_access = "allow" 124 | items_after_statements = "allow" 125 | redundant_closure_for_method_calls = "allow" 126 | inconsistent_struct_constructor = "allow" 127 | similar_names = "allow" 128 | inline_always = "allow" 129 | no_effect_underscore_binding = "allow" 130 | disallowed_methods = "deny" 131 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | BSD 3-Clause License 2 | 3 | Copyright (c) 2022, Cloudflare Inc. 4 | 5 | Redistribution and use in source and binary forms, with or without 6 | modification, are permitted provided that the following conditions are met: 7 | 8 | 1. Redistributions of source code must retain the above copyright notice, this 9 | list of conditions and the following disclaimer. 10 | 11 | 2. Redistributions in binary form must reproduce the above copyright notice, 12 | this list of conditions and the following disclaimer in the documentation 13 | and/or other materials provided with the distribution. 14 | 15 | 3. Neither the name of the copyright holder nor the names of its 16 | contributors may be used to endorse or promote products derived from 17 | this software without specific prior written permission. 18 | 19 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 20 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 21 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 22 | DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 23 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 24 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 25 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 26 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 27 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 28 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 29 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2025 Cloudflare, Inc. All rights reserved. 2 | # SPDX-License-Identifier: BSD-3-Clause 3 | 4 | .PHONY: accept acceptance e2e load leader l helper h storage-proxy s 5 | 6 | leader: 7 | RUST_LOG=hyper=off,debug cargo run \ 8 | --profile release-symbols \ 9 | --features test-utils \ 10 | --example service \ 11 | -- \ 12 | -c ./crates/daphne-server/examples/configuration-leader.toml 13 | l: leader 14 | 15 | helper: 16 | RUST_LOG=hyper=off,debug cargo run \ 17 | --profile release-symbols \ 18 | --features test-utils \ 19 | --example service \ 20 | -- \ 21 | -c ./crates/daphne-server/examples/configuration-helper.toml 22 | h: helper 23 | 24 | compute-offload: 25 | RUST_LOG=hyper=off,debug cargo run \ 26 | --profile release-symbols \ 27 | --features test-utils \ 28 | --example service \ 29 | -- \ 30 | -c ./crates/daphne-server/examples/configuration-cpu-offload.toml 31 | co: compute-offload 32 | 33 | 34 | helper-worker: 35 | cd ./crates/daphne-worker-test/ && \ 36 | wrangler dev -c wrangler.aggregator.toml --port 8788 -e helper 37 | hw: helper-worker 38 | 39 | leader-worker: 40 | cd ./crates/daphne-worker-test/ && \ 41 | wrangler dev -c wrangler.aggregator.toml --port 8788 -e leader 42 | lw: leader-worker 43 | 44 | storage-proxy: 45 | docker compose -f ./crates/daphne-worker-test/docker-compose-storage-proxy.yaml up --build 46 | s: storage-proxy 47 | 48 | e2e: /tmp/private-key /tmp/certificate 49 | export HPKE_SIGNING_KEY="$$(cat /tmp/private-key)"; \ 50 | export E2E_TEST_HPKE_SIGNING_CERTIFICATE="$$(cat /tmp/certificate)"; \ 51 | docker compose -f ./crates/daphne-server/docker-compose-e2e.yaml up \ 52 | --no-attach leader_storage \ 53 | --no-attach helper_storage \ 54 | --build \ 55 | --abort-on-container-exit \ 56 | --exit-code-from test 57 | 58 | e2e-worker: 59 | docker compose -f ./crates/daphne-worker-test/docker/docker-compose-e2e.yaml up \ 60 | --build \ 61 | --abort-on-container-exit \ 62 | --exit-code-from test 63 | 64 | build_interop: 65 | docker build . 
-f ./interop/Dockerfile.interop_helper --tag daphne-interop 66 | 67 | run_interop: 68 | docker run -it -p 8788:8788 -P daphne-interop --name daphne-interop 69 | 70 | /tmp/private-key: 71 | openssl ecparam -name prime256v1 -genkey -noout -out $@ 72 | 73 | /tmp/certificate: 74 | openssl req -key /tmp/private-key -new -x509 -days 1 -out /tmp/certificate -subj '/C=US/L=Palo Alto/O=Cloudflare Lda/CN=dap.cloudflare.com' 75 | 76 | reset-storage: 77 | -cargo run --bin dapf -- test-routes clear-storage -s p256_hkdf_sha256 http://localhost:8787/v09/ 78 | -cargo run --bin dapf -- test-routes clear-storage -s p256_hkdf_sha256 http://localhost:8788/v09/ 79 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Daphne 2 | 3 | Daphne is a Rust implementation of the Distributed Aggregation Protocol 4 | ([DAP](https://datatracker.ietf.org/doc/draft-ietf-ppm-dap/)) standard. DAP is 5 | under active development in the PPM working group of the IETF. 6 | 7 | Daphne currently implements: 8 | 9 | * draft-ietf-ppm-dap-09 10 | * Prio3: draft-irtf-cfrg-vdaf-08 11 | * Taskprov extension: draft-wang-ppm-dap-taskprov-06 12 | * Interop test API: draft-dcook-ppm-dap-interop-test-design-07 13 | 14 | The [repository](https://github.com/cloudflare/daphne) contains a number of 15 | crates. The main one, `daphne`, implements the core DAP protocol logic for 16 | Clients, Aggregators, and Collectors. This crate does not provide the complete, 17 | end-to-end functionality of any party. Instead, it defines traits for the 18 | functionalities that a concrete instantiation of the protocol is required to 19 | implement. We call these functionalities "roles". 20 | 21 | The remaining crates are not intended for general use: 22 | 23 | * `daphne-server`, `daphne-worker`, `daphne-service-utils` -- Components of 24 | Cloudflare's backend for its DAP deployments. These crates are not intended 25 | for general use. 26 | 27 | * `daphne-worker-test` -- Integration tests for `daphne` and Cloudflare's 28 | backend. 29 | 30 | * `dapf` (short for "DAP Functions") -- CLI for interacting with DAP 31 | deployments. Some of its features are specific to Cloudflare's own 32 | deployment. 33 | 34 | 35 | ## Requirements 36 | 37 | [Cap'n Proto](https://capnproto.org/) is required to build DAP. 38 | 39 | ```sh 40 | # debian 41 | apt install capnproto 42 | 43 | # macos 44 | brew install capnp 45 | ``` 46 | 47 | ## Testing 48 | 49 | The `daphne` crate relies on unit tests. To test integration with Cloudflare's 50 | backend, run `make e2e`. 51 | -------------------------------------------------------------------------------- /SECURITY.md: -------------------------------------------------------------------------------- 1 | # Reporting Security Vulnerabilities 2 | 3 | Please see [this page](https://www.cloudflare.com/.well-known/security.txt) for information on how to report a vulnerability to Cloudflare. Thanks! 4 | -------------------------------------------------------------------------------- /crates/dapf/Cargo.toml: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2024 Cloudflare, Inc. All rights reserved. 
2 | # SPDX-License-Identifier: BSD-3-Clause 3 | 4 | [package] 5 | name = "dapf" 6 | version.workspace = true 7 | authors.workspace = true 8 | edition.workspace = true 9 | license.workspace = true 10 | readme.workspace = true 11 | homepage.workspace = true 12 | repository.workspace = true 13 | 14 | [dependencies] 15 | anyhow.workspace = true 16 | chrono.workspace = true 17 | clap = { workspace = true, features = ["env"] } 18 | daphne = { path = "../daphne", features = ["report-generator", "prometheus"] } 19 | daphne-service-utils = { path = "../daphne-service-utils", features = ["test-utils"] } 20 | futures.workspace = true 21 | hex.workspace = true 22 | prio.workspace = true 23 | prometheus.workspace = true 24 | rand.workspace = true 25 | reqwest = { workspace = true, features = ["json"] } 26 | rustls.workspace = true 27 | rustls-native-certs.workspace = true 28 | rustls-pemfile.workspace = true 29 | sentry.workspace = true 30 | serde.workspace = true 31 | serde_json.workspace = true 32 | tokio.workspace = true 33 | tracing-subscriber = { workspace = true, features = ["env-filter"] } 34 | tracing.workspace = true 35 | url.workspace = true 36 | webpki.workspace = true 37 | x509-parser.workspace = true 38 | -------------------------------------------------------------------------------- /crates/dapf/README.md: -------------------------------------------------------------------------------- 1 | # dapf (DAP functions) 2 | 3 | This binary provides various functions that can be called to interact with DAP 4 | deployments. 5 | 6 | Its subcommands and their functionality can be explored via the built-in 7 | help pages the CLI provides. 8 | 9 | 10 | ## Examples 11 | 12 | Here are some examples of common dapf functionality. To avoid installing the 13 | tool, replace all instances of `dapf` with `cargo run --bin dapf`. 14 | 15 | ### Generate an hpke config 16 | 17 | With the default algorithm: 18 | ```sh 19 | dapf hpke generate 20 | ``` 21 | 22 | With a specific algorithm: 23 | ```sh 24 | dapf hpke generate x25519_hkdf_sha256 25 | ``` 26 | 27 | This will output the config twice. 28 | 29 | First, the receiver config as JSON: 30 | ```json 31 | { 32 | "config": { 33 | "id": 155, 34 | "kem_id": "x25519_hkdf_sha256", 35 | "kdf_id": "hkdf_sha256", 36 | "aead_id": "aes128_gcm", 37 | "public_key": "a63dd7e6bbfc68d54e8b25e39ceb826cdf5101592a026590cb935d754e4f210d" 38 | }, 39 | "private_key": "a5adeefcb0e09053e6ebded53277b65c81412c8eafadfe5706aef3453d45b05c" 40 | } 41 | ``` 42 | 43 | Second, as a [DAP-encoded][hpke-config-encoding], base64-encoded string which can be 44 | used when issuing requests to `internal/test/add_task`: 45 | ``` 46 | DAP and base64 encoded hpke config: mwAgAAEAAQAgpj3X5rv8aNVOiyXjnOuCbN9RAVkqAmWQy5NddU5PIQ0 47 | ``` 48 | 49 | ### Using the interop API defined by draft-dcook-ppm-dap-interop-test-design-07 50 | 51 | Draft: https://datatracker.ietf.org/doc/html/draft-dcook-ppm-dap-interop-test-design-07 52 | 53 | #### Generating the request payload for `internal/test/add_task` 54 | 55 | Using `dapf test-routes create-add-task-json`, the JSON used for adding a new 56 | testing task can easily be created. 57 | 58 | All the parameters of this task can be passed through command-line options; the 59 | parameters that aren't passed in are asked for interactively.
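For example, an invocation that supplies most of the parameters up front might look like the following (the URLs and values are illustrative, borrowed from the local-development defaults used elsewhere in this repository; any flag left out, such as `--collector-hpke-config`, is asked for interactively):

```sh
dapf test-routes create-add-task-json \
    --leader-url http://localhost:8787/v09/ \
    --helper-url http://localhost:8788/v09/ \
    --role helper \
    --query time-interval \
    --vdaf '{"prio3": {"sum": {"bits": 8}}}' \
    --leader-auth-token "I-am-the-leader" \
    --min-batch-size 10 \
    --time-precision 3600
```

The full set of options is: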
60 | 61 | ```sh 62 | Options: 63 | --task-id 64 | --leader-url 65 | --helper-url 66 | --vdaf 67 | --leader-auth-token 68 | --collector-authentication-token 69 | --role 70 | --query 71 | --min-batch-size 72 | --collector-hpke-config 73 | --time-precision 74 | --expires-in-seconds 75 | ``` 76 | 77 | Using this command should output something like this, which you can then use to 78 | issue requests to `internal/test/add_task` 79 | ```json 80 | { 81 | "task_id": "QXI0XDeCY06OtcHMWxwEyuLwe-MzUWQvBTlFXNl-H4U", 82 | "leader": "http://leader/", 83 | "helper": "http://helper/", 84 | "vdaf": { 85 | "type": "Prio3SumVecField64MultiproofHmacSha256Aes128", 86 | "bits": "1", 87 | "length": "100000", 88 | "chunk_length": "320" 89 | }, 90 | "leader_authentication_token": "I-am-the-leader", 91 | "role": "helper", 92 | "vdaf_verify_key": "dJXXtUfRAdIJ7z87revcZpqXZ16nbF9HB9OyZ1CMHxM", 93 | "batch_mode": 2, 94 | "min_batch_size": 10, 95 | "time_precision": 3600, 96 | "collector_hpke_config": "gwAgAAEAAQAgPMw62iLcCzNn0DHqSwKHanelnvMrWhwGEJVSpRpzmhM", 97 | "task_expiration": 1729263391 98 | } 99 | ``` 100 | 101 | ### Decoding responses from aggregators 102 | 103 | 104 | #### Decoding aggregate shares 105 | 106 | Once you've collected from the leader you may want to see what the result of the 107 | aggregation was, for that you can use the `decode` subcommand: 108 | 109 | ```sh 110 | dapf decode ./aggregate_share collection \ 111 | --vdaf-config '{"prio3": { "sum": { "bits": 8 } } }' \ 112 | --task-id "8TuT5Z5fAuutsX9DZWSqkUw6pzDl96d3tdsDJgWH2VY" \ 113 | --hpke-config-path ./hpke-config.json 114 | ``` 115 | 116 | [hpke-config-encoding]: https://datatracker.ietf.org/doc/html/draft-ietf-ppm-dap-09#section-4.4.1-6 117 | -------------------------------------------------------------------------------- /crates/dapf/dapf_test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Copyright (c) 2022 Cloudflare, Inc. All rights reserved. 4 | # SPDX-License-Identifier: BSD-3-Clause 5 | 6 | # Script for demonstrating how `dapf` is used. To run it, you need to have 7 | # `dapf` in your $PATH, e.g., by running `cargo install --path .`. 8 | 9 | set -eo pipefail 10 | # trap read debug 11 | 12 | dapf() ( 13 | set +x 14 | if command -v dapf > /dev/null 2>&1 && ! 
declare -F dapf > /dev/null 2>&1; then 15 | command dapf "$@" 16 | else 17 | cargo run --bin dapf --quiet -- "$@" 18 | fi 19 | ) 20 | 21 | 22 | # Task configuration 23 | LEADER_BASE_URL="http://127.0.0.1:8787" 24 | HELPER_BASE_URL="http://127.0.0.1:8788" 25 | for url in "$LEADER_BASE_URL" "$HELPER_BASE_URL"; do 26 | until curl --fail -X POST "$url/internal/test/ready"; do 27 | sleep 10 28 | done 29 | echo "$url ready" 30 | done 31 | 32 | MIN_BATCH_DURATION=3600 # seconds 33 | VDAF_CONFIG=$(cat << EOF 34 | { 35 | "prio3": { 36 | "sum": { 37 | "bits": 10 38 | } 39 | } 40 | } 41 | EOF 42 | ) 43 | 44 | LEADER_AUTH_TOKEN="I-am-the-leader" 45 | COLLECTOR_AUTH_TOKEN="I-am-the-collector" 46 | BASE64_ENCODED_HPKE_CONFIG=$(mktemp) 47 | COLLECTOR_HPKE_RECEIVER_CONFIG=$(dapf hpke generate 2>"$BASE64_ENCODED_HPKE_CONFIG") 48 | LEADER_TASK_CONFIG=$(dapf test-routes create-add-task-json \ 49 | --leader-url "$LEADER_BASE_URL/v09/" \ 50 | --helper-url "$HELPER_BASE_URL/v09/" \ 51 | --query "time-interval" \ 52 | --role leader \ 53 | --vdaf "$VDAF_CONFIG" \ 54 | --leader-auth-token "$LEADER_AUTH_TOKEN" \ 55 | --collector-auth-token "$COLLECTOR_AUTH_TOKEN" \ 56 | --collector-hpke-config "$(tail -n 1 "$BASE64_ENCODED_HPKE_CONFIG")" \ 57 | anyhow::Result { 22 | let hpke_config: HpkeReceiverConfig = serde_json::from_reader( 23 | std::fs::File::open(hpke_config_path) 24 | .with_context(|| format!("opening {}", hpke_config_path.display()))?, 25 | ) 26 | .with_context(|| format!("deserializing the config at {}", hpke_config_path.display()))?; 27 | let agg_shares = vdaf_config.consume_encrypted_agg_shares( 28 | &hpke_config, 29 | task_id, 30 | &batch_selector, 31 | collection.report_count, 32 | &DapAggregationParam::Empty, 33 | collection.encrypted_agg_shares.to_vec(), 34 | version, 35 | )?; 36 | Ok(agg_shares) 37 | } 38 | -------------------------------------------------------------------------------- /crates/dapf/src/functions/helper.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2024 Cloudflare, Inc. All rights reserved. 2 | // SPDX-License-Identifier: BSD-3-Clause 3 | 4 | use anyhow::Context as _; 5 | use daphne::{ 6 | constants::DapMediaType, 7 | messages::{ 8 | taskprov::TaskprovAdvertisement, AggregateShare, AggregateShareReq, AggregationJobInitReq, 9 | AggregationJobResp, 10 | }, 11 | DapVersion, 12 | }; 13 | use daphne_service_utils::{bearer_token::BearerToken, http_headers}; 14 | use prio::codec::ParameterizedEncode as _; 15 | use reqwest::header; 16 | use url::Url; 17 | 18 | use crate::HttpClient; 19 | 20 | use super::retry_and_decode; 21 | 22 | impl HttpClient { 23 | pub async fn submit_aggregation_job_init_req( 24 | &self, 25 | url: Url, 26 | agg_job_init_req: AggregationJobInitReq, 27 | version: DapVersion, 28 | opts: Options<'_>, 29 | ) -> anyhow::Result { 30 | retry_and_decode(&version, || async { 31 | self.put(url.clone()) 32 | .body(agg_job_init_req.get_encoded_with_param(&version).unwrap()) 33 | .headers(construct_request_headers( 34 | DapMediaType::AggregationJobInitReq 35 | .as_str_for_version(version) 36 | .with_context(|| { 37 | format!("AggregationJobInitReq media type is not defined for {version}") 38 | })?, 39 | version, 40 | opts, 41 | )?) 
42 | .send() 43 | .await 44 | .context("sending AggregationJobInitReq") 45 | }) 46 | .await 47 | } 48 | 49 | pub async fn poll_aggregation_job_init( 50 | &self, 51 | url: Url, 52 | version: DapVersion, 53 | opts: Options<'_>, 54 | ) -> anyhow::Result { 55 | retry_and_decode(&version, || async { 56 | self.get(url.clone()) 57 | .headers(construct_request_headers( 58 | DapMediaType::AggregationJobInitReq 59 | .as_str_for_version(version) 60 | .with_context(|| { 61 | format!("AggregationJobInitReq media type is not defined for {version}") 62 | })?, 63 | version, 64 | opts, 65 | )?) 66 | .send() 67 | .await 68 | .context("polling aggregation job init req") 69 | }) 70 | .await 71 | } 72 | 73 | pub async fn get_aggregate_share( 74 | &self, 75 | url: Url, 76 | agg_share_req: AggregateShareReq, 77 | version: DapVersion, 78 | opts: Options<'_>, 79 | ) -> anyhow::Result { 80 | retry_and_decode(&(), || async { 81 | self.post(url.clone()) 82 | .body(agg_share_req.get_encoded_with_param(&version).unwrap()) 83 | .headers(construct_request_headers( 84 | DapMediaType::AggregateShareReq 85 | .as_str_for_version(version) 86 | .with_context(|| { 87 | format!("AggregateShareReq media type is not defined for {version}") 88 | })?, 89 | version, 90 | opts, 91 | )?) 92 | .send() 93 | .await 94 | .context("sending AggregateShareReq") 95 | }) 96 | .await 97 | } 98 | } 99 | 100 | #[derive(Default, Debug, Clone, Copy)] 101 | pub struct Options<'s> { 102 | pub taskprov_advertisement: Option<&'s TaskprovAdvertisement>, 103 | pub bearer_token: Option<&'s BearerToken>, 104 | } 105 | 106 | fn construct_request_headers( 107 | media_type: &str, 108 | version: DapVersion, 109 | options: Options<'_>, 110 | ) -> Result { 111 | let mut headers = header::HeaderMap::new(); 112 | headers.insert( 113 | header::CONTENT_TYPE, 114 | header::HeaderValue::from_str(media_type)?, 115 | ); 116 | let Options { 117 | taskprov_advertisement, 118 | bearer_token, 119 | } = options; 120 | if let Some(taskprov) = taskprov_advertisement { 121 | headers.insert( 122 | const { header::HeaderName::from_static(http_headers::DAP_TASKPROV) }, 123 | header::HeaderValue::from_str(&taskprov.serialize_to_header_value(version).unwrap())?, 124 | ); 125 | } 126 | if let Some(token) = bearer_token { 127 | headers.insert( 128 | const { header::HeaderName::from_static(http_headers::DAP_AUTH_TOKEN) }, 129 | header::HeaderValue::from_str(token.as_str())?, 130 | ); 131 | } 132 | Ok(headers) 133 | } 134 | -------------------------------------------------------------------------------- /crates/dapf/src/functions/hpke.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2024 Cloudflare, Inc. All rights reserved. 
2 | // SPDX-License-Identifier: BSD-3-Clause 3 | 4 | use std::{io::Cursor, path::Path}; 5 | 6 | use anyhow::{anyhow, Context as _}; 7 | use daphne::messages::{decode_base64url_vec, HpkeConfigList}; 8 | use daphne_service_utils::http_headers; 9 | use prio::codec::Decode as _; 10 | use url::Url; 11 | use webpki::{EndEntityCert, ECDSA_P256_SHA256}; 12 | use x509_parser::pem::Pem; 13 | 14 | use crate::HttpClient; 15 | 16 | use super::retry; 17 | 18 | impl HttpClient { 19 | pub async fn get_hpke_config( 20 | &self, 21 | base_url: &Url, 22 | certificate_file: Option<&Path>, 23 | ) -> anyhow::Result { 24 | let url = base_url.join("hpke_config")?; 25 | retry( 26 | || async { 27 | self.get(url.as_str()) 28 | .send() 29 | .await 30 | .with_context(|| "request failed") 31 | }, 32 | |resp| async { 33 | let maybe_signature = resp.headers().get(http_headers::HPKE_SIGNATURE).cloned(); 34 | let hpke_config_bytes = resp.bytes().await.context("failed to read hpke config")?; 35 | if let Some(cert_path) = certificate_file { 36 | let cert = 37 | std::fs::read_to_string(cert_path).context("reading the certificate")?; 38 | let Some(signature) = maybe_signature else { 39 | anyhow::bail!("Aggregator did not sign its response"); 40 | }; 41 | let signature_bytes = decode_base64url_vec(signature.as_bytes()) 42 | .context("decoding the signature")?; 43 | let (cert_pem, _bytes_read) = Pem::read(Cursor::new(cert.as_bytes())) 44 | .context("reading PEM certificate")?; 45 | let cert = EndEntityCert::try_from(cert_pem.contents.as_ref()) 46 | .map_err(|e| anyhow!("{e:?}")) // webpki::Error does not implement std::error::Error 47 | .context("parsing PEM certificate")?; 48 | 49 | cert.verify_signature( 50 | &ECDSA_P256_SHA256, 51 | &hpke_config_bytes, 52 | signature_bytes.as_ref(), 53 | ) 54 | .map_err(|e| anyhow!("signature not verified: {}", e.to_string()))?; 55 | } 56 | Ok(HpkeConfigList::get_decoded(&hpke_config_bytes)?) 57 | }, 58 | ) 59 | .await 60 | } 61 | } 62 | -------------------------------------------------------------------------------- /crates/dapf/src/functions/leader.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2024 Cloudflare, Inc. All rights reserved. 2 | // SPDX-License-Identifier: BSD-3-Clause 3 | 4 | use anyhow::{anyhow, Context}; 5 | use daphne::{ 6 | constants::DapMediaType, 7 | messages::{Collection, CollectionJobId, CollectionReq, Report, TaskId}, 8 | DapVersion, 9 | }; 10 | use daphne_service_utils::{bearer_token::BearerToken, http_headers}; 11 | use prio::codec::{ParameterizedDecode as _, ParameterizedEncode as _}; 12 | use rand::{thread_rng, Rng}; 13 | use reqwest::StatusCode; 14 | use url::Url; 15 | 16 | use crate::HttpClient; 17 | 18 | use super::{retry, retry_and_decode}; 19 | 20 | impl HttpClient { 21 | pub async fn upload( 22 | &self, 23 | url: &Url, 24 | task_id: &TaskId, 25 | report: Report, 26 | version: DapVersion, 27 | ) -> anyhow::Result<()> { 28 | // Post the report to the Leader. 
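// The encoded report is sent with an HTTP PUT to `{leader}/tasks/{task_id}/reports`,
// with the version-specific `Report` media type as its content type; transient
// failures are retried by `retry_and_decode` (defined in `functions/mod.rs`).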
29 | let mut headers = reqwest::header::HeaderMap::new(); 30 | headers.insert( 31 | reqwest::header::CONTENT_TYPE, 32 | reqwest::header::HeaderValue::from_str( 33 | DapMediaType::Report 34 | .as_str_for_version(version) 35 | .ok_or_else(|| anyhow!("invalid content-type for dap version"))?, 36 | ) 37 | .expect("failecd to construct content-type header"), 38 | ); 39 | retry_and_decode(&(), || async { 40 | self.put(url.join(&format!("tasks/{task_id}/reports")).unwrap()) 41 | .body(report.get_encoded_with_param(&version).unwrap()) 42 | .headers(headers.clone()) 43 | .send() 44 | .await 45 | .context("uploading a report") 46 | }) 47 | .await 48 | } 49 | 50 | pub async fn start_collection_job( 51 | &self, 52 | url: &Url, 53 | task_id: &TaskId, 54 | collect_req: &CollectionReq, 55 | version: DapVersion, 56 | collector_auth_token: Option<&BearerToken>, 57 | ) -> anyhow::Result { 58 | let collect_job_id = CollectionJobId(thread_rng().gen()); 59 | let mut headers = reqwest::header::HeaderMap::new(); 60 | headers.insert( 61 | reqwest::header::CONTENT_TYPE, 62 | reqwest::header::HeaderValue::from_str( 63 | DapMediaType::CollectionReq 64 | .as_str_for_version(version) 65 | .ok_or_else(|| anyhow!("invalid content-type for dap version"))?, 66 | ) 67 | .expect("failed to construct content-type hader"), 68 | ); 69 | if let Some(collector_auth_token) = collector_auth_token { 70 | headers.insert( 71 | reqwest::header::HeaderName::from_static(http_headers::DAP_AUTH_TOKEN), 72 | reqwest::header::HeaderValue::from_str(collector_auth_token.as_str())?, 73 | ); 74 | } 75 | let () = retry_and_decode(&(), || async { 76 | self.put(url.join(&format!("tasks/{task_id}/collection_jobs/{collect_job_id}"))?) 77 | .body(collect_req.get_encoded_with_param(&version)?) 78 | .headers(headers.clone()) 79 | .send() 80 | .await 81 | .context("starting a collection job ") 82 | }) 83 | .await?; 84 | 85 | Ok(collect_job_id) 86 | } 87 | 88 | pub async fn poll_collection_job( 89 | &self, 90 | url: &Url, 91 | task_id: &TaskId, 92 | collect_job_id: &CollectionJobId, 93 | version: DapVersion, 94 | collector_auth_token: Option<&BearerToken>, 95 | ) -> anyhow::Result> { 96 | let mut headers = reqwest::header::HeaderMap::new(); 97 | if let Some(collector_auth_token) = collector_auth_token { 98 | headers.insert( 99 | reqwest::header::HeaderName::from_static(http_headers::DAP_AUTH_TOKEN), 100 | reqwest::header::HeaderValue::from_str(collector_auth_token.as_str())?, 101 | ); 102 | } 103 | retry( 104 | || async { 105 | self.post(url.join(&format!("tasks/{task_id}/collection_jobs/{collect_job_id}"))?) 106 | .headers(headers.clone()) 107 | .send() 108 | .await 109 | .context("polling collection job") 110 | }, 111 | |resp| async { 112 | if resp.status() == StatusCode::ACCEPTED { 113 | Ok(None) 114 | } else { 115 | let bytes = resp.bytes().await.context("reading bytes from wire")?; 116 | Ok(Some( 117 | Collection::get_decoded_with_param(&version, &bytes) 118 | .context("decoding Collection body")?, 119 | )) 120 | } 121 | }, 122 | ) 123 | .await 124 | } 125 | } 126 | -------------------------------------------------------------------------------- /crates/dapf/src/functions/mod.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2024 Cloudflare, Inc. All rights reserved. 2 | // SPDX-License-Identifier: BSD-3-Clause 3 | 4 | //! The various DAP functions dapf can perform. 
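//!
//! Requests issued by these functions generally go through the `retry` and
//! `retry_and_decode` helpers defined below: a request is attempted up to five
//! times, retrying only on `503 Service Unavailable`; a `400` response is
//! surfaced as a parsed problem-details document, and any other non-success
//! status aborts immediately.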
5 | 6 | use anyhow::{anyhow, bail, Context as _}; 7 | use daphne::error::aborts::ProblemDetails; 8 | use prio::codec::ParameterizedDecode; 9 | use reqwest::StatusCode; 10 | use std::future::Future; 11 | 12 | pub mod decrypt; 13 | pub mod helper; 14 | pub mod hpke; 15 | pub mod leader; 16 | pub mod test_routes; 17 | 18 | async fn response_to_anyhow(resp: reqwest::Response) -> anyhow::Error { 19 | anyhow::anyhow!( 20 | "unexpected response: {}\n{}", 21 | format!("{resp:?}"), 22 | match resp 23 | .text() 24 | .await 25 | .context("reading body while processing error") 26 | .map_err(|e| e.to_string()) 27 | { 28 | Ok(body) => format!("body: {body}"), 29 | Err(error) => format!("{error:?}"), 30 | } 31 | ) 32 | } 33 | 34 | async fn retry(mut request: F, handle_status: H) -> anyhow::Result 35 | where 36 | F: FnMut() -> Fut, 37 | Fut: Future>, 38 | H: FnOnce(reqwest::Response) -> FutH, 39 | FutH: Future>, 40 | { 41 | const RETRY_COUNT: usize = 5; 42 | for i in 1..=RETRY_COUNT { 43 | let resp = request().await?; 44 | 45 | return match resp.status() { 46 | StatusCode::BAD_REQUEST => { 47 | let text = resp.text().await?; 48 | let problem_details: ProblemDetails = 49 | serde_json::from_str(&text).with_context(|| { 50 | format!( 51 | "400 Bad Request: failed to parse problem details document: {text:?}" 52 | ) 53 | })?; 54 | Err(anyhow!("400 Bad Request: {problem_details:?}")) 55 | } 56 | StatusCode::INTERNAL_SERVER_ERROR => Err(anyhow::anyhow!( 57 | "500 Internal Server Error: {}", 58 | resp.text().await? 59 | )), 60 | StatusCode::SERVICE_UNAVAILABLE if i == RETRY_COUNT => bail!("service unavailable"), 61 | StatusCode::SERVICE_UNAVAILABLE => { 62 | tracing::info!("retrying...."); 63 | continue; 64 | } 65 | s if !s.is_success() => Err(response_to_anyhow(resp).await), 66 | _ => handle_status(resp).await, 67 | }; 68 | } 69 | unreachable!() 70 | } 71 | 72 | async fn retry_and_decode(params: &P, request: F) -> anyhow::Result 73 | where 74 | F: FnMut() -> Fut, 75 | Fut: Future>, 76 | R: ParameterizedDecode

, 77 | { 78 | retry(request, |resp| async { 79 | let output_type = std::any::type_name::(); 80 | let bytes = resp 81 | .bytes() 82 | .await 83 | .with_context(|| format!("transfering bytes from the {output_type}"))?; 84 | 85 | R::get_decoded_with_param(params, &bytes) 86 | .with_context(|| format!("failed to parse response to {output_type} from Helper")) 87 | .with_context(|| format!("faulty bytes: {bytes:?}")) 88 | }) 89 | .await 90 | } 91 | -------------------------------------------------------------------------------- /crates/dapf/src/functions/test_routes.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2024 Cloudflare, Inc. All rights reserved. 2 | // SPDX-License-Identifier: BSD-3-Clause 3 | 4 | //! Functions used to interact with `/internal/*` routes that implement the [interop][interop] draft. 5 | //! 6 | //! [interop]: https://divergentdave.github.io/draft-dcook-ppm-dap-interop-test-design/draft-dcook-ppm-dap-interop-test-design.html 7 | 8 | use anyhow::{bail, Context}; 9 | use daphne::{ 10 | hpke::{HpkeKemId, HpkeReceiverConfig}, 11 | messages::HpkeConfigList, 12 | }; 13 | use rand::{thread_rng, Rng}; 14 | use url::Url; 15 | 16 | use crate::HttpClient; 17 | 18 | impl HttpClient { 19 | pub async fn add_hpke_config( 20 | &self, 21 | aggregator_url: &Url, 22 | kem_alg: HpkeKemId, 23 | ) -> anyhow::Result<()> { 24 | let mut rng = thread_rng(); 25 | let HpkeConfigList { hpke_configs } = match self.get_hpke_config(aggregator_url, None).await 26 | { 27 | Ok(configs) => configs, 28 | Err(_) => HpkeConfigList { 29 | hpke_configs: vec![], 30 | }, 31 | }; 32 | 33 | let receiver_config = loop { 34 | let config = HpkeReceiverConfig::gen(rng.gen(), kem_alg) 35 | .context("failed to generate HPKE receiver config")?; 36 | if hpke_configs.iter().all(|c| c.id != config.config.id) { 37 | break config; 38 | } 39 | }; 40 | 41 | self.post( 42 | aggregator_url 43 | .join("internal/test/add_hpke_config") 44 | .unwrap(), 45 | ) 46 | .json(&receiver_config) 47 | .send() 48 | .await 49 | .context("adding the hpke config")? 50 | .error_for_status() 51 | .context("adding the hpke config")?; 52 | Ok(()) 53 | } 54 | 55 | pub async fn delete_all_storage(&self, aggregator_url: &Url) -> anyhow::Result<()> { 56 | let resp = self 57 | .post(aggregator_url.join("/internal/delete_all").unwrap()) 58 | .send() 59 | .await 60 | .context("deleting storage")?; 61 | if resp.status().is_success() { 62 | return Ok(()); 63 | } 64 | bail!( 65 | "delete storage request failed. {} {}", 66 | resp.status(), 67 | resp.text().await? 68 | ); 69 | } 70 | } 71 | -------------------------------------------------------------------------------- /crates/dapf/src/lib.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2024 Cloudflare, Inc. All rights reserved. 2 | // SPDX-License-Identifier: BSD-3-Clause 3 | 4 | pub mod acceptance; 5 | pub mod cli_parsers; 6 | pub mod functions; 7 | pub mod http_client; 8 | mod test_durations; 9 | 10 | use anyhow::Context; 11 | use daphne::DapVersion; 12 | use url::Url; 13 | 14 | pub use http_client::HttpClient; 15 | 16 | pub fn deduce_dap_version_from_url(url: &Url) -> anyhow::Result { 17 | url.path_segments() 18 | .context("no version specified in leader url")? 
19 | .next() 20 | .unwrap() // when path_segments returns Some it's guaranteed to contain at least one segment 21 | .parse() 22 | .context("failed to parse version parameter from url") 23 | } 24 | -------------------------------------------------------------------------------- /crates/dapf/src/test_durations.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2024 Cloudflare, Inc. All rights reserved. 2 | // SPDX-License-Identifier: BSD-3-Clause 3 | 4 | use std::{ 5 | ops::{Add, Div}, 6 | time::Duration, 7 | }; 8 | 9 | #[derive(Debug, Default, Clone, Copy)] 10 | pub struct TestDurations { 11 | pub hpke_config_fetch: Duration, 12 | pub aggregate_init_req: Duration, 13 | pub aggregate_share_req: Duration, 14 | } 15 | 16 | impl Add<&Self> for TestDurations { 17 | type Output = Self; 18 | fn add(self, rhs: &Self) -> Self::Output { 19 | Self { 20 | hpke_config_fetch: self.hpke_config_fetch + rhs.hpke_config_fetch, 21 | aggregate_init_req: self.aggregate_init_req + rhs.aggregate_init_req, 22 | aggregate_share_req: self.aggregate_share_req + rhs.aggregate_share_req, 23 | } 24 | } 25 | } 26 | 27 | impl Add for TestDurations { 28 | type Output = Self; 29 | fn add(self, rhs: Self) -> Self::Output { 30 | Self { 31 | hpke_config_fetch: self.hpke_config_fetch + rhs.hpke_config_fetch, 32 | aggregate_init_req: self.aggregate_init_req + rhs.aggregate_init_req, 33 | aggregate_share_req: self.aggregate_share_req + rhs.aggregate_share_req, 34 | } 35 | } 36 | } 37 | 38 | impl Div for TestDurations { 39 | type Output = Self; 40 | 41 | fn div(self, rhs: u32) -> Self::Output { 42 | Self { 43 | hpke_config_fetch: self.hpke_config_fetch / rhs, 44 | aggregate_init_req: self.aggregate_init_req / rhs, 45 | aggregate_share_req: self.aggregate_share_req / rhs, 46 | } 47 | } 48 | } 49 | 50 | impl AsRef for TestDurations { 51 | fn as_ref(&self) -> &Self { 52 | self 53 | } 54 | } 55 | 56 | impl TestDurations { 57 | pub fn total_service_time(&self) -> Duration { 58 | self.hpke_config_fetch + self.aggregate_init_req + self.aggregate_share_req 59 | } 60 | } 61 | -------------------------------------------------------------------------------- /crates/daphne-server/Cargo.toml: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2025 Cloudflare, Inc. All rights reserved. 
2 | # SPDX-License-Identifier: BSD-3-Clause 3 | 4 | [package] 5 | name = "daphne-server" 6 | version.workspace = true 7 | authors.workspace = true 8 | edition.workspace = true 9 | license.workspace = true 10 | readme.workspace = true 11 | homepage.workspace = true 12 | repository.workspace = true 13 | description = "Workers backend for Daphne" 14 | 15 | [package.metadata."docs.rs"] 16 | all-features = true 17 | 18 | [dependencies] 19 | daphne = { path = "../daphne" } 20 | daphne-service-utils = { path = "../daphne-service-utils", features = ["durable_requests", "compute-offload"] } 21 | either.workspace = true 22 | futures.workspace = true 23 | hex.workspace = true 24 | http.workspace = true 25 | mappable-rc.workspace = true 26 | p256.workspace = true 27 | prio.workspace = true 28 | prometheus = { workspace = true, optional = true } 29 | serde.workspace = true 30 | serde_json.workspace = true 31 | thiserror.workspace = true 32 | tokio.workspace = true 33 | tower = { workspace = true, features = ["util"] } 34 | tracing.workspace = true 35 | url.workspace = true 36 | rayon.workspace = true 37 | 38 | [dependencies.axum] 39 | workspace = true 40 | features = ["query", "json", "tokio", "http1", "http2"] 41 | 42 | [dependencies.reqwest] 43 | workspace = true 44 | default-features = false 45 | features = ["rustls-tls-native-roots", "json"] 46 | 47 | [dev-dependencies] 48 | anyhow.workspace = true 49 | assert_matches.workspace = true 50 | clap.workspace = true 51 | config.workspace = true 52 | daphne = { path = "../daphne", features = ["test-utils"] } 53 | daphne-service-utils = { path = "../daphne-service-utils", features = ["test-utils"] } 54 | dhat.workspace = true 55 | hpke-rs.workspace = true 56 | paste.workspace = true 57 | prometheus.workspace = true 58 | rand.workspace = true 59 | rcgen.workspace = true 60 | serde_yaml.workspace = true 61 | tokio = { workspace = true, features = ["signal"] } 62 | tracing-subscriber = { workspace = true, features = ["env-filter"] } 63 | webpki.workspace = true 64 | x509-parser.workspace = true 65 | 66 | [features] 67 | default = ["prometheus"] 68 | test-utils = ["daphne/test-utils", "daphne-service-utils/test-utils"] 69 | test_e2e = [] 70 | prometheus = ["dep:prometheus", "daphne/prometheus"] 71 | 72 | [lints] 73 | workspace = true 74 | -------------------------------------------------------------------------------- /crates/daphne-server/README.md: -------------------------------------------------------------------------------- 1 | # The implementation of the DAP server 2 | 3 | ## Running the server locally 4 | 5 | All of these commands are run from the root of the repo. 6 | 7 | ## Requirements 8 | 9 | * [Rust 1.80+](https://www.rust-lang.org/tools/install) 10 | * [Cap'n Proto](https://capnproto.org/install.html) 11 | 12 | ### Starting the storage layer 13 | 14 | ```sh 15 | make storage-proxy 16 | ``` 17 | 18 | This should start the storage for both the leader and the helper, exposed at 19 | ports 4000 and 4001 respectively. 20 | 21 | 22 | ### Running the leader/helper 23 | 24 | **Leader**: 25 | ```sh 26 | make leader 27 | ``` 28 | The leader listens on port `8787` 29 | 30 | **Helper**: 31 | ```sh 32 | make helper 33 | ``` 34 | The helper listens on port `8788` 35 | 36 | ### Adding an hpke config 37 | 38 | The hpke config must be added every time the storage layer is started as no state 39 | is persisted across runs.
40 | 41 | **Leader:** 42 | ``` 43 | cargo run --bin dapf -- test-routes add-hpke-config http://localhost:8787/v09/ --kem-alg x25519_hkdf_sha256 44 | ``` 45 | 46 | **Helper:** 47 | ``` 48 | cargo run --bin dapf -- test-routes add-hpke-config http://localhost:8788/v09/ --kem-alg x25519_hkdf_sha256 49 | ``` 50 | 51 | ### Clearing all of storage without restaring docker 52 | 53 | ``` 54 | cargo run --bin dapf -- test-routes clear-storage --help 55 | ``` 56 | -------------------------------------------------------------------------------- /crates/daphne-server/docker-compose-e2e.yaml: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2024 Cloudflare, Inc. All rights reserved. 2 | # SPDX-License-Identifier: BSD-3-Clause 3 | --- 4 | version: "3.9" 5 | 6 | networks: 7 | dap_network: 8 | driver: bridge 9 | 10 | services: 11 | leader_storage: 12 | networks: 13 | - dap_network 14 | ports: 15 | - "4000" 16 | extends: 17 | file: ../daphne-worker-test/docker-compose-storage-proxy.yaml 18 | service: leader_storage 19 | helper_storage: 20 | networks: 21 | - dap_network 22 | ports: 23 | - "4001" 24 | extends: 25 | file: ../daphne-worker-test/docker-compose-storage-proxy.yaml 26 | service: helper_storage 27 | leader: 28 | networks: 29 | - dap_network 30 | ports: 31 | - "8787" 32 | build: 33 | context: ../.. 34 | dockerfile: crates/daphne-server/docker/example-service.Dockerfile 35 | target: leader 36 | depends_on: 37 | - leader_storage 38 | environment: 39 | - RUST_LOG=info 40 | helper: 41 | networks: 42 | - dap_network 43 | ports: 44 | - "8788" 45 | build: 46 | context: ../.. 47 | dockerfile: crates/daphne-server/docker/example-service.Dockerfile 48 | target: helper 49 | depends_on: 50 | - helper_storage 51 | environment: 52 | - DAP_SERVICE__SIGNING_KEY=${HPKE_SIGNING_KEY} 53 | - RUST_LOG=info 54 | test: 55 | networks: 56 | - dap_network 57 | build: 58 | context: ../.. 59 | dockerfile: crates/daphne-worker-test/docker/runtests.Dockerfile 60 | depends_on: 61 | - leader 62 | - helper 63 | environment: 64 | - E2E_TEST_HPKE_SIGNING_CERTIFICATE=${E2E_TEST_HPKE_SIGNING_CERTIFICATE} 65 | -------------------------------------------------------------------------------- /crates/daphne-server/docker/example-service.Dockerfile: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2024 Cloudflare, Inc. All rights reserved. 2 | # SPDX-License-Identifier: BSD-3-Clause 3 | 4 | FROM rust:1.84.1-bookworm AS builder 5 | 6 | RUN apt update && \ 7 | apt install -y \ 8 | capnproto 9 | 10 | WORKDIR /dap 11 | 12 | COPY Cargo.toml Cargo.lock . 13 | COPY crates/daphne crates/daphne 14 | COPY crates/daphne-service-utils crates/daphne-service-utils 15 | COPY crates/daphne-server crates/daphne-server 16 | 17 | RUN cargo build -p daphne-server --example service --features test-utils 18 | 19 | FROM debian:bookworm AS helper 20 | 21 | COPY ./crates/daphne-server/examples/configuration-helper.toml configuration.toml 22 | RUN sed -i 's/localhost/helper_storage/g' configuration.toml 23 | COPY --from=builder /dap/target/debug/examples/service . 24 | 25 | ENTRYPOINT ["./service"] 26 | 27 | FROM debian:bookworm AS leader 28 | 29 | COPY ./crates/daphne-server/examples/configuration-leader.toml configuration.toml 30 | RUN sed -i 's/localhost/leader_storage/g' configuration.toml 31 | COPY --from=builder /dap/target/debug/examples/service . 
32 | 33 | ENTRYPOINT ["./service"] 34 | 35 | FROM debian:bookworm AS compute-offload 36 | 37 | COPY ./crates/daphne-server/examples/configuration-cpu-offload.toml configuration.toml 38 | COPY --from=builder /dap/target/debug/examples/service . 39 | 40 | ENTRYPOINT ["./service"] 41 | -------------------------------------------------------------------------------- /crates/daphne-server/examples/configuration-cpu-offload.toml: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2024 Cloudflare, Inc. All rights reserved. 2 | # SPDX-License-Identifier: BSD-3-Clause 3 | 4 | port = 5000 5 | 6 | # None of these settings are relevant and can be deleted later when 7 | # daphne-server stops being an aggregator 8 | [storage_proxy] 9 | url = "http://localhost:4001" 10 | # SECRET: This is a test secret. In production, we'll generate and securely provision the token. 11 | auth_token = 'this-is-the-storage-proxy-auth-token' 12 | 13 | [service] 14 | env = "oxy" 15 | role = "helper" 16 | max_batch_duration = 360000 17 | min_batch_interval_start = 259200 18 | max_batch_interval_end = 259200 19 | supported_hpke_kems = ["x25519_hkdf_sha256"] 20 | default_version = "v09" 21 | report_storage_epoch_duration = 300000 22 | base_url = "http://127.0.0.1:8788" 23 | default_num_agg_span_shards = 4 24 | 25 | [service.taskprov] 26 | peer_auth.leader.expected_token = "I-am-the-leader" # SECRET 27 | vdaf_verify_key_init = "b029a72fa327931a5cb643dcadcaafa098fcbfac07d990cb9e7c9a8675fafb18" # SECRET 28 | hpke_collector_config = """{ 29 | "id": 23, 30 | "kem_id": "p256_hkdf_sha256", 31 | "kdf_id": "hkdf_sha256", 32 | "aead_id": "aes128_gcm", 33 | "public_key": "047dab625e0d269abcc28c611bebf5a60987ddf7e23df0e0aa343e5774ad81a1d0160d9252b82b4b5c52354205f5ec945645cb79facff8d85c9c31b490cdf35466" 34 | }""" 35 | -------------------------------------------------------------------------------- /crates/daphne-server/examples/configuration-helper.toml: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2024 Cloudflare, Inc. All rights reserved. 2 | # SPDX-License-Identifier: BSD-3-Clause 3 | 4 | port = 8788 5 | 6 | [storage_proxy] 7 | url = "http://localhost:4001" 8 | # SECRET: This is a test secret. In production, we'll generate and securely provision the token. 9 | auth_token = 'this-is-the-storage-proxy-auth-token' 10 | 11 | [service] 12 | env = "oxy" 13 | role = "helper" 14 | max_batch_duration = 360000 15 | min_batch_interval_start = 259200 16 | max_batch_interval_end = 259200 17 | supported_hpke_kems = ["x25519_hkdf_sha256"] 18 | default_version = "v09" 19 | report_storage_epoch_duration = 300000 20 | base_url = "http://127.0.0.1:8788" 21 | default_num_agg_span_shards = 4 22 | 23 | [service.taskprov] 24 | peer_auth.leader.expected_token = "I-am-the-leader" # SECRET 25 | vdaf_verify_key_init = "b029a72fa327931a5cb643dcadcaafa098fcbfac07d990cb9e7c9a8675fafb18" # SECRET 26 | hpke_collector_config = """{ 27 | "id": 23, 28 | "kem_id": "p256_hkdf_sha256", 29 | "kdf_id": "hkdf_sha256", 30 | "aead_id": "aes128_gcm", 31 | "public_key": "047dab625e0d269abcc28c611bebf5a60987ddf7e23df0e0aa343e5774ad81a1d0160d9252b82b4b5c52354205f5ec945645cb79facff8d85c9c31b490cdf35466" 32 | }""" 33 | -------------------------------------------------------------------------------- /crates/daphne-server/examples/configuration-leader.toml: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2024 Cloudflare, Inc. 
All rights reserved. 2 | # SPDX-License-Identifier: BSD-3-Clause 3 | 4 | port = 8787 5 | 6 | [storage_proxy] 7 | url = "http://localhost:4000" 8 | # SECRET: This is a test secret. In production, we'll generate and securely provision the token. 9 | auth_token = 'this-is-the-storage-proxy-auth-token' 10 | 11 | [service] 12 | env = "oxy" 13 | role = "leader" 14 | max_batch_duration = 360000 15 | min_batch_interval_start = 259200 16 | max_batch_interval_end = 259200 17 | supported_hpke_kems = ["x25519_hkdf_sha256"] 18 | default_version = "v09" 19 | report_storage_epoch_duration = 300000 20 | base_url = "http://127.0.0.1:8787" 21 | default_num_agg_span_shards = 4 22 | 23 | [service.taskprov] 24 | peer_auth.collector.expected_token = "I-am-the-collector" # SECRET 25 | self_bearer_token = "I-am-the-leader" # SECRET 26 | vdaf_verify_key_init = "b029a72fa327931a5cb643dcadcaafa098fcbfac07d990cb9e7c9a8675fafb18" # SECRET 27 | hpke_collector_config = """{ 28 | "id": 23, 29 | "kem_id": "p256_hkdf_sha256", 30 | "kdf_id": "hkdf_sha256", 31 | "aead_id": "aes128_gcm", 32 | "public_key": "047dab625e0d269abcc28c611bebf5a60987ddf7e23df0e0aa343e5774ad81a1d0160d9252b82b4b5c52354205f5ec945645cb79facff8d85c9c31b490cdf35466" 33 | }""" 34 | -------------------------------------------------------------------------------- /crates/daphne-server/examples/service.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2024 Cloudflare, Inc. All rights reserved. 2 | // SPDX-License-Identifier: BSD-3-Clause 3 | 4 | use std::path::PathBuf; 5 | 6 | use clap::Parser; 7 | use daphne::constants::DapAggregatorRole; 8 | use daphne_server::{ 9 | config::DaphneServiceConfig, metrics::DaphnePromServiceMetrics, router, App, StorageProxyConfig, 10 | }; 11 | use serde::{Deserialize, Serialize}; 12 | use tokio::net::TcpListener; 13 | use tracing_subscriber::EnvFilter; 14 | use url::Url; 15 | 16 | #[derive(Debug, Serialize, Deserialize)] 17 | struct Config { 18 | service: DaphneServiceConfig, 19 | port: u16, 20 | storage_proxy: StorageProxyConfig, 21 | } 22 | 23 | impl TryFrom for Config { 24 | type Error = config::ConfigError; 25 | fn try_from( 26 | Args { 27 | configuration, 28 | role, 29 | port, 30 | storage_proxy, 31 | }: Args, 32 | ) -> Result { 33 | config::Config::builder() 34 | .set_default("port", 3000)? 35 | .add_source(match configuration { 36 | Some(path) => config::File::from(path.as_ref()), 37 | None => config::File::with_name("configuration"), 38 | }) 39 | .add_source( 40 | config::Environment::with_prefix("DAP") 41 | .prefix_separator("_") 42 | .separator("__"), 43 | ) 44 | .set_override_option( 45 | "service.role", 46 | role.map(|role| { 47 | config::Value::new( 48 | Some(&String::from("args.role")), 49 | match role { 50 | DapAggregatorRole::Leader => "leader", 51 | DapAggregatorRole::Helper => "helper", 52 | }, 53 | ) 54 | }), 55 | )? 56 | .set_override_option( 57 | "port", 58 | port.map(|port| config::Value::new(Some(&String::from("args.port")), port)), 59 | )? 60 | .set_override_option( 61 | "storage_proxy", 62 | storage_proxy.map(|storage_proxy| { 63 | config::Value::new( 64 | Some(&String::from("args.storage_proxy")), 65 | storage_proxy.to_string(), 66 | ) 67 | }), 68 | )? 69 | .build()? 70 | .try_deserialize() 71 | } 72 | } 73 | 74 | /// Daphne test service used in e2e tests and general manual testing 75 | #[derive(clap::Parser)] 76 | struct Args { 77 | /// A configuration file, can be in json, yaml or toml. 
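    /// If omitted, a file named `configuration` (e.g. `configuration.toml`, as the example
    /// Dockerfiles provide) is loaded from the working directory, and `DAP_*` environment
    /// variables are layered on top of it.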
78 |     #[arg(short, long)]
79 |     configuration: Option<PathBuf>,
80 | 
81 |     // --- command line overridable parameters ---
82 |     /// One of `leader` or `helper`.
83 |     #[arg(short, long)]
84 |     role: Option<DapAggregatorRole>,
85 |     /// The port to listen on.
86 |     #[arg(short, long)]
87 |     port: Option<u16>,
88 |     /// The storage url.
89 |     #[arg(short, long)]
90 |     storage_proxy: Option<Url>,
91 | }
92 | 
93 | #[global_allocator]
94 | static ALLOC: dhat::Alloc = dhat::Alloc;
95 | 
96 | #[tokio::main]
97 | async fn main() -> Result<(), Box<dyn std::error::Error>> {
98 |     let _profiler = dhat::Profiler::new_heap();
99 | 
100 |     // Parse the configuration from the command line arguments.
101 |     let config = Config::try_from(Args::parse())?;
102 |     println!(
103 |         "starting service with config:\n{}",
104 |         serde_yaml::to_string(&config).unwrap()
105 |     );
106 | 
107 |     // Create a new prometheus registry where metrics will be registered and measured
108 |     let registry = prometheus::Registry::new();
109 |     let daphne_service_metrics = DaphnePromServiceMetrics::register(&registry)?;
110 | 
111 |     let role = config.service.role;
112 |     // Configure the application
113 |     let app = App::new(config.storage_proxy, daphne_service_metrics, config.service)?;
114 | 
115 |     // create the router that will handle the protocol's http requests
116 |     let router = router::new(role, app);
117 | 
118 |     // initialize tracing in a very default way.
119 |     tracing_subscriber::fmt()
120 |         .with_env_filter(EnvFilter::from_default_env())
121 |         .init();
122 | 
123 |     // hand the router to axum for it to run
124 |     let serve = axum::serve(
125 |         TcpListener::bind(std::net::SocketAddr::new(
126 |             "0.0.0.0".parse().unwrap(),
127 |             config.port,
128 |         ))
129 |         .await
130 |         .unwrap(),
131 |         router,
132 |     );
133 | 
134 |     let ctrl_c = tokio::signal::ctrl_c();
135 | 
136 |     tokio::select! {
137 |         _ = serve => {}
138 |         _ = ctrl_c => {}
139 |     }
140 | 
141 |     Ok(())
142 | }
143 | 
-------------------------------------------------------------------------------- /crates/daphne-server/src/metrics.rs: --------------------------------------------------------------------------------
1 | // Copyright (c) 2022 Cloudflare, Inc. All rights reserved.
2 | // SPDX-License-Identifier: BSD-3-Clause
3 | 
4 | //! Daphne-Worker metrics.
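//!
//! Defines [`DaphneServiceMetrics`], the service-level metrics interface layered on top of
//! [`DaphneMetrics`]: HTTP status codes, DAP aborts, authentication methods, and aggregation
//! job latency.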
5 | 6 | use daphne::metrics::DaphneMetrics; 7 | 8 | pub trait DaphneServiceMetrics: DaphneMetrics { 9 | fn abort_count_inc(&self, label: &str); 10 | fn count_http_status_code(&self, status_code: u16); 11 | fn daphne(&self) -> &dyn DaphneMetrics; 12 | fn auth_method_inc(&self, method: AuthMethod); 13 | fn aggregate_job_latency(&self, time: std::time::Duration); 14 | } 15 | 16 | #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] 17 | pub enum AuthMethod { 18 | BearerToken, 19 | TlsClientAuth, 20 | } 21 | 22 | #[cfg(any(feature = "prometheus", test))] 23 | mod prometheus { 24 | use super::DaphneServiceMetrics; 25 | use daphne::{ 26 | fatal_error, 27 | metrics::{prometheus::DaphnePromMetrics, DaphneMetrics, ReportStatus}, 28 | DapError, 29 | }; 30 | use prometheus::{register_int_counter_vec_with_registry, IntCounterVec, Registry}; 31 | use std::time::Duration; 32 | 33 | impl DaphneMetrics for DaphnePromServiceMetrics { 34 | fn report_inc_by(&self, status: ReportStatus, val: u64) { 35 | self.daphne.report_inc_by(status, val); 36 | } 37 | 38 | fn inbound_req_inc(&self, request_type: daphne::metrics::DaphneRequestType) { 39 | self.daphne.inbound_req_inc(request_type); 40 | } 41 | 42 | fn agg_job_started_inc(&self) { 43 | self.daphne.agg_job_started_inc(); 44 | } 45 | 46 | fn agg_job_completed_inc(&self) { 47 | self.daphne.agg_job_completed_inc(); 48 | } 49 | 50 | fn agg_job_observe_batch_size(&self, val: usize) { 51 | self.daphne.agg_job_observe_batch_size(val); 52 | } 53 | 54 | fn agg_job_put_span_retry_inc(&self) { 55 | self.daphne.agg_job_put_span_retry_inc(); 56 | } 57 | } 58 | 59 | impl DaphneServiceMetrics for DaphnePromServiceMetrics { 60 | fn abort_count_inc(&self, label: &str) { 61 | self.dap_abort_counter.with_label_values(&[label]).inc(); 62 | } 63 | 64 | fn count_http_status_code(&self, status_code: u16) { 65 | self.http_status_code_counter 66 | .with_label_values(&[&status_code.to_string()]) 67 | .inc(); 68 | } 69 | 70 | fn auth_method_inc(&self, method: super::AuthMethod) { 71 | let method = match method { 72 | super::AuthMethod::TlsClientAuth => "mutual_tls", 73 | super::AuthMethod::BearerToken => "tls_client_auth", 74 | }; 75 | self.auth_method.with_label_values(&[method]).inc(); 76 | } 77 | 78 | fn daphne(&self) -> &dyn DaphneMetrics { 79 | self 80 | } 81 | 82 | fn aggregate_job_latency(&self, _time: Duration) { 83 | // unimplemented by default due to elevated cardinality 84 | } 85 | } 86 | 87 | #[derive(Clone)] 88 | pub struct DaphnePromServiceMetrics { 89 | /// Daphne metrics. 90 | daphne: DaphnePromMetrics, 91 | 92 | /// HTTP response status. 93 | http_status_code_counter: IntCounterVec, 94 | 95 | /// DAP aborts. 
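    ///
    /// Labeled by the abort reason passed to `abort_count_inc`.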
96 | dap_abort_counter: IntCounterVec, 97 | 98 | /// Counts the used authentication methods 99 | auth_method: IntCounterVec, 100 | } 101 | 102 | impl DaphnePromServiceMetrics { 103 | pub fn register(registry: &Registry) -> Result { 104 | let http_status_code_counter = register_int_counter_vec_with_registry!( 105 | "http_status_code", 106 | "HTTP response status code.", 107 | &["code"], 108 | registry 109 | ) 110 | .map_err(|e| fatal_error!(err = ?e, "failed to register http_status_code"))?; 111 | 112 | let dap_abort_counter = register_int_counter_vec_with_registry!( 113 | "dap_abort", 114 | "DAP aborts.", 115 | &["reason"], 116 | registry 117 | ) 118 | .map_err(|e| fatal_error!(err = ?e, "failed to register dap_abort"))?; 119 | 120 | let auth_method = register_int_counter_vec_with_registry!( 121 | "auth_method", 122 | "The authentication method used", 123 | &["method"], 124 | registry 125 | ) 126 | .map_err(|e| fatal_error!(err = ?e, "failed to register dap_abort"))?; 127 | 128 | let daphne = DaphnePromMetrics::register(registry)?; 129 | 130 | Ok(Self { 131 | daphne, 132 | http_status_code_counter, 133 | dap_abort_counter, 134 | auth_method, 135 | }) 136 | } 137 | } 138 | } 139 | 140 | #[cfg(any(feature = "prometheus", test))] 141 | pub use prometheus::DaphnePromServiceMetrics; 142 | -------------------------------------------------------------------------------- /crates/daphne-server/src/roles/helper.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2025 Cloudflare, Inc. All rights reserved. 2 | // SPDX-License-Identifier: BSD-3-Clause 3 | 4 | use axum::async_trait; 5 | use daphne::{ 6 | fatal_error, 7 | messages::{AggregationJobId, AggregationJobResp, TaskId}, 8 | roles::{helper::AggregationJobRequestHash, DapHelper}, 9 | DapError, DapVersion, 10 | }; 11 | 12 | #[async_trait] 13 | impl DapHelper for crate::App { 14 | async fn assert_agg_job_is_legal( 15 | &self, 16 | _id: AggregationJobId, 17 | _version: DapVersion, 18 | _task_id: &TaskId, 19 | _req_hash: &AggregationJobRequestHash, 20 | ) -> Result<(), DapError> { 21 | // the server implementation can't check for this 22 | Ok(()) 23 | } 24 | 25 | async fn poll_aggregated( 26 | &self, 27 | _version: DapVersion, 28 | _task_id: &TaskId, 29 | _agg_job_id: &AggregationJobId, 30 | ) -> Result { 31 | Err(fatal_error!(err = "polling not implemented")) 32 | } 33 | } 34 | -------------------------------------------------------------------------------- /crates/daphne-server/src/router/compute_offload.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2025 Cloudflare, Inc. All rights reserved. 
2 | // SPDX-License-Identifier: BSD-3-Clause 3 | 4 | use crate::App; 5 | use axum::{async_trait, extract::FromRequest, response::IntoResponse, routing::post}; 6 | use daphne::{error::DapAbort, InitializedReport}; 7 | use daphne_service_utils::{ 8 | capnproto::{CapnprotoPayloadDecode, CapnprotoPayloadDecodeExt, CapnprotoPayloadEncodeExt}, 9 | compute_offload::{InitializeReports, InitializedReports}, 10 | }; 11 | use http::StatusCode; 12 | use prio::codec::ParameterizedDecode; 13 | use rayon::iter::{IntoParallelIterator as _, ParallelIterator}; 14 | 15 | pub(super) fn add_routes(router: super::Router) -> super::Router { 16 | router.route( 17 | "/compute_offload/initialize_reports", 18 | post(initialize_reports), 19 | ) 20 | } 21 | 22 | struct CapnprotoExtractor(T); 23 | 24 | #[async_trait] 25 | impl FromRequest for CapnprotoExtractor 26 | where 27 | T: CapnprotoPayloadDecode, 28 | { 29 | type Rejection = StatusCode; 30 | 31 | async fn from_request( 32 | req: http::Request, 33 | _state: &S, 34 | ) -> Result { 35 | let bytes = axum::body::to_bytes(req.into_body(), usize::MAX) 36 | .await 37 | .map_err(|_| StatusCode::BAD_REQUEST)?; 38 | let t = T::decode_from_bytes(&bytes).map_err(|_| StatusCode::BAD_REQUEST)?; 39 | 40 | Ok(CapnprotoExtractor(t)) 41 | } 42 | } 43 | 44 | #[tracing::instrument(skip_all, fields(%task_id, report_count = prep_inits.len()))] 45 | async fn initialize_reports( 46 | CapnprotoExtractor(InitializeReports { 47 | hpke_keys, 48 | valid_report_range, 49 | task_id, 50 | task_config, 51 | agg_param, 52 | prep_inits, 53 | }): CapnprotoExtractor>, 54 | ) -> impl IntoResponse { 55 | tracing::info!("initializing reports"); 56 | let initialized_reports = prep_inits 57 | .into_par_iter() 58 | .map(|prep_init| { 59 | InitializedReport::from_leader( 60 | &hpke_keys.as_ref(), 61 | valid_report_range.clone(), 62 | &task_id, 63 | &task_config, 64 | prep_init.report_share, 65 | prep_init.payload, 66 | &daphne::DapAggregationParam::get_decoded_with_param(&task_config.vdaf, &agg_param) 67 | .map_err(|e| DapAbort::from_codec_error(e, task_id))?, 68 | ) 69 | }) 70 | .collect::, _>>(); 71 | 72 | match initialized_reports { 73 | Ok(reports) => { 74 | let body = InitializedReports { 75 | vdaf: task_config.vdaf.into_owned(), 76 | reports, 77 | } 78 | .encode_to_bytes(); 79 | 80 | (StatusCode::OK, body).into_response() 81 | } 82 | Err(error) => (StatusCode::BAD_REQUEST, axum::Json(error)).into_response(), 83 | } 84 | } 85 | -------------------------------------------------------------------------------- /crates/daphne-server/src/router/helper.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2024 Cloudflare, Inc. All rights reserved. 
2 | // SPDX-License-Identifier: BSD-3-Clause 3 | 4 | use std::sync::Arc; 5 | 6 | use axum::{ 7 | extract::State, 8 | routing::{post, put}, 9 | }; 10 | use daphne::{ 11 | messages::AggregateShareReq, 12 | roles::{ 13 | helper::{self, HashedAggregationJobReq}, 14 | DapHelper, 15 | }, 16 | }; 17 | use http::StatusCode; 18 | 19 | use crate::{roles::fetch_replay_protection_override, App}; 20 | 21 | use super::{ 22 | extractor::dap_sender::FROM_LEADER, AxumDapResponse, DapRequestExtractor, DaphneService, 23 | }; 24 | 25 | pub(super) fn add_helper_routes(router: super::Router) -> super::Router { 26 | router 27 | .route( 28 | "/:version/tasks/:task_id/aggregation_jobs/:agg_job_id", 29 | put(agg_job), 30 | ) 31 | .route("/:version/tasks/:task_id/aggregate_shares", post(agg_share)) 32 | } 33 | 34 | #[tracing::instrument( 35 | skip_all, 36 | fields( 37 | media_type = ?req.media_type, 38 | task_id = ?req.task_id, 39 | version = ?req.version, 40 | ) 41 | )] 42 | async fn agg_job( 43 | State(app): State>, 44 | DapRequestExtractor(req): DapRequestExtractor, 45 | ) -> AxumDapResponse { 46 | let timer = std::time::Instant::now(); 47 | 48 | let resp = helper::handle_agg_job_init_req( 49 | &*app, 50 | req, 51 | fetch_replay_protection_override(app.kv()).await, 52 | ) 53 | .await; 54 | 55 | let elapsed = timer.elapsed(); 56 | 57 | app.server_metrics().aggregate_job_latency(elapsed); 58 | 59 | AxumDapResponse::from_result_with_success_code(resp, app.server_metrics(), StatusCode::CREATED) 60 | } 61 | 62 | #[tracing::instrument( 63 | skip_all, 64 | fields( 65 | media_type = ?req.media_type, 66 | task_id = ?req.task_id, 67 | version = ?req.version, 68 | ) 69 | )] 70 | async fn agg_share( 71 | State(app): State>, 72 | DapRequestExtractor(req): DapRequestExtractor, 73 | ) -> AxumDapResponse 74 | where 75 | A: DapHelper + DaphneService + Send + Sync, 76 | { 77 | AxumDapResponse::from_result( 78 | helper::handle_agg_share_req(&*app, req).await, 79 | app.server_metrics(), 80 | ) 81 | } 82 | -------------------------------------------------------------------------------- /crates/daphne-server/src/router/leader.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2024 Cloudflare, Inc. All rights reserved. 
2 | // SPDX-License-Identifier: BSD-3-Clause 3 | 4 | use std::sync::Arc; 5 | 6 | use axum::{ 7 | extract::{Path, Request, State}, 8 | http::StatusCode, 9 | middleware::{from_fn, Next}, 10 | response::{IntoResponse, Response}, 11 | routing::{get, post, put}, 12 | }; 13 | use daphne::{ 14 | constants::DapMediaType, 15 | error::DapAbort, 16 | messages::{self, request::CollectionPollReq}, 17 | roles::leader::{self, DapLeader}, 18 | DapError, DapVersion, 19 | }; 20 | use prio::codec::ParameterizedEncode; 21 | 22 | use super::{ 23 | extractor::dap_sender::FROM_COLLECTOR, AxumDapResponse, DapRequestExtractor, DaphneService, 24 | UnauthenticatedDapRequestExtractor, 25 | }; 26 | use futures::{future::BoxFuture, FutureExt}; 27 | use serde::Deserialize; 28 | 29 | #[derive(Deserialize)] 30 | struct PathVersion { 31 | #[serde(rename = "version")] 32 | presented_version: DapVersion, 33 | } 34 | 35 | fn require_version( 36 | expected_version: DapVersion, 37 | ) -> impl Copy + Fn(Path, Request, Next) -> BoxFuture<'static, Response> { 38 | move |Path(PathVersion { presented_version }), req, next| { 39 | async move { 40 | if presented_version != expected_version { 41 | return StatusCode::METHOD_NOT_ALLOWED.into_response(); 42 | } 43 | next.run(req).await 44 | } 45 | .boxed() 46 | } 47 | } 48 | 49 | pub(super) fn add_leader_routes(router: super::Router) -> super::Router 50 | where 51 | A: DapLeader + DaphneService + Send + Sync + 'static, 52 | { 53 | router 54 | .route( 55 | "/:version/tasks/:task_id/reports", 56 | put(upload).layer(from_fn(require_version(DapVersion::Draft09))), 57 | ) 58 | .route( 59 | "/:version/tasks/:task_id/reports", 60 | post(upload).layer(from_fn(require_version(DapVersion::Latest))), 61 | ) 62 | .route( 63 | "/:version/tasks/:task_id/collection_jobs/:collect_job_id", 64 | put(start_collection_job), 65 | ) 66 | .route( 67 | "/:version/tasks/:task_id/collection_jobs/:collect_job_id", 68 | post(poll_collect).layer(from_fn(require_version(DapVersion::Draft09))), 69 | ) 70 | .route( 71 | "/:version/tasks/:task_id/collection_jobs/:collect_job_id", 72 | get(poll_collect).layer(from_fn(require_version(DapVersion::Latest))), 73 | ) 74 | } 75 | 76 | #[tracing::instrument( 77 | skip_all, 78 | fields( 79 | task_id = ?req.task_id, 80 | version = ?req.version, 81 | ) 82 | )] 83 | async fn upload( 84 | State(app): State>, 85 | UnauthenticatedDapRequestExtractor(req): UnauthenticatedDapRequestExtractor, 86 | ) -> Response 87 | where 88 | A: DapLeader + DaphneService + Send + Sync, 89 | { 90 | match leader::handle_upload_req(&*app, req).await { 91 | Ok(()) => StatusCode::OK.into_response(), 92 | Err(e) => AxumDapResponse::new_error(e, app.server_metrics()).into_response(), 93 | } 94 | } 95 | 96 | #[tracing::instrument( 97 | skip_all, 98 | fields( 99 | task_id = ?req.task_id, 100 | version = ?req.version, 101 | ) 102 | )] 103 | async fn start_collection_job( 104 | State(app): State>, 105 | DapRequestExtractor(req): DapRequestExtractor, 106 | ) -> Response 107 | where 108 | A: DapLeader + DaphneService + Send + Sync, 109 | { 110 | match leader::handle_coll_job_req(&*app, &req).await { 111 | Ok(()) => StatusCode::CREATED.into_response(), 112 | Err(e) => AxumDapResponse::new_error(e, app.server_metrics()).into_response(), 113 | } 114 | } 115 | 116 | #[tracing::instrument( 117 | skip_all, 118 | fields( 119 | task_id = ?req.task_id, 120 | version = ?req.version, 121 | ) 122 | )] 123 | async fn poll_collect( 124 | State(app): State>, 125 | DapRequestExtractor(req): DapRequestExtractor, 126 | ) -> Response 
127 | where 128 | A: DapLeader + DaphneService + Send + Sync, 129 | { 130 | match app.poll_collect_job(&req.task_id, &req.resource_id).await { 131 | Ok(daphne::DapCollectionJob::Done(collect_resp)) => AxumDapResponse::new_success( 132 | daphne::DapResponse { 133 | version: req.version, 134 | media_type: DapMediaType::Collection, 135 | payload: match collect_resp.get_encoded_with_param(&req.version) { 136 | Ok(payload) => payload, 137 | Err(e) => { 138 | return AxumDapResponse::new_error( 139 | DapError::encoding(e), 140 | app.server_metrics(), 141 | ) 142 | .into_response() 143 | } 144 | }, 145 | }, 146 | app.server_metrics(), 147 | ) 148 | .into_response(), 149 | Ok(daphne::DapCollectionJob::Pending) => StatusCode::ACCEPTED.into_response(), 150 | Ok(daphne::DapCollectionJob::Unknown) => AxumDapResponse::new_error( 151 | DapAbort::BadRequest("unknown collection job id".into()), 152 | app.server_metrics(), 153 | ) 154 | .into_response(), 155 | Err(e) => AxumDapResponse::new_error(e, app.server_metrics()).into_response(), 156 | } 157 | } 158 | -------------------------------------------------------------------------------- /crates/daphne-server/src/storage_proxy_connection/kv/cache.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2024 Cloudflare, Inc. All rights reserved. 2 | // SPDX-License-Identifier: BSD-3-Clause 3 | 4 | use std::{ 5 | any::Any, 6 | collections::HashMap, 7 | time::{Duration, Instant}, 8 | }; 9 | 10 | use mappable_rc::Marc; 11 | 12 | use super::KvPrefix; 13 | 14 | const CACHE_VALUE_LIFETIME: Duration = Duration::from_secs(60 * 5); 15 | 16 | struct CacheLine { 17 | /// Time at which the cache item was set. 18 | ts: Instant, 19 | 20 | /// Either the value or an indication that no value was found. 21 | entry: Option>, 22 | } 23 | 24 | #[derive(Default)] 25 | pub struct Cache { 26 | /// This map follows the same structure of KV queries. 27 | /// The first key (&'static str) is a [`KvPrefix::PREFIX`] 28 | /// The second key (String) is the key that is associated with this value 29 | kv: HashMap<&'static str, HashMap>, 30 | } 31 | 32 | pub enum CacheResult { 33 | /// Cache hit. 34 | /// 35 | /// `None` indicates that the value is known to not exist. 36 | Hit(Option>), 37 | /// Cache Miss. It was never cached or it has expired. 38 | Miss, 39 | /// There is a value associated with this key, but it's type is not [`T`]. 40 | MismatchedType, 41 | } 42 | 43 | impl Cache { 44 | pub fn get

<P, T>(&self, key: &str) -> CacheResult<T>
45 |     where
46 |         P: KvPrefix,
47 |     {
48 |         match self.kv.get(P::PREFIX) {
49 |             Some(cache) => match cache.get(key) {
50 |                 // Cache hit
51 |                 Some(CacheLine { ts, entry }) if ts.elapsed() < CACHE_VALUE_LIFETIME => entry
52 |                     .as_ref()
53 |                     .map(|entry| Marc::try_map(entry.clone(), |v| v.downcast_ref::<T>()))
54 |                     .transpose() // bring out the try_map error
55 |                     .map_or(CacheResult::MismatchedType, CacheResult::Hit),
56 | 
57 |                 // Cache miss or the cached value is stale.
58 |                 Some(_) | None => CacheResult::Miss,
59 |             },
60 | 
61 |             // Cache miss
62 |             None => CacheResult::Miss,
63 |         }
64 |     }
65 | 
66 |     pub(super) fn put<P, T>

(&mut self, key: String, entry: Option<Marc<T>>)
67 |     where
68 |         P: KvPrefix,
69 |     {
70 |         self.kv.entry(P::PREFIX).or_default().insert(
71 |             key,
72 |             CacheLine {
73 |                 ts: Instant::now(),
74 |                 entry: entry.map(|value| Marc::map(value, |v| v as &(dyn Any + Send + Sync))),
75 |             },
76 |         );
77 |     }
78 | 
79 |     #[allow(dead_code)]
80 |     pub fn delete<P, T>

(&mut self, key: &str) -> CacheResult 81 | where 82 | P: KvPrefix, 83 | { 84 | match self.kv.get_mut(P::PREFIX) { 85 | Some(cache) => match cache.remove(key) { 86 | // Cache hit 87 | Some(CacheLine { ts: _, entry }) => entry 88 | .map(|entry| Marc::try_map(entry, |v| v.downcast_ref::())) 89 | .transpose() // bring out the try_map error 90 | .map_or(CacheResult::MismatchedType, CacheResult::Hit), 91 | 92 | None => CacheResult::Miss, 93 | }, 94 | 95 | // Cache miss 96 | None => CacheResult::Miss, 97 | } 98 | } 99 | } 100 | -------------------------------------------------------------------------------- /crates/daphne-server/src/storage_proxy_connection/mod.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2024 Cloudflare, Inc. All rights reserved. 2 | // SPDX-License-Identifier: BSD-3-Clause 3 | 4 | pub(crate) mod kv; 5 | 6 | use std::fmt::Debug; 7 | 8 | use axum::http::StatusCode; 9 | use daphne_service_utils::{ 10 | capnproto::{CapnprotoPayloadEncode, CapnprotoPayloadEncodeExt as _}, 11 | durable_requests::{bindings::DurableMethod, DurableRequest, ObjectIdFrom, DO_PATH_PREFIX}, 12 | }; 13 | use serde::de::DeserializeOwned; 14 | 15 | pub(crate) use kv::Kv; 16 | 17 | use crate::StorageProxyConfig; 18 | 19 | #[derive(Debug, thiserror::Error)] 20 | pub(crate) enum Error { 21 | #[error("serialization error: {0}")] 22 | Serde(#[from] serde_json::Error), 23 | #[error("network error: {0}")] 24 | Reqwest(#[from] reqwest::Error), 25 | #[error("http error. request returned status code {status} with the body {body}")] 26 | Http { status: StatusCode, body: String }, 27 | } 28 | 29 | #[derive(Clone, Copy)] 30 | pub(crate) struct Do<'h> { 31 | config: &'h StorageProxyConfig, 32 | http: &'h reqwest::Client, 33 | retry: bool, 34 | } 35 | 36 | impl<'h> Do<'h> { 37 | pub fn new(config: &'h StorageProxyConfig, client: &'h reqwest::Client) -> Self { 38 | Self { 39 | config, 40 | http: client, 41 | retry: false, 42 | } 43 | } 44 | 45 | #[allow(dead_code)] 46 | pub fn with_retry(self) -> Self { 47 | Self { 48 | retry: true, 49 | ..self 50 | } 51 | } 52 | } 53 | 54 | pub struct RequestBuilder<'d, B: DurableMethod, P: AsRef<[u8]>> { 55 | durable: &'d Do<'d>, 56 | path: B, 57 | request: DurableRequest

, 58 | } 59 | 60 | impl> RequestBuilder<'_, B, P> { 61 | #[tracing::instrument(skip_all, fields(path = ?self.path))] 62 | pub async fn send(self) -> Result 63 | where 64 | R: DeserializeOwned, 65 | { 66 | tracing::debug!( 67 | obj = std::any::type_name::().split("::").last().unwrap(), 68 | path = ?self.path, 69 | "requesting DO", 70 | ); 71 | let url = self 72 | .durable 73 | .config 74 | .url 75 | .join(&format!("{DO_PATH_PREFIX}{}", self.path.to_uri())) 76 | .unwrap(); 77 | let resp = self 78 | .durable 79 | .http 80 | .post(url) 81 | .body(self.request.into_bytes()) 82 | .bearer_auth(self.durable.config.auth_token.as_str()) 83 | .send() 84 | .await?; 85 | 86 | if resp.status().is_success() { 87 | Ok(resp.json().await?) 88 | } else { 89 | Err(Error::Http { 90 | status: resp.status(), 91 | body: resp.text().await?, 92 | }) 93 | } 94 | } 95 | } 96 | 97 | impl<'d, B: DurableMethod> RequestBuilder<'d, B, [u8; 0]> { 98 | pub fn encode(self, payload: &T) -> RequestBuilder<'d, B, Vec> { 99 | self.with_body(payload.encode_to_bytes()) 100 | } 101 | 102 | pub fn with_body>(self, payload: T) -> RequestBuilder<'d, B, T> { 103 | RequestBuilder { 104 | durable: self.durable, 105 | path: self.path, 106 | request: self.request.with_body(payload), 107 | } 108 | } 109 | } 110 | 111 | impl Do<'_> { 112 | pub fn request( 113 | &self, 114 | path: B, 115 | params: B::NameParameters<'_>, 116 | ) -> RequestBuilder<'_, B, [u8; 0]> { 117 | let (request, _) = DurableRequest::new(path, params); 118 | RequestBuilder { 119 | durable: self, 120 | path, 121 | request: if self.retry { 122 | request.with_retry() 123 | } else { 124 | request 125 | }, 126 | } 127 | } 128 | 129 | #[allow(dead_code)] 130 | pub fn request_with_id( 131 | &self, 132 | path: B, 133 | object_id: ObjectIdFrom, 134 | ) -> RequestBuilder<'_, B, [u8; 0]> { 135 | let (request, _) = DurableRequest::new_with_id(path, object_id); 136 | RequestBuilder { 137 | durable: self, 138 | path, 139 | request: if self.retry { 140 | request.with_retry() 141 | } else { 142 | request 143 | }, 144 | } 145 | } 146 | } 147 | -------------------------------------------------------------------------------- /crates/daphne-server/tests/e2e/main.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2024 Cloudflare, Inc. All rights reserved. 2 | // SPDX-License-Identifier: BSD-3-Clause 3 | 4 | #[cfg(feature = "test_e2e")] 5 | mod e2e; 6 | #[cfg(feature = "test_e2e")] 7 | mod test_runner; 8 | -------------------------------------------------------------------------------- /crates/daphne-service-utils/Cargo.toml: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2025 Cloudflare, Inc. All rights reserved. 
2 | # SPDX-License-Identifier: BSD-3-Clause 3 | 4 | [package] 5 | name = "daphne-service-utils" 6 | version.workspace = true 7 | authors.workspace = true 8 | edition.workspace = true 9 | license.workspace = true 10 | readme.workspace = true 11 | homepage.workspace = true 12 | repository.workspace = true 13 | 14 | [dependencies] 15 | capnp = { workspace = true, optional = true } 16 | daphne = { path = "../daphne", default-features = false } 17 | prio_draft09 = { workspace = true, optional = true } 18 | prio = { workspace = true, optional = true } 19 | serde.workspace = true 20 | serde_json = { workspace = true, optional = true } 21 | url = { workspace = true, optional = true } 22 | 23 | [dev-dependencies] 24 | daphne = { path = "../daphne", default-features = false, features = ["prometheus", "test-utils"] } 25 | rand.workspace = true 26 | 27 | [build-dependencies] 28 | capnpc = { workspace = true, optional = true } 29 | 30 | [features] 31 | test-utils = ["dep:url", "daphne/prometheus", "daphne/test-utils"] 32 | compute-offload = ["dep:capnp", "dep:capnpc", "dep:serde_json", "dep:prio"] 33 | durable_requests = [ 34 | "dep:capnp", 35 | "dep:capnpc", 36 | "dep:prio_draft09", 37 | "dep:prio", 38 | ] 39 | experimental = ["daphne/experimental"] 40 | 41 | [lints] 42 | workspace = true 43 | -------------------------------------------------------------------------------- /crates/daphne-service-utils/build.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2025 Cloudflare, Inc. All rights reserved. 2 | // SPDX-License-Identifier: BSD-3-Clause 3 | 4 | fn main() { 5 | #[cfg(any(feature = "durable_requests", feature = "compute-offload"))] 6 | { 7 | let mut compiler = ::capnpc::CompilerCommand::new(); 8 | 9 | compiler.file("./src/capnproto/base.capnp"); 10 | 11 | #[cfg(feature = "durable_requests")] 12 | compiler 13 | .file("./src/durable_requests/durable_request.capnp") 14 | .file("./src/durable_requests/bindings/aggregation_job_store.capnp") 15 | .file("./src/durable_requests/bindings/aggregate_store_v2.capnp") 16 | .file("./src/durable_requests/bindings/agg_job_response_store.capnp") 17 | .file("./src/durable_requests/bindings/replay_checker.capnp"); 18 | 19 | #[cfg(feature = "compute-offload")] 20 | compiler.file("./src/compute_offload/compute_offload.capnp"); 21 | 22 | compiler.run().expect("compiling schema"); 23 | } 24 | } 25 | -------------------------------------------------------------------------------- /crates/daphne-service-utils/src/bearer_token.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2022 Cloudflare, Inc. All rights reserved. 2 | // SPDX-License-Identifier: BSD-3-Clause 3 | 4 | //! DAP request authorization. 5 | 6 | use core::fmt; 7 | 8 | use daphne::messages::constant_time_eq; 9 | use serde::{Deserialize, Serialize}; 10 | 11 | /// A bearer token used for authorizing DAP requests. 
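///
/// Equality between tokens is evaluated in constant time (see the `PartialEq` impl below),
/// so comparisons do not leak token contents through timing.
///
/// A minimal usage sketch:
///
/// ```
/// use daphne_service_utils::bearer_token::BearerToken;
///
/// let token = BearerToken::from("this-is-a-test-token");
/// assert_eq!(token.as_str(), "this-is-a-test-token");
/// assert_eq!(token, BearerToken::from("this-is-a-test-token".to_string()));
/// ```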
12 | #[derive(Clone, Deserialize, Serialize, Eq)] 13 | #[serde(transparent)] 14 | pub struct BearerToken { 15 | raw: String, 16 | } 17 | 18 | impl BearerToken { 19 | pub fn as_str(&self) -> &str { 20 | self.raw.as_str() 21 | } 22 | } 23 | 24 | impl fmt::Debug for BearerToken { 25 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 26 | #[cfg(feature = "test-utils")] 27 | { 28 | write!(f, "BearerToken({})", self.raw) 29 | } 30 | #[cfg(not(feature = "test-utils"))] 31 | write!(f, "BearerToken(REDACTED)") 32 | } 33 | } 34 | 35 | impl AsRef for BearerToken { 36 | fn as_ref(&self) -> &str { 37 | self.as_str() 38 | } 39 | } 40 | 41 | impl PartialEq for BearerToken { 42 | fn eq(&self, other: &Self) -> bool { 43 | constant_time_eq(self.raw.as_bytes(), other.raw.as_bytes()) 44 | } 45 | } 46 | 47 | impl From for BearerToken { 48 | fn from(raw: String) -> Self { 49 | Self { raw } 50 | } 51 | } 52 | 53 | impl From<&str> for BearerToken { 54 | fn from(raw: &str) -> Self { 55 | Self::from(raw.to_string()) 56 | } 57 | } 58 | -------------------------------------------------------------------------------- /crates/daphne-service-utils/src/capnproto/base.capnp: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2024 Cloudflare, Inc. All rights reserved. 2 | # SPDX-License-Identifier: BSD-3-Clause 3 | 4 | @0xba869f168ff63e77; 5 | 6 | enum DapVersion @0xb5b2c8705a8b22d5 { 7 | draft09 @0; 8 | draftLatest @1; 9 | } 10 | 11 | # [u8; 32] 12 | struct U8L32 @0x9e42cda292792294 { 13 | fst @0 :UInt64; 14 | snd @1 :UInt64; 15 | thr @2 :UInt64; 16 | frh @3 :UInt64; 17 | } 18 | 19 | # [u8; 16] 20 | struct U8L16 @0x9e3f65b13f71cfcb { 21 | fst @0 :UInt64; 22 | snd @1 :UInt64; 23 | } 24 | 25 | struct PartialBatchSelector @0xae86084e56c22fc0 { 26 | union { 27 | timeInterval @0 :Void; 28 | leaderSelectedByBatchId @1 :BatchId; 29 | } 30 | } 31 | 32 | enum ReportError @0xa76428617779e659 { 33 | reserved @0; 34 | batchCollected @1; 35 | reportReplayed @2; 36 | reportDropped @3; 37 | hpkeUnknownConfigId @4; 38 | hpkeDecryptError @5; 39 | vdafPrepError @6; 40 | batchSaturated @7; 41 | taskExpired @8; 42 | invalidMessage @9; 43 | reportTooEarly @10; 44 | taskNotStarted @11; 45 | } 46 | 47 | 48 | using ReportId = U8L16; 49 | using BatchId = U8L32; 50 | using TaskId = U8L32; 51 | using AggregationJobId = U8L16; 52 | using Time = UInt64; 53 | -------------------------------------------------------------------------------- /crates/daphne-service-utils/src/compute_offload/compute_offload.capnp: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2025 Cloudflare, Inc. All rights reserved. 
2 | # SPDX-License-Identifier: BSD-3-Clause 3 | 4 | @0xd932f3d934afce3b; 5 | 6 | using Base = import "../capnproto/base.capnp"; 7 | 8 | using VdafConfig = Text; # json encoded 9 | using VdafVerifyKey = Base.U8L32; 10 | 11 | struct TimeRange @0xf0d27aaa9b1959f7 { 12 | start @0 :UInt64; 13 | end @1 :UInt64; 14 | } 15 | 16 | # Top level message 17 | struct InitializeReports @0x90aadb2f44c9fb78 { 18 | hpkeKeys @0 :List(HpkeReceiverConfig); 19 | validReportRange @1 :TimeRange; 20 | taskId @2 :Base.TaskId; 21 | taskConfig @3 :PartialDapTaskConfig; 22 | aggParam @4 :Data; # encoded 23 | prepInits @5 :List(PrepareInit); 24 | } 25 | 26 | struct HpkeReceiverConfig @0xeec9b4a50458edb7 { 27 | struct HpkeConfig @0xa546066418a5cdc7 { 28 | enum HpkeKemId @0xf4bbeaed8d1fd18a { 29 | p256HkdfSha256 @0; x25519HkdfSha256 @1; 30 | } 31 | enum HpkeKdfId @0x9336afc63df27ba3 { hkdfSha256 @0; } 32 | enum HpkeAeadId @0xd68d403e118c806c { aes128Gcm @0; } 33 | 34 | id @0 :UInt8; 35 | kemId @1 :HpkeKemId; 36 | kdfId @2 :HpkeKdfId; 37 | aeadId @3 :HpkeAeadId; 38 | publicKey @4 :Data; 39 | } 40 | 41 | config @0 :HpkeConfig; 42 | privateKey @1 :Data; 43 | } 44 | 45 | struct PartialDapTaskConfig @0xb11c76132b15968a { 46 | 47 | version @0 :Base.DapVersion; 48 | methodIsTaskprov @1 :Bool; 49 | notBefore @2 :Base.Time; 50 | notAfter @3 :Base.Time; 51 | vdaf @4 :VdafConfig; 52 | vdafVerifyKey @5 :VdafVerifyKey; 53 | } 54 | 55 | struct PublicExtensionsList @0x8b3c98c0ddd0043e { 56 | 57 | union { 58 | # Each extension is encoded according to the DAP spec in 59 | # tag-length-value form. 60 | list @0 :List(Data); 61 | 62 | # draft09 compatibility: Previously DAP had no extensions in the 63 | # report. 64 | none @1 :Void; 65 | } 66 | } 67 | 68 | struct ReportMetadata @0xefba178ad4584bc4 { 69 | 70 | id @0 :Base.ReportId; 71 | time @1 :Base.Time; 72 | publicExtensions @2 :PublicExtensionsList; 73 | } 74 | 75 | struct PrepareInit @0x8192568cb3d03f59 { 76 | 77 | struct HpkeCiphertext @0xf0813319decf7eaf { 78 | configId @0 :UInt8; 79 | enc @1 :Data; 80 | payload @2 :Data; 81 | } 82 | 83 | struct ReportShare @0xb4134aa2db41ef60 { 84 | reportMetadata @0 :ReportMetadata; 85 | publicShare @1 :Data; 86 | encryptedInputShare @2 :HpkeCiphertext; 87 | } 88 | 89 | reportShare @0 :ReportShare; 90 | payload @1 :Data; 91 | } 92 | 93 | 94 | 95 | struct InitializedReports @0xf36341397ae4a146 { 96 | struct InitializedReport @0xfa833aa6b5d03d6d { 97 | using VdafPrepShare = Data; 98 | using VdafPrepState = Data; 99 | 100 | union { 101 | ready :group { 102 | metadata @0 :ReportMetadata; 103 | publicShare @1 :Data; 104 | prepShare @2 :VdafPrepShare; 105 | prepState @3 :VdafPrepState; 106 | peerPrepShare @4 :Data; 107 | } 108 | rejected :group { 109 | metadata @5 :ReportMetadata; 110 | failure @6 :Base.ReportError; 111 | } 112 | } 113 | } 114 | 115 | vdafConfig @0 :VdafConfig; 116 | reports @1 :List(InitializedReport); 117 | } 118 | -------------------------------------------------------------------------------- /crates/daphne-service-utils/src/durable_requests/bindings/agg_job_response_store.capnp: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2025 Cloudflare, Inc. All rights reserved. 
2 | # SPDX-License-Identifier: BSD-3-Clause 3 | 4 | @0xd30da336463f3205; 5 | 6 | using Base = import "../../capnproto/base.capnp"; 7 | 8 | struct AggregationJobResponse @0xebda3ce03fce7e72 { 9 | struct PrepareRespVar @0xc41a0ca7156794f0 { 10 | union { 11 | continue @0 :Data; 12 | reject @1 :Base.ReportError; 13 | } 14 | } 15 | 16 | struct PrepareResp @0xc8b6a95ad17a2152 { 17 | reportId @0 :Base.ReportId; 18 | var @1 :PrepareRespVar; 19 | } 20 | 21 | prepResps @0 :List(PrepareResp); 22 | } 23 | 24 | -------------------------------------------------------------------------------- /crates/daphne-service-utils/src/durable_requests/bindings/agg_job_response_store.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2025 Cloudflare, Inc. All rights reserved. 2 | // SPDX-License-Identifier: BSD-3-Clause 3 | 4 | use daphne::{ 5 | messages::{AggregationJobId, PrepareResp, PrepareRespVar, TaskId}, 6 | protocol::ReadyAggregationJobResp, 7 | DapVersion, 8 | }; 9 | 10 | use crate::{ 11 | agg_job_response_store_capnp::aggregation_job_response, 12 | capnproto::{ 13 | decode_list, encode_list, usize_to_capnp_len, CapnprotoPayloadDecode, 14 | CapnprotoPayloadEncode, 15 | }, 16 | durable_requests::ObjectIdFrom, 17 | }; 18 | 19 | super::define_do_binding! { 20 | const BINDING = "AGGREGATE_JOB_RESULT_STORE"; 21 | enum Command { 22 | Get = "/get", 23 | Put = "/put", 24 | } 25 | 26 | fn name( 27 | (version, task_id, agg_job_id): 28 | (DapVersion, &'n TaskId, &'n AggregationJobId) 29 | ) -> ObjectIdFrom { 30 | ObjectIdFrom::Name(format!("{version}/task/{task_id}/agg_job/{agg_job_id}")) 31 | } 32 | } 33 | 34 | impl CapnprotoPayloadEncode for ReadyAggregationJobResp { 35 | type Builder<'a> = aggregation_job_response::Builder<'a>; 36 | 37 | fn encode_to_builder(&self, builder: Self::Builder<'_>) { 38 | let Self { prep_resps } = self; 39 | encode_list( 40 | prep_resps, 41 | builder.init_prep_resps(usize_to_capnp_len(prep_resps.len())), 42 | ); 43 | } 44 | } 45 | 46 | impl CapnprotoPayloadEncode for PrepareResp { 47 | type Builder<'a> = aggregation_job_response::prepare_resp::Builder<'a>; 48 | 49 | fn encode_to_builder(&self, mut builder: Self::Builder<'_>) { 50 | let Self { report_id, var } = self; 51 | report_id.encode_to_builder(builder.reborrow().init_report_id()); 52 | let mut builder = builder.init_var(); 53 | match var { 54 | PrepareRespVar::Continue(vec) => builder.set_continue(vec), 55 | PrepareRespVar::Reject(report_error) => builder.set_reject((*report_error).into()), 56 | } 57 | } 58 | } 59 | 60 | impl CapnprotoPayloadDecode for PrepareResp { 61 | type Reader<'a> = aggregation_job_response::prepare_resp::Reader<'a>; 62 | 63 | fn decode_from_reader(reader: Self::Reader<'_>) -> capnp::Result 64 | where 65 | Self: Sized, 66 | { 67 | Ok(Self { 68 | report_id: <_>::decode_from_reader(reader.get_report_id()?)?, 69 | var: match reader.get_var()?.which()? 
{ 70 | aggregation_job_response::prepare_resp_var::Which::Continue(data) => { 71 | PrepareRespVar::Continue(data?.to_vec()) 72 | } 73 | aggregation_job_response::prepare_resp_var::Which::Reject(report_error) => { 74 | PrepareRespVar::Reject(report_error?.into()) 75 | } 76 | }, 77 | }) 78 | } 79 | } 80 | 81 | impl CapnprotoPayloadDecode for ReadyAggregationJobResp { 82 | type Reader<'a> = aggregation_job_response::Reader<'a>; 83 | 84 | fn decode_from_reader(reader: Self::Reader<'_>) -> capnp::Result 85 | where 86 | Self: Sized, 87 | { 88 | Ok(Self { 89 | prep_resps: decode_list::(reader.get_prep_resps()?)?, 90 | }) 91 | } 92 | } 93 | 94 | #[cfg(test)] 95 | mod test { 96 | use super::*; 97 | use crate::capnproto::{CapnprotoPayloadDecodeExt as _, CapnprotoPayloadEncodeExt as _}; 98 | use daphne::messages::ReportId; 99 | use rand::{thread_rng, Rng}; 100 | 101 | fn gen_agg_job_resp() -> ReadyAggregationJobResp { 102 | ReadyAggregationJobResp { 103 | prep_resps: vec![ 104 | PrepareResp { 105 | report_id: ReportId(thread_rng().gen()), 106 | var: PrepareRespVar::Continue(vec![1, 2, 3]), 107 | }, 108 | PrepareResp { 109 | report_id: ReportId(thread_rng().gen()), 110 | var: PrepareRespVar::Reject(daphne::messages::ReportError::InvalidMessage), 111 | }, 112 | ], 113 | } 114 | } 115 | 116 | #[test] 117 | fn serialization_deserialization_round_trip() { 118 | let this = gen_agg_job_resp(); 119 | let other = ReadyAggregationJobResp::decode_from_bytes(&this.encode_to_bytes()).unwrap(); 120 | assert_eq!(this, other); 121 | } 122 | } 123 | -------------------------------------------------------------------------------- /crates/daphne-service-utils/src/durable_requests/bindings/aggregate_store_v2.capnp: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2025 Cloudflare, Inc. All rights reserved. 2 | # SPDX-License-Identifier: BSD-3-Clause 3 | 4 | @0x822b0e344bf68531; 5 | 6 | using Base = import "../../capnproto/base.capnp"; 7 | 8 | struct PutRequest @0xbabd9e0f2a99569a { 9 | aggShareDelta @0 :import "../durable_request.capnp".DapAggregateShare; 10 | aggJobId @1 :Base.AggregationJobId; 11 | } 12 | -------------------------------------------------------------------------------- /crates/daphne-service-utils/src/durable_requests/bindings/aggregation_job_store.capnp: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2025 Cloudflare, Inc. All rights reserved. 2 | # SPDX-License-Identifier: BSD-3-Clause 3 | 4 | @0xa11edd1197dbcf0b; 5 | 6 | using Base = import "../../capnproto/base.capnp"; 7 | 8 | struct NewJobRequest @0xdd285ccdbb2cd14e { 9 | id @0 :Base.AggregationJobId; 10 | aggJobHash @1 :Data; 11 | } 12 | -------------------------------------------------------------------------------- /crates/daphne-service-utils/src/durable_requests/bindings/aggregation_job_store.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2025 Cloudflare, Inc. All rights reserved. 2 | // SPDX-License-Identifier: BSD-3-Clause 3 | 4 | use crate::{ 5 | aggregation_job_store_capnp::new_job_request, 6 | capnproto::{CapnprotoPayloadDecode, CapnprotoPayloadEncode}, 7 | durable_requests::ObjectIdFrom, 8 | }; 9 | use daphne::{ 10 | messages::{AggregationJobId, TaskId}, 11 | DapVersion, 12 | }; 13 | use serde::{Deserialize, Serialize}; 14 | use std::borrow::Cow; 15 | 16 | super::define_do_binding! 
{ 17 | const BINDING = "AGGREGATION_JOB_STORE"; 18 | 19 | enum Command { 20 | NewJob = "/new-job", 21 | ContainsJob = "/contains", 22 | } 23 | 24 | fn name((version, task_id): (DapVersion, &'n TaskId)) -> ObjectIdFrom { 25 | ObjectIdFrom::Name(format!("{version}/task/{task_id}")) 26 | } 27 | } 28 | 29 | #[derive(Debug)] 30 | pub struct NewJobRequest<'h> { 31 | pub id: AggregationJobId, 32 | pub agg_job_hash: Cow<'h, [u8]>, 33 | } 34 | 35 | impl CapnprotoPayloadEncode for NewJobRequest<'_> { 36 | type Builder<'a> = new_job_request::Builder<'a>; 37 | 38 | fn encode_to_builder(&self, mut builder: Self::Builder<'_>) { 39 | self.id.encode_to_builder(builder.reborrow().init_id()); 40 | builder.set_agg_job_hash(&self.agg_job_hash); 41 | } 42 | } 43 | 44 | impl CapnprotoPayloadDecode for NewJobRequest<'static> { 45 | type Reader<'a> = new_job_request::Reader<'a>; 46 | 47 | fn decode_from_reader(reader: Self::Reader<'_>) -> capnp::Result 48 | where 49 | Self: Sized, 50 | { 51 | Ok(Self { 52 | id: <_>::decode_from_reader(reader.get_id()?)?, 53 | agg_job_hash: reader.get_agg_job_hash()?.to_vec().into(), 54 | }) 55 | } 56 | } 57 | 58 | #[derive(Debug, Serialize, Deserialize)] 59 | pub enum NewJobResponse { 60 | Ok, 61 | /// Request would change an existing aggregation job's parameters. 62 | IllegalJobParameters, 63 | } 64 | -------------------------------------------------------------------------------- /crates/daphne-service-utils/src/durable_requests/bindings/mod.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2025 Cloudflare, Inc. All rights reserved. 2 | // SPDX-License-Identifier: BSD-3-Clause 3 | 4 | //! This module defines the durable objects' binding and methods as implementors of the 5 | //! [`DurableMethod`] trait. 6 | //! 7 | //! It also defines types that are used as the body of requests sent to these objects. 8 | 9 | pub mod agg_job_response_store; 10 | mod aggregate_store; 11 | pub mod aggregate_store_v2; 12 | pub mod aggregation_job_store; 13 | pub mod replay_checker; 14 | #[cfg(feature = "test-utils")] 15 | mod test_state_cleaner; 16 | 17 | use super::ObjectIdFrom; 18 | 19 | pub use aggregate_store::{ 20 | AggregateStore, AggregateStoreMergeOptions, AggregateStoreMergeReq, AggregateStoreMergeResp, 21 | }; 22 | #[cfg(feature = "test-utils")] 23 | pub use test_state_cleaner::TestStateCleaner; 24 | 25 | /// A durable object method. 26 | pub trait DurableMethod { 27 | /// The binding of the object this method belongs to as configured in the wrangler.toml 28 | const BINDING: &'static str; 29 | 30 | type NameParameters<'n>; 31 | 32 | /// Try to parse a uri into one of methods of this object. 33 | fn try_from_uri(s: &str) -> Option 34 | where 35 | Self: Sized; 36 | 37 | /// Convert this method into a uri. 38 | fn to_uri(&self) -> &'static str; 39 | 40 | /// Generate the durable object name 41 | fn name(params: Self::NameParameters<'_>) -> ObjectIdFrom; 42 | } 43 | 44 | macro_rules! define_do_binding { 45 | ( 46 | const BINDING = $binding:literal; 47 | enum $name:ident { 48 | $($op:ident = $route:literal),*$(,)? 49 | } 50 | 51 | fn name($params:tt : $params_ty:ty) -> ObjectIdFrom $name_impl:block 52 | 53 | ) => { 54 | // guarantee at compile time that all paths start with a / to ensure that concatenation 55 | // with the base url later has a predictable outcome. 
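        //
        // For example, the invocation in `agg_job_response_store.rs` (`Get = "/get"`,
        // `Put = "/put"`) expands to a `Command` enum where `Command::Get.to_uri()`
        // returns "/get" and `Command::try_from_uri("/get")` returns `Some(Command::Get)`.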
56 | $(const _: () = assert!(matches!($route.as_bytes().first(), Some(b'/')));)* 57 | 58 | #[derive( 59 | serde::Serialize, 60 | serde::Deserialize, 61 | Debug, 62 | Clone, 63 | Copy, 64 | PartialEq, 65 | Eq, 66 | PartialOrd, 67 | Ord, 68 | Hash 69 | )] 70 | pub enum $name { 71 | $($op),* 72 | } 73 | 74 | impl $crate::durable_requests::bindings::DurableMethod for $name { 75 | const BINDING: &'static str = $binding; 76 | 77 | type NameParameters<'n> = $params_ty; 78 | 79 | fn try_from_uri(s: &str) -> Option { 80 | match (s) { 81 | $($route => Some(Self::$op),)* 82 | _ => return None, 83 | } 84 | } 85 | 86 | fn to_uri(&self) -> &'static str { 87 | match self { 88 | $(Self::$op => $route,)* 89 | } 90 | } 91 | 92 | fn name($params: Self::NameParameters<'_>) -> $crate::durable_requests::bindings::ObjectIdFrom { 93 | $name_impl 94 | } 95 | } 96 | }; 97 | } 98 | 99 | pub(crate) use define_do_binding; 100 | 101 | #[cfg(test)] 102 | mod tests { 103 | use daphne::{messages::BatchId, DapBatchBucket}; 104 | 105 | // We use `std::fmt::Display` for `DapBatchBucket` to format names for DO instances. Ensure 106 | // that they are formatted the way we expect. 107 | #[test] 108 | fn bucket_display() { 109 | assert_eq!( 110 | "batch/IiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiI", 111 | format!( 112 | "{}", 113 | DapBatchBucket::LeaderSelected { 114 | batch_id: BatchId([34; 32]), 115 | shard: 0, 116 | } 117 | ) 118 | ); 119 | assert_eq!( 120 | "window/1337", 121 | format!( 122 | "{}", 123 | DapBatchBucket::TimeInterval { 124 | batch_window: 1337, 125 | shard: 0, 126 | } 127 | ) 128 | ); 129 | assert_eq!( 130 | "batch/IiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiI/shard/2323", 131 | format!( 132 | "{}", 133 | DapBatchBucket::LeaderSelected { 134 | batch_id: BatchId([34; 32]), 135 | shard: 2323, 136 | } 137 | ) 138 | ); 139 | assert_eq!( 140 | "window/1337/shard/99", 141 | format!( 142 | "{}", 143 | DapBatchBucket::TimeInterval { 144 | batch_window: 1337, 145 | shard: 99, 146 | } 147 | ) 148 | ); 149 | } 150 | } 151 | -------------------------------------------------------------------------------- /crates/daphne-service-utils/src/durable_requests/bindings/replay_checker.capnp: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2025 Cloudflare, Inc. All rights reserved. 2 | # SPDX-License-Identifier: BSD-3-Clause 3 | 4 | @0xaaa529cce40f45d7; 5 | 6 | using Base = import "../../capnproto/base.capnp"; 7 | 8 | struct CheckReplaysFor @0xe1e6a4a1695238ca { 9 | reports @0 :List(Base.ReportId); 10 | aggJobId @1 :Base.AggregationJobId; 11 | } 12 | -------------------------------------------------------------------------------- /crates/daphne-service-utils/src/durable_requests/bindings/replay_checker.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2025 Cloudflare, Inc. All rights reserved. 2 | // SPDX-License-Identifier: BSD-3-Clause 3 | 4 | use crate::{ 5 | capnproto::{ 6 | decode_list, encode_list, usize_to_capnp_len, CapnprotoPayloadDecode, 7 | CapnprotoPayloadEncode, 8 | }, 9 | durable_requests::ObjectIdFrom, 10 | replay_checker_capnp::check_replays_for, 11 | }; 12 | use daphne::messages::{AggregationJobId, ReportId, TaskId, Time}; 13 | use serde::{Deserialize, Serialize}; 14 | use std::{borrow::Cow, collections::HashSet}; 15 | 16 | super::define_do_binding! 
{ 17 | const BINDING = "REPLAY_CHECK_STORE"; 18 | 19 | enum Command { 20 | Check = "/check", 21 | } 22 | 23 | fn name((task_id, epoch, shard): (&'n TaskId, Time, usize)) -> ObjectIdFrom { 24 | ObjectIdFrom::Name(format!("replay-checker/{task_id}/epoch/{epoch}/shard/{shard}")) 25 | } 26 | } 27 | 28 | pub struct Request<'s> { 29 | pub report_ids: Cow<'s, [ReportId]>, 30 | pub agg_job_id: AggregationJobId, 31 | } 32 | 33 | impl CapnprotoPayloadEncode for Request<'_> { 34 | type Builder<'a> = check_replays_for::Builder<'a>; 35 | 36 | fn encode_to_builder(&self, mut builder: Self::Builder<'_>) { 37 | let Self { 38 | report_ids, 39 | agg_job_id, 40 | } = self; 41 | encode_list( 42 | report_ids.iter(), 43 | builder 44 | .reborrow() 45 | .init_reports(usize_to_capnp_len(report_ids.len())), 46 | ); 47 | agg_job_id.encode_to_builder(builder.init_agg_job_id()); 48 | } 49 | } 50 | 51 | impl CapnprotoPayloadDecode for Request<'static> { 52 | type Reader<'a> = check_replays_for::Reader<'a>; 53 | 54 | fn decode_from_reader(reader: Self::Reader<'_>) -> capnp::Result 55 | where 56 | Self: Sized, 57 | { 58 | Ok(Self { 59 | report_ids: decode_list::(reader.get_reports()?)?, 60 | agg_job_id: <_>::decode_from_reader(reader.get_agg_job_id()?)?, 61 | }) 62 | } 63 | } 64 | 65 | #[derive(Debug, Serialize, Deserialize)] 66 | pub struct Response { 67 | pub duplicates: HashSet, 68 | } 69 | -------------------------------------------------------------------------------- /crates/daphne-service-utils/src/durable_requests/bindings/test_state_cleaner.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2024 Cloudflare, Inc. All rights reserved. 2 | // SPDX-License-Identifier: BSD-3-Clause 3 | 4 | use crate::durable_requests::ObjectIdFrom; 5 | 6 | super::define_do_binding! { 7 | const BINDING = "DAP_TEST_STATE_CLEANER"; 8 | enum TestStateCleaner { 9 | Put = "/internal/do/test_state_cleaner/put", 10 | DeleteAll = "/internal/do/delete_all", 11 | } 12 | 13 | fn name((): ()) -> ObjectIdFrom { 14 | ObjectIdFrom::Name(Self::NAME_STR.into()) 15 | } 16 | } 17 | 18 | impl TestStateCleaner { 19 | pub const NAME_STR: &'static str = "test_do_cleaner"; 20 | } 21 | -------------------------------------------------------------------------------- /crates/daphne-service-utils/src/durable_requests/durable_request.capnp: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2024 Cloudflare, Inc. All rights reserved. 
2 | # SPDX-License-Identifier: BSD-3-Clause 3 | 4 | @0xd076f8051f8de41a; 5 | 6 | using Base = import "../capnproto/base.capnp"; 7 | 8 | enum Method @0xdd078556311145a1 { 9 | get @0; 10 | post @1; 11 | put @2; 12 | patch @3; 13 | delete @4; 14 | options @5; 15 | head @6; 16 | trace @7; 17 | connect @8; 18 | } 19 | 20 | struct DurableRequest @0xfbd55b93d47690b9 { 21 | binding @0 :Text; 22 | id :union { 23 | name @1 :Text; 24 | hex @2 :Text; 25 | } 26 | retry @3 :Bool; 27 | } 28 | 29 | struct DapAggregateShare @0xb34ce529a4a66aed { 30 | reportCount @0 :UInt64; 31 | minTime @1 :UInt64; 32 | maxTime @2 :UInt64; 33 | checksum @3 :Data; 34 | data :union { 35 | field64Draft09@4 :Data; 36 | field128Draft09@5 :Data; 37 | fieldPrio2Draft09@6 :Data; 38 | none @7 :Void; 39 | field64 @8 :Data; 40 | field128 @9 :Data; 41 | fieldPrio2 @10 :Data; 42 | } 43 | } 44 | 45 | struct AggregateStoreMergeReq @0xbaad7bdeb4b06161 { 46 | containedReports @0 :List(Base.ReportId); 47 | aggShareDelta @1 :DapAggregateShare; 48 | options @2 :MergeReqOptions; 49 | } 50 | 51 | struct MergeReqOptions @0x9e03186eae71ca92 { 52 | skipReplayProtection @0 :Bool; 53 | } 54 | 55 | -------------------------------------------------------------------------------- /crates/daphne-service-utils/src/http_headers.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2024 Cloudflare, Inc. All rights reserved. 2 | // SPDX-License-Identifier: BSD-3-Clause 3 | 4 | pub const HPKE_SIGNATURE: &str = "x-hpke-config-signature"; 5 | pub const DAP_AUTH_TOKEN: &str = "dap-auth-token"; 6 | pub const DAP_TASKPROV: &str = "dap-taskprov"; 7 | pub const STORAGE_PROXY_PUT_KV_EXPIRATION: &str = "x-daphne-storage-proxy-kv-put-expiration"; 8 | -------------------------------------------------------------------------------- /crates/daphne-service-utils/src/lib.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2025 Cloudflare, Inc. All rights reserved. 2 | // SPDX-License-Identifier: BSD-3-Clause 3 | 4 | #![cfg_attr(not(test), deny(unused_crate_dependencies))] 5 | 6 | pub mod bearer_token; 7 | #[cfg(any(feature = "durable_requests", feature = "compute-offload"))] 8 | pub mod capnproto; 9 | #[cfg(feature = "compute-offload")] 10 | pub mod compute_offload; 11 | #[cfg(feature = "durable_requests")] 12 | pub mod durable_requests; 13 | pub mod http_headers; 14 | #[cfg(feature = "test-utils")] 15 | pub mod test_route_types; 16 | 17 | // the generated code expects this module to be defined at the root of the library. 
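// Each module below includes the Rust bindings that `build.rs` generates (via `capnpc`) from
// the corresponding `.capnp` schema into `OUT_DIR` at build time.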
18 | #[cfg(any(feature = "durable_requests", feature = "compute-offload"))] 19 | #[doc(hidden)] 20 | pub mod base_capnp { 21 | #![allow(dead_code)] 22 | #![allow(clippy::pedantic)] 23 | #![allow(clippy::needless_lifetimes)] 24 | include!(concat!(env!("OUT_DIR"), "/src/capnproto/base_capnp.rs")); 25 | } 26 | 27 | #[cfg(feature = "durable_requests")] 28 | mod durable_request_capnp { 29 | #![allow(dead_code)] 30 | #![allow(clippy::pedantic)] 31 | #![allow(clippy::needless_lifetimes)] 32 | include!(concat!( 33 | env!("OUT_DIR"), 34 | "/src/durable_requests/durable_request_capnp.rs" 35 | )); 36 | } 37 | 38 | #[cfg(feature = "durable_requests")] 39 | mod aggregation_job_store_capnp { 40 | #![allow(dead_code)] 41 | #![allow(clippy::pedantic)] 42 | #![allow(clippy::needless_lifetimes)] 43 | include!(concat!( 44 | env!("OUT_DIR"), 45 | "/src/durable_requests/bindings/aggregation_job_store_capnp.rs" 46 | )); 47 | } 48 | 49 | #[cfg(feature = "durable_requests")] 50 | mod agg_job_response_store_capnp { 51 | #![allow(dead_code)] 52 | #![allow(clippy::pedantic)] 53 | #![allow(clippy::needless_lifetimes)] 54 | include!(concat!( 55 | env!("OUT_DIR"), 56 | "/src/durable_requests/bindings/agg_job_response_store_capnp.rs" 57 | )); 58 | } 59 | 60 | #[cfg(feature = "durable_requests")] 61 | mod aggregate_store_v2_capnp { 62 | #![allow(dead_code)] 63 | #![allow(clippy::pedantic)] 64 | #![allow(clippy::needless_lifetimes)] 65 | include!(concat!( 66 | env!("OUT_DIR"), 67 | "/src/durable_requests/bindings/aggregate_store_v2_capnp.rs" 68 | )); 69 | } 70 | 71 | #[cfg(feature = "durable_requests")] 72 | mod replay_checker_capnp { 73 | #![allow(dead_code)] 74 | #![allow(clippy::pedantic)] 75 | #![allow(clippy::needless_lifetimes)] 76 | include!(concat!( 77 | env!("OUT_DIR"), 78 | "/src/durable_requests/bindings/replay_checker_capnp.rs" 79 | )); 80 | } 81 | 82 | #[cfg(feature = "compute-offload")] 83 | #[doc(hidden)] 84 | pub mod compute_offload_capnp { 85 | #![allow(dead_code)] 86 | #![allow(clippy::pedantic)] 87 | #![allow(clippy::needless_lifetimes)] 88 | include!(concat!( 89 | env!("OUT_DIR"), 90 | "/src/compute_offload/compute_offload_capnp.rs" 91 | )); 92 | } 93 | -------------------------------------------------------------------------------- /crates/daphne-service-utils/src/test_route_types.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2024 Cloudflare, Inc. All rights reserved. 2 | // SPDX-License-Identifier: BSD-3-Clause 3 | 4 | //! Types used in `/internal/*` routes to implement the [interop][interop] draft 5 | //! 6 | //! 
//! [interop]: https://divergentdave.github.io/draft-dcook-ppm-dap-interop-test-design/draft-dcook-ppm-dap-interop-test-design.html 7 | 8 | use daphne::{ 9 | constants::DapAggregatorRole, 10 | messages::{Duration, TaskId}, 11 | vdaf::{Prio3Config, VdafConfig}, 12 | DapTaskLifetime, 13 | }; 14 | use serde::{Deserialize, Serialize}; 15 | use std::num::NonZeroU32; 16 | use url::Url; 17 | 18 | #[derive(Deserialize)] 19 | #[serde(rename_all = "snake_case")] 20 | pub struct InternalTestEndpointForTask { 21 | pub role: DapAggregatorRole, 22 | } 23 | 24 | #[derive(Serialize, Deserialize)] 25 | pub struct InternalTestVdaf { 26 | #[serde(rename = "type")] 27 | pub typ: String, 28 | #[serde(skip_serializing_if = "Option::is_none")] 29 | pub bits: Option<String>, 30 | #[serde(skip_serializing_if = "Option::is_none")] 31 | pub length: Option<String>, 32 | #[serde(skip_serializing_if = "Option::is_none")] 33 | pub chunk_length: Option<String>, 34 | #[serde(skip_serializing_if = "Option::is_none")] 35 | pub dimension: Option<String>, 36 | }
37 | 38 | impl From<VdafConfig> for InternalTestVdaf { 39 | fn from(vdaf: VdafConfig) -> Self { 40 | let (typ, bits, length, chunk_length, dimension) = match vdaf { 41 | VdafConfig::Prio3(prio3) => match prio3 { 42 | Prio3Config::Count => ("Prio3Count", None, None, None, None), 43 | Prio3Config::Sum { max_measurement } => ( 44 | "Prio3Sum", 45 | Some(usize::try_from(max_measurement).unwrap()), 46 | None, 47 | None, 48 | None, 49 | ), 50 | Prio3Config::Histogram { 51 | length, 52 | chunk_length, 53 | } => ( 54 | "Prio3Histogram", 55 | None, 56 | Some(length), 57 | Some(chunk_length), 58 | None, 59 | ), 60 | Prio3Config::SumVec { 61 | bits, 62 | length, 63 | chunk_length, 64 | } => ( 65 | "Prio3SumVec", 66 | Some(bits), 67 | Some(length), 68 | Some(chunk_length), 69 | None, 70 | ), 71 | Prio3Config::Draft09SumVecField64MultiproofHmacSha256Aes128 { 72 | bits, 73 | length, 74 | chunk_length, 75 | num_proofs: _unimplemented, 76 | } => ( 77 | "Prio3Draft09SumVecField64MultiproofHmacSha256Aes128", 78 | Some(bits), 79 | Some(length), 80 | Some(chunk_length), 81 | None, 82 | ), 83 | }, 84 | VdafConfig::Prio2 { dimension } => ("Prio2", None, None, None, Some(dimension)), 85 | VdafConfig::Pine(_) => ("Pine", None, None, None, None), 86 | #[cfg(feature = "experimental")] 87 | VdafConfig::Mastic { .. } => todo!(), 88 | }; 89 | Self { 90 | typ: typ.into(), 91 | bits: bits.map(|a| a.to_string()), 92 | length: length.map(|a| a.to_string()), 93 | chunk_length: chunk_length.map(|a| a.to_string()), 94 | dimension: dimension.map(|a| a.to_string()), 95 | } 96 | } 97 | }
98 | 99 | #[derive(Serialize, Deserialize)] 100 | #[serde(rename_all = "snake_case")] 101 | pub struct InternalTestAddTask { 102 | pub task_id: TaskId, // base64url 103 | pub leader: Url, 104 | pub helper: Url, 105 | pub vdaf: InternalTestVdaf, 106 | pub leader_authentication_token: String, 107 | #[serde(skip_serializing_if = "Option::is_none")] 108 | pub collector_authentication_token: Option<String>, 109 | pub role: DapAggregatorRole, 110 | pub vdaf_verify_key: String, // base64url 111 | pub batch_mode: u8, 112 | pub min_batch_size: u64, 113 | #[serde(skip_serializing_if = "Option::is_none")] 114 | pub max_batch_size: Option<NonZeroU32>, 115 | pub time_precision: Duration, 116 | pub collector_hpke_config: String, // base64url 117 | // TODO(cjpatton) Align this with draft-dcook-ppm-dap-interop-test-design once it's updated to 118 | // DAP-13. I'm pretty sure we won't need to be backwards compatible.
119 | pub lifetime: DapTaskLifetime, 120 | } 121 | -------------------------------------------------------------------------------- /crates/daphne-worker-test/.gitignore: -------------------------------------------------------------------------------- 1 | .mf 2 | -------------------------------------------------------------------------------- /crates/daphne-worker-test/Cargo.toml: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2025 Cloudflare, Inc. All rights reserved. 2 | # SPDX-License-Identifier: BSD-3-Clause 3 | 4 | [package] 5 | name = "daphne-worker-test" 6 | version.workspace = true 7 | authors.workspace = true 8 | edition.workspace = true 9 | license.workspace = true 10 | readme.workspace = true 11 | homepage.workspace = true 12 | repository.workspace = true 13 | 14 | [lib] 15 | crate-type = ["cdylib", "rlib"] 16 | 17 | [features] 18 | default = ["console_error_panic_hook"] 19 | 20 | [dependencies] 21 | async-trait.workspace = true 22 | cap.workspace = true 23 | cfg-if.workspace = true 24 | console_error_panic_hook = { version = "0.1.7", optional = true } 25 | daphne-worker = { path = "../daphne-worker", features = ["test-utils"] } 26 | futures.workspace = true 27 | prometheus.workspace = true 28 | tracing.workspace = true 29 | worker.workspace = true 30 | 31 | [lints] 32 | workspace = true 33 | -------------------------------------------------------------------------------- /crates/daphne-worker-test/README.md: -------------------------------------------------------------------------------- 1 | # Daphne-Worker Tests 2 | 3 | This directory defines a deployment of `daphne-worker` for testing integration 4 | with `daphne`. 5 | -------------------------------------------------------------------------------- /crates/daphne-worker-test/docker-compose-storage-proxy.yaml: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2022 Cloudflare, Inc. All rights reserved. 2 | # SPDX-License-Identifier: BSD-3-Clause 3 | --- 4 | version: "3.3" 5 | services: 6 | leader_storage: 7 | ports: 8 | - 4000:4000 9 | build: 10 | context: ../.. 11 | dockerfile: crates/daphne-worker-test/docker/storage-proxy.Dockerfile 12 | command: 13 | - "--port=4000" 14 | helper_storage: 15 | ports: 16 | - 4001:4001 17 | build: 18 | context: ../.. 19 | dockerfile: crates/daphne-worker-test/docker/storage-proxy.Dockerfile 20 | command: 21 | - "--port=4001" 22 | -------------------------------------------------------------------------------- /crates/daphne-worker-test/docker/aggregator.Dockerfile: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2025 Cloudflare, Inc. All rights reserved. 2 | # SPDX-License-Identifier: BSD-3-Clause 3 | 4 | FROM rust:1.84.1-bookworm AS builder 5 | RUN apt update && apt install -y capnproto clang cmake 6 | 7 | # Pre-install worker-build and Rust's wasm32 target to speed up our custom build command 8 | RUN rustup target add wasm32-unknown-unknown 9 | RUN echo Ola 10 | RUN cargo install worker-build@0.1.2 --locked 11 | 12 | # Build the worker. 
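# worker-build compiles the crate for the wasm32-unknown-unknown target and writes the JS
# shim and wasm bundle under build/; the runtime stages below only copy that build/ directory
# plus a wrangler config that points at it (the storage-proxy config, for example, sets
# main = "build/worker/shim.mjs"; the aggregator config is assumed to follow the same layout).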
13 | WORKDIR /tmp/dap_test 14 | COPY Cargo.toml Cargo.lock ./ 15 | COPY crates/daphne-worker-test ./crates/daphne-worker-test 16 | COPY crates/daphne-worker ./crates/daphne-worker 17 | COPY crates/daphne-service-utils ./crates/daphne-service-utils 18 | COPY crates/daphne ./crates/daphne 19 | WORKDIR /tmp/dap_test/crates/daphne-worker-test 20 | RUN worker-build --dev 21 | 22 | FROM node:bookworm AS leader 23 | RUN npm install -g wrangler@3.60.1 && npm cache clean --force 24 | COPY --from=builder /tmp/dap_test/crates/daphne-worker-test/build/ /build 25 | COPY crates/daphne-worker-test/wrangler.aggregator.toml / 26 | # this container doesn't need worker-build but the wrangler.toml requires it, so we just fake it 27 | RUN ln -s /usr/bin/true /usr/bin/worker-build 28 | 29 | ENTRYPOINT ["wrangler", "dev", "--config", "wrangler.aggregator.toml", "-e", "leader", "--port", "8787"] 30 | 31 | FROM node:bookworm AS helper 32 | RUN npm install -g wrangler@3.60.1 && npm cache clean --force 33 | COPY --from=builder /tmp/dap_test/crates/daphne-worker-test/build/ /build 34 | COPY crates/daphne-worker-test/wrangler.aggregator.toml / 35 | # this container doesn't need worker-build but the wrangler.toml requires it, so we just fake it 36 | RUN ln -s /usr/bin/true /usr/bin/worker-build 37 | 38 | ENTRYPOINT ["wrangler", "dev", "--config", "wrangler.aggregator.toml", "-e", "helper", "--port", "8788", "--var", "COMPUTE_OFFLOAD_HOST:compute-offload:5000"] 39 | -------------------------------------------------------------------------------- /crates/daphne-worker-test/docker/docker-compose-e2e.yaml: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2025 Cloudflare, Inc. All rights reserved. 2 | # SPDX-License-Identifier: BSD-3-Clause 3 | --- 4 | version: "3.9" 5 | 6 | networks: 7 | dap_network: 8 | driver: bridge 9 | 10 | services: 11 | compute-offload: 12 | networks: 13 | - dap_network 14 | ports: 15 | - "4000" 16 | build: 17 | context: ../../.. 18 | dockerfile: crates/daphne-server/docker/example-service.Dockerfile 19 | target: compute-offload 20 | environment: 21 | - RUST_LOG=info 22 | leader: 23 | networks: 24 | - dap_network 25 | ports: 26 | - "8787" 27 | build: 28 | context: ../../.. 29 | dockerfile: crates/daphne-worker-test/docker/aggregator.Dockerfile 30 | target: leader 31 | environment: 32 | - RUST_LOG=info 33 | helper: 34 | networks: 35 | - dap_network 36 | ports: 37 | - "8788" 38 | build: 39 | context: ../../.. 40 | dockerfile: crates/daphne-worker-test/docker/aggregator.Dockerfile 41 | target: helper 42 | environment: 43 | - RUST_LOG=info 44 | test: 45 | networks: 46 | - dap_network 47 | build: 48 | context: ../../.. 
49 | dockerfile: crates/daphne-worker-test/docker/runtests.Dockerfile 50 | depends_on: 51 | - leader 52 | - helper 53 | environment: 54 | - "E2E_TEST_HPKE_SIGNING_CERTIFICATE=-----BEGIN CERTIFICATE-----\nMIICCTCCAa+gAwIBAgIUBECNyioI8d+hgXsgmVI+TcRD8wUwCgYIKoZIzj0EAwIw\nWjELMAkGA1UEBhMCUFQxDjAMBgNVBAcMBUJyYWdhMRcwFQYDVQQKDA5DbG91ZGZs\nYXJlIExkYTEiMCAGA1UEAwwZaGVscGVyLmRhcC5jbG91ZGZsYXJlLmNvbTAeFw0y\nNTAxMDYxMTAwNDdaFw0yNjAxMDYxMTAwNDdaMFoxCzAJBgNVBAYTAlBUMQ4wDAYD\nVQQHDAVCcmFnYTEXMBUGA1UECgwOQ2xvdWRmbGFyZSBMZGExIjAgBgNVBAMMGWhl\nbHBlci5kYXAuY2xvdWRmbGFyZS5jb20wWTATBgcqhkjOPQIBBggqhkjOPQMBBwNC\nAASheYdDsJLsG4UG95bs2qlVr1QQcK6+k6emAJSDAlr7bIGjHUoLwUdIQK818g/N\ngVL0vig90b4uGTS7KdKJ9o4Ko1MwUTAdBgNVHQ4EFgQUeOUaahWphjiaQotYoRfb\nVBdby+wwHwYDVR0jBBgwFoAUeOUaahWphjiaQotYoRfbVBdby+wwDwYDVR0TAQH/\nBAUwAwEB/zAKBggqhkjOPQQDAgNIADBFAiEAl0pg+5iQC3yskSbZrz8gyEgAaKx2\niyrASYsFh2gdfkICIAgkOlAOHsUHlhh0zRt9m283dLR0/ZYVoEhII8ZMkb1/\n-----END CERTIFICATE-----" 55 | -------------------------------------------------------------------------------- /crates/daphne-worker-test/docker/runtests.Dockerfile: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2024 Cloudflare, Inc. All rights reserved. 2 | # SPDX-License-Identifier: BSD-3-Clause 3 | 4 | FROM rust:1.84.1-bookworm 5 | 6 | WORKDIR /tmp/dap_test 7 | 8 | RUN apt-get update && \ 9 | apt-get install -y \ 10 | libssl-dev \ 11 | pkg-config \ 12 | capnproto 13 | 14 | COPY Cargo.toml Cargo.lock ./ 15 | COPY crates/daphne-server ./crates/daphne-server 16 | COPY crates/daphne-worker ./crates/daphne-worker 17 | COPY crates/daphne-service-utils ./crates/daphne-service-utils 18 | COPY crates/daphne ./crates/daphne 19 | 20 | ENV PATH="${PATH}:/root/.cargo/bin" 21 | ENV RUST_BACKTRACE=1 22 | CMD ["cargo", "test", \ 23 | "--features=test_e2e", \ 24 | "--", \ 25 | "--nocapture", \ 26 | "--test-threads=1", \ 27 | "e2e"] 28 | -------------------------------------------------------------------------------- /crates/daphne-worker-test/docker/storage-proxy.Dockerfile: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2024 Cloudflare, Inc. All rights reserved. 2 | # SPDX-License-Identifier: BSD-3-Clause 3 | 4 | FROM rust:1.84.1-bookworm AS builder 5 | RUN apt update && apt install -y capnproto clang cmake 6 | 7 | # Pre-install worker-build and Rust's wasm32 target to speed up our custom build command 8 | RUN rustup target add wasm32-unknown-unknown 9 | RUN echo ola 10 | RUN cargo install worker-build@0.1.2 --locked 11 | 12 | # Build the storage proxy. 
13 | WORKDIR /tmp/dap_test 14 | COPY Cargo.toml Cargo.lock ./ 15 | COPY crates/daphne-worker-test ./crates/daphne-worker-test 16 | COPY crates/daphne-worker ./crates/daphne-worker 17 | COPY crates/daphne-service-utils ./crates/daphne-service-utils 18 | COPY crates/daphne ./crates/daphne 19 | WORKDIR /tmp/dap_test/crates/daphne-worker-test 20 | RUN worker-build --dev 21 | 22 | FROM node:bookworm AS final 23 | RUN npm install -g wrangler@3.60.1 && npm cache clean --force 24 | COPY --from=builder /tmp/dap_test/crates/daphne-worker-test/build/ /build 25 | COPY crates/daphne-worker-test/wrangler.storage-proxy.toml / 26 | 27 | ENTRYPOINT ["wrangler", "dev", "--config", "wrangler.storage-proxy.toml"] 28 | -------------------------------------------------------------------------------- /crates/daphne-worker-test/src/durable.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2025 Cloudflare, Inc. All rights reserved. 2 | // SPDX-License-Identifier: BSD-3-Clause 3 | 4 | use daphne_worker::durable::{self, instantiate_durable_object}; 5 | 6 | instantiate_durable_object! { 7 | struct AggregateStore < durable::AggregateStore; 8 | 9 | fn init_user_data(_state: State, env: Env) { 10 | daphne_worker::tracing_utils::initialize_tracing(env); 11 | } 12 | } 13 | 14 | instantiate_durable_object! { 15 | struct ReplayChecker < durable::ReplayChecker; 16 | 17 | fn init_user_data(_state: State, env: Env) { 18 | daphne_worker::tracing_utils::initialize_tracing(env); 19 | } 20 | } 21 | 22 | instantiate_durable_object! { 23 | struct AggregationJobStore < durable::AggregationJobStore; 24 | 25 | fn init_user_data(_state: State, env: Env) { 26 | daphne_worker::tracing_utils::initialize_tracing(env); 27 | } 28 | } 29 | 30 | instantiate_durable_object! { 31 | struct AggJobResponseStore < durable::AggregationJobResp; 32 | 33 | fn init_user_data(_state: State, env: Env) { 34 | daphne_worker::tracing_utils::initialize_tracing(env); 35 | } 36 | } 37 | 38 | instantiate_durable_object! { 39 | struct AggregateStoreV2 < durable::AggregateStoreV2; 40 | 41 | fn init_user_data(_state: State, env: Env) { 42 | daphne_worker::tracing_utils::initialize_tracing(env); 43 | } 44 | } 45 | -------------------------------------------------------------------------------- /crates/daphne-worker-test/src/lib.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2025 Cloudflare, Inc. All rights reserved. 2 | // SPDX-License-Identifier: BSD-3-Clause 3 | 4 | use daphne_worker::{aggregator::App, initialize_tracing}; 5 | use futures::stream; 6 | use std::convert::Infallible; 7 | use tracing::info; 8 | use worker::{event, Env, HttpRequest, MessageBatch, ResponseBody}; 9 | 10 | mod durable; 11 | mod utils; 12 | 13 | #[global_allocator] 14 | static CAP: cap::Cap = cap::Cap::new(std::alloc::System, 65_000_000); 15 | 16 | fn load_compute_offload_host(env: &worker::Env) -> String { 17 | env.var("COMPUTE_OFFLOAD_HOST") 18 | .map_or_else(|_| "localhost:5000".into(), |t| t.to_string()) 19 | } 20 | 21 | #[event(fetch, respond_with_errors)] 22 | pub async fn main( 23 | req: HttpRequest, 24 | env: Env, 25 | _ctx: worker::Context, 26 | ) -> worker::Result { 27 | // Optionally, get more helpful error messages written to the console in the case of a panic. 28 | utils::set_panic_hook(); 29 | 30 | // We set up logging as soon as possible so that logging can be estabished and functional 31 | // before we do anything likely to fail. 
32 | initialize_tracing(&env); 33 | 34 | info!(method = ?req.method(), "{}", req.uri().path()); 35 | 36 | let registry = prometheus::Registry::new(); 37 | let response = match env 38 | .var("DAP_WORKER_MODE") 39 | .map(|t| t.to_string()) 40 | .ok() 41 | .as_deref() 42 | { 43 | Some("storage-proxy") | None => { 44 | daphne_worker::storage_proxy::handle_request(req, env, ®istry).await 45 | } 46 | Some("aggregator") => { 47 | let host = load_compute_offload_host(&env); 48 | 49 | daphne_worker::aggregator::handle_dap_request( 50 | App::new(env, ®istry, None, Box::new(ComputeOffload { host })).unwrap(), 51 | req, 52 | ) 53 | .await 54 | } 55 | Some(invalid) => { 56 | return Err(worker::Error::RustError(format!( 57 | "{invalid} is not a valid DAP_WORKER_MODE" 58 | ))) 59 | } 60 | }; 61 | 62 | Ok(response) 63 | } 64 | 65 | struct ComputeOffload { 66 | host: String, 67 | } 68 | 69 | #[async_trait::async_trait(?Send)] 70 | impl daphne_worker::aggregator::ComputeOffload for ComputeOffload { 71 | async fn request( 72 | &self, 73 | path: &str, 74 | body: &[u8], 75 | ) -> worker::Result> { 76 | let response = worker::Fetch::Request( 77 | worker::Request::new_with_init( 78 | &format!("http://{}{path}", self.host), 79 | &worker::RequestInit { 80 | body: Some(worker::js_sys::Uint8Array::from(body).into()), 81 | method: worker::Method::Post, 82 | ..Default::default() 83 | }, 84 | ) 85 | .unwrap(), 86 | ) 87 | .send() 88 | .await?; 89 | 90 | Ok(daphne_worker::Response::builder() 91 | .status(response.status_code()) 92 | .body(match response.into_parts().1 { 93 | ResponseBody::Empty => worker::Body::empty(), 94 | ResponseBody::Body(vec) => { 95 | worker::Body::from_stream(stream::iter([Ok::<_, Infallible>(vec)])).unwrap() 96 | } 97 | ResponseBody::Stream(readable_stream) => worker::Body::new(readable_stream), 98 | }) 99 | .unwrap()) 100 | } 101 | } 102 | 103 | #[event(queue)] 104 | pub async fn queue( 105 | batch: MessageBatch<()>, 106 | env: worker::Env, 107 | _ctx: worker::Context, 108 | ) -> worker::Result<()> { 109 | let registry = prometheus::Registry::new(); 110 | let host = load_compute_offload_host(&env); 111 | let app = App::new(env, ®istry, None, Box::new(ComputeOffload { host })).unwrap(); 112 | daphne_worker::aggregator::queues::async_aggregate_batch(app, batch).await; 113 | Ok(()) 114 | } 115 | -------------------------------------------------------------------------------- /crates/daphne-worker-test/src/utils.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2022 Cloudflare, Inc. All rights reserved. 2 | // SPDX-License-Identifier: BSD-3-Clause 3 | 4 | use cfg_if::cfg_if; 5 | 6 | cfg_if! { 7 | // https://github.com/rustwasm/console_error_panic_hook#readme 8 | if #[cfg(feature = "console_error_panic_hook")] { 9 | extern crate console_error_panic_hook; 10 | pub use self::console_error_panic_hook::set_once as set_panic_hook; 11 | } else { 12 | #[inline] 13 | pub fn set_panic_hook() {} 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /crates/daphne-worker-test/wrangler.storage-proxy.toml: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2024 Cloudflare, Inc. All rights reserved. 2 | # SPDX-License-Identifier: BSD-3-Clause 3 | 4 | name = "daphne_storage_proxy" 5 | main = "build/worker/shim.mjs" 6 | compatibility_date = "2023-12-21" 7 | 8 | # Don't ask to send metrics to Cloudflare. The worker may be run from a container. 
9 | send_metrics = false 10 | 11 | # Before starting the worker, run `worker-build`. 12 | #[build] 13 | #command = "cargo install --git https://github.com/cloudflare/workers-rs && worker-build --dev" 14 | 15 | [[rules]] 16 | globs = ["**/*.wasm"] 17 | type = "CompiledWasm" 18 | fallthrough = false 19 | 20 | # NOTE: Variables marked as SECRET need to be provisioned securely in 21 | # production. In particular, they will not be passed as environment variables 22 | # as they are here. See 23 | # https://developers.cloudflare.com/workers/wrangler/commands/#secret. 24 | [vars] 25 | DAPHNE_SERVER_AUTH_TOKEN = "this-is-the-storage-proxy-auth-token" # SECRET 26 | DAP_WORKER_MODE = "storage-proxy" 27 | DAP_DEPLOYMENT = "dev" 28 | DAP_DURABLE_HELPER_STATE_STORE_GC_AFTER_SECS = "30" 29 | DAP_DURABLE_AGGREGATE_STORE_GC_AFTER_SECS = "30" 30 | 31 | [dev] 32 | ip = "0.0.0.0" 33 | 34 | [durable_objects] 35 | bindings = [ 36 | { name = "DAP_AGGREGATE_STORE", class_name = "AggregateStore" }, 37 | { name = "DAP_TEST_STATE_CLEANER", class_name = "TestStateCleaner" }, 38 | ] 39 | 40 | 41 | [[kv_namespaces]] 42 | binding = "DAP_CONFIG" 43 | # KV bindings are in a looked up in a namespace identified by a 16-byte id number. 44 | # This number is assigned by calling 45 | # 46 | # wrangler kv:namespace create 47 | # 48 | # for some unique name you specify, and it returns a unique id number to use. 49 | # Here we should use something like "leader" for the . 50 | id = "" 51 | # A "preview id" is an id used when running in "wrangler dev" mode locally, and 52 | # can just be made up. We generated the number below with the following python 53 | # code: 54 | # 55 | # import secrets 56 | # print(secrets.token_hex(16)) 57 | # 58 | preview_id = "24c4dc92d5cf4680e508fe18eb8f0281" 59 | 60 | [[migrations]] 61 | tag = "v1" 62 | new_classes = [ 63 | "AggregateStore", 64 | "GarbageCollector", 65 | "HelperStateStore", 66 | ] 67 | 68 | [[migrations]] 69 | tag = "v2" 70 | renamed_classes = [ 71 | { from = "GarbageCollector", to = "TestStateCleaner" }, 72 | ] 73 | -------------------------------------------------------------------------------- /crates/daphne-worker/Cargo.toml: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2025 Cloudflare, Inc. All rights reserved. 
2 | # SPDX-License-Identifier: BSD-3-Clause 3 | 4 | [package] 5 | name = "daphne-worker" 6 | version.workspace = true 7 | authors.workspace = true 8 | edition.workspace = true 9 | license.workspace = true 10 | readme.workspace = true 11 | homepage.workspace = true 12 | repository.workspace = true 13 | description = "Workers backend for Daphne" 14 | 15 | [lib] 16 | crate-type = ["cdylib", "rlib"] 17 | 18 | [dependencies] 19 | async-trait = { workspace = true } 20 | axum-extra = { workspace = true, features = ["typed-header"] } 21 | bytes.workspace = true 22 | capnp = { workspace = true } 23 | chrono = { workspace = true, default-features = false, features = ["clock", "wasmbind"] } 24 | constcat.workspace = true 25 | daphne = { path = "../daphne", features = ["prometheus"] } 26 | either = { workspace = true } 27 | futures = { workspace = true } 28 | # We don't use getrandom directly but this allows us to enable the 'js' feature 29 | # of getrandom in the crates we depend on, that depend on getrandom 30 | getrandom = { workspace = true, features = ["js"] } 31 | headers.workspace = true 32 | hex.workspace = true 33 | http-body-util.workspace = true 34 | http.workspace = true 35 | mappable-rc.workspace = true 36 | p256 = { workspace = true } 37 | prio.workspace = true 38 | prio_draft09.workspace = true 39 | prometheus.workspace = true 40 | rand.workspace = true 41 | reqwest.workspace = true 42 | serde-wasm-bindgen.workspace = true 43 | serde.workspace = true 44 | serde_json.workspace = true 45 | static_assertions.workspace = true 46 | thiserror.workspace = true 47 | tower-service.workspace = true 48 | tower = { workspace = true, features = ["util"] } 49 | tracing-core.workspace = true 50 | tracing-subscriber = { workspace = true, features = ["env-filter", "json"]} 51 | tracing.workspace = true 52 | url.workspace = true 53 | wasm-bindgen.workspace = true 54 | worker = { workspace = true , features = ["http", "queue"] } 55 | 56 | [dependencies.axum] 57 | workspace = true 58 | features = ["query", "json", "http1", "http2"] 59 | 60 | [dependencies.daphne-service-utils] 61 | path = "../daphne-service-utils" 62 | features = ["durable_requests", "compute-offload"] 63 | 64 | [dev-dependencies] 65 | daphne = { path = "../daphne", features = ["test-utils"] } 66 | paste.workspace = true 67 | rcgen.workspace = true 68 | reqwest.workspace = true # used in doc tests 69 | tokio.workspace = true 70 | webpki.workspace = true 71 | 72 | [build-dependencies] 73 | capnpc = { workspace = true } 74 | 75 | [features] 76 | test-utils = ["daphne-service-utils/test-utils"] 77 | 78 | [lints] 79 | workspace = true 80 | 81 | [package.metadata.cargo-machete] 82 | ignored = ["getrandom"] 83 | -------------------------------------------------------------------------------- /crates/daphne-worker/build.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2025 Cloudflare, Inc. All rights reserved. 2 | // SPDX-License-Identifier: BSD-3-Clause 3 | 4 | fn main() { 5 | ::capnpc::CompilerCommand::new() 6 | .import_path("../daphne-service-utils/src") 7 | .file("./src/aggregator/queues/queue_messages.capnp") 8 | .run() 9 | .expect("compiling schema"); 10 | } 11 | -------------------------------------------------------------------------------- /crates/daphne-worker/clippy.toml: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2025 Cloudflare, Inc. All rights reserved. 
2 | # SPDX-License-Identifier: BSD-3-Clause 3 | 4 | disallowed-methods = [ 5 | { path = "std::time::Instant::now", reason = "not implemented in wasm. Use worker::Date::now()" }, 6 | { path = "std::time::SystemTime::now", reason = "not implemented in wasm. Use worker::Date::now()" }, 7 | ] 8 | -------------------------------------------------------------------------------- /crates/daphne-worker/src/aggregator/metrics.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2025 Cloudflare, Inc. All rights reserved. 2 | // SPDX-License-Identifier: BSD-3-Clause 3 | 4 | //! Daphne-Worker metrics. 5 | 6 | use daphne::metrics::DaphneMetrics; 7 | 8 | pub trait DaphneServiceMetrics { 9 | fn abort_count_inc(&self, label: &str); 10 | fn count_http_status_code(&self, status_code: u16); 11 | fn daphne(&self) -> &dyn DaphneMetrics; 12 | fn auth_method_inc(&self, method: AuthMethod); 13 | fn aggregate_job_latency(&self, time: std::time::Duration); 14 | } 15 | 16 | #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] 17 | pub enum AuthMethod { 18 | BearerToken, 19 | TlsClientAuth, 20 | } 21 | 22 | mod prometheus { 23 | use super::DaphneServiceMetrics; 24 | use daphne::{ 25 | fatal_error, 26 | metrics::{prometheus::DaphnePromMetrics, DaphneMetrics, ReportStatus}, 27 | DapError, 28 | }; 29 | use prometheus::{register_int_counter_vec_with_registry, IntCounterVec, Registry}; 30 | use std::time::Duration; 31 | 32 | impl DaphneMetrics for DaphnePromServiceMetrics { 33 | fn report_inc_by(&self, status: ReportStatus, val: u64) { 34 | self.daphne.report_inc_by(status, val); 35 | } 36 | 37 | fn inbound_req_inc(&self, request_type: daphne::metrics::DaphneRequestType) { 38 | self.daphne.inbound_req_inc(request_type); 39 | } 40 | 41 | fn agg_job_started_inc(&self) { 42 | self.daphne.agg_job_started_inc(); 43 | } 44 | 45 | fn agg_job_completed_inc(&self) { 46 | self.daphne.agg_job_completed_inc(); 47 | } 48 | 49 | fn agg_job_observe_batch_size(&self, val: usize) { 50 | self.daphne.agg_job_observe_batch_size(val); 51 | } 52 | 53 | fn agg_job_put_span_retry_inc(&self) { 54 | self.daphne.agg_job_put_span_retry_inc(); 55 | } 56 | } 57 | 58 | impl DaphneServiceMetrics for DaphnePromServiceMetrics { 59 | fn abort_count_inc(&self, label: &str) { 60 | self.dap_abort_counter.with_label_values(&[label]).inc(); 61 | } 62 | 63 | fn count_http_status_code(&self, status_code: u16) { 64 | self.http_status_code_counter 65 | .with_label_values(&[&status_code.to_string()]) 66 | .inc(); 67 | } 68 | 69 | fn auth_method_inc(&self, method: super::AuthMethod) { 70 | let method = match method { 71 | super::AuthMethod::TlsClientAuth => "mutual_tls", 72 | super::AuthMethod::BearerToken => "tls_client_auth", 73 | }; 74 | self.auth_method.with_label_values(&[method]).inc(); 75 | } 76 | 77 | fn daphne(&self) -> &dyn DaphneMetrics { 78 | self 79 | } 80 | 81 | fn aggregate_job_latency(&self, _time: Duration) { 82 | // unimplemented by default due to elevated cardinality 83 | } 84 | } 85 | 86 | #[derive(Clone)] 87 | pub struct DaphnePromServiceMetrics { 88 | /// Daphne metrics. 89 | daphne: DaphnePromMetrics, 90 | 91 | /// HTTP response status. 92 | http_status_code_counter: IntCounterVec, 93 | 94 | /// DAP aborts. 
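    /// Labelled by the `reason` string that callers pass to `abort_count_inc`.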
95 | dap_abort_counter: IntCounterVec, 96 | 97 | /// Counts the used authentication methods 98 | auth_method: IntCounterVec, 99 | } 100 | 101 | impl DaphnePromServiceMetrics { 102 | pub fn register(registry: &Registry) -> Result { 103 | let http_status_code_counter = register_int_counter_vec_with_registry!( 104 | "http_status_code", 105 | "HTTP response status code.", 106 | &["code"], 107 | registry 108 | ) 109 | .map_err(|e| fatal_error!(err = ?e, "failed to register http_status_code"))?; 110 | 111 | let dap_abort_counter = register_int_counter_vec_with_registry!( 112 | "dap_abort", 113 | "DAP aborts.", 114 | &["reason"], 115 | registry 116 | ) 117 | .map_err(|e| fatal_error!(err = ?e, "failed to register dap_abort"))?; 118 | 119 | let auth_method = register_int_counter_vec_with_registry!( 120 | "auth_method", 121 | "The authentication method used", 122 | &["method"], 123 | registry 124 | ) 125 | .map_err(|e| fatal_error!(err = ?e, "failed to register dap_abort"))?; 126 | 127 | let daphne = DaphnePromMetrics::register(registry)?; 128 | 129 | Ok(Self { 130 | daphne, 131 | http_status_code_counter, 132 | dap_abort_counter, 133 | auth_method, 134 | }) 135 | } 136 | } 137 | } 138 | 139 | pub use prometheus::DaphnePromServiceMetrics; 140 | -------------------------------------------------------------------------------- /crates/daphne-worker/src/aggregator/queues/mod.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2025 Cloudflare, Inc. All rights reserved. 2 | // SPDX-License-Identifier: BSD-3-Clause 3 | 4 | mod async_aggregator; 5 | 6 | pub use async_aggregator::{async_aggregate_batch, AsyncAggregationMessage}; 7 | use daphne_service_utils::capnproto::{CapnprotoPayloadEncode, CapnprotoPayloadEncodeExt as _}; 8 | use std::marker::PhantomData; 9 | use worker::RawMessageBuilder; 10 | 11 | pub struct Queue { 12 | queue: worker::Queue, 13 | _message_type: PhantomData, 14 | } 15 | 16 | impl Queue { 17 | #[tracing::instrument(skip_all, fields(message = std::any::type_name::()))] 18 | pub async fn send(&self, message: &T) -> worker::Result<()> { 19 | tracing::info!("submiting queue message"); 20 | let bytes = worker::js_sys::Uint8Array::from(message.encode_to_bytes().as_slice()); 21 | self.queue 22 | .send_raw( 23 | RawMessageBuilder::new(bytes.into()) 24 | .build_with_content_type(worker::QueueContentType::V8), 25 | ) 26 | .await?; 27 | 28 | Ok(()) 29 | } 30 | } 31 | 32 | impl From for Queue { 33 | fn from(queue: worker::Queue) -> Self { 34 | Self { 35 | queue, 36 | _message_type: PhantomData, 37 | } 38 | } 39 | } 40 | -------------------------------------------------------------------------------- /crates/daphne-worker/src/aggregator/queues/queue_messages.capnp: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2025 Cloudflare, Inc. All rights reserved. 
2 | # SPDX-License-Identifier: BSD-3-Clause 3 | 4 | @0x8240fbeac47031a3; 5 | 6 | using Base = import "/capnproto/base.capnp"; 7 | using ComputeOffload = import "/compute_offload/compute_offload.capnp"; 8 | 9 | struct Option(T) { 10 | union { 11 | none @0 :Void; 12 | some @1 :T; 13 | } 14 | } 15 | 16 | struct AsyncAggregationMessage @0xbe3d785aff491226 { 17 | version @0 :Base.DapVersion; 18 | reports @1 :List(Base.ReportId); 19 | aggregationJobId @2 :Base.AggregationJobId; 20 | partialBatchSelector @3 :Base.PartialBatchSelector; 21 | initializeReports @4 :ComputeOffload.InitializeReports; 22 | taskprovAdvertisement @5 :Option(Text); 23 | } 24 | -------------------------------------------------------------------------------- /crates/daphne-worker/src/aggregator/roles/helper.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2025 Cloudflare, Inc. All rights reserved. 2 | // SPDX-License-Identifier: BSD-3-Clause 3 | 4 | use crate::aggregator::App; 5 | use daphne::{ 6 | error::DapAbort, 7 | fatal_error, 8 | messages::{AggregationJobId, AggregationJobResp, TaskId}, 9 | protocol::ReadyAggregationJobResp, 10 | roles::{helper::AggregationJobRequestHash, DapHelper}, 11 | DapError, DapVersion, 12 | }; 13 | use daphne_service_utils::durable_requests::bindings::{ 14 | agg_job_response_store, aggregation_job_store, 15 | }; 16 | use std::borrow::Cow; 17 | 18 | #[axum::async_trait] 19 | impl DapHelper for App { 20 | async fn assert_agg_job_is_legal( 21 | &self, 22 | id: AggregationJobId, 23 | version: DapVersion, 24 | task_id: &TaskId, 25 | req_hash: &AggregationJobRequestHash, 26 | ) -> Result<(), DapError> { 27 | let response = self 28 | .durable() 29 | .with_retry() 30 | .request(aggregation_job_store::Command::NewJob, (version, task_id)) 31 | .encode(&aggregation_job_store::NewJobRequest { 32 | id, 33 | agg_job_hash: Cow::Borrowed(req_hash.get()), 34 | }) 35 | .send::() 36 | .await 37 | .map_err(|e| fatal_error!(err = ?e, "failed to store aggregation job hash"))?; 38 | 39 | match response { 40 | aggregation_job_store::NewJobResponse::Ok => Ok(()), 41 | aggregation_job_store::NewJobResponse::IllegalJobParameters => Err( 42 | DapAbort::BadRequest("aggregation job replay changes parameters".to_string()) 43 | .into(), 44 | ), 45 | } 46 | } 47 | 48 | async fn poll_aggregated( 49 | &self, 50 | version: DapVersion, 51 | task_id: &TaskId, 52 | agg_job_id: &AggregationJobId, 53 | ) -> Result { 54 | let valid_agg_job_id = self 55 | .durable() 56 | .with_retry() 57 | .request( 58 | aggregation_job_store::Command::ContainsJob, 59 | (version, task_id), 60 | ) 61 | .encode(agg_job_id) 62 | .send::() 63 | .await 64 | .map_err(|e| fatal_error!(err = ?e, "failed to query the validity of the aggregation job id"))?; 65 | 66 | if !valid_agg_job_id { 67 | return Err(DapError::Abort(DapAbort::UnrecognizedAggregationJob { 68 | task_id: *task_id, 69 | agg_job_id: *agg_job_id, 70 | })); 71 | } 72 | 73 | let response = self 74 | .durable() 75 | .with_retry() 76 | .request( 77 | agg_job_response_store::Command::Get, 78 | (version, task_id, agg_job_id), 79 | ) 80 | .send::>() 81 | .await 82 | .map_err(|e| fatal_error!(err = ?e, "failed to poll for aggregation job response"))?; 83 | 84 | match response { 85 | Some(ready) => Ok(ready.into()), 86 | None => Ok(AggregationJobResp::Processing), 87 | } 88 | } 89 | } 90 | -------------------------------------------------------------------------------- /crates/daphne-worker/src/aggregator/router/leader.rs: 
-------------------------------------------------------------------------------- 1 | // Copyright (c) 2025 Cloudflare, Inc. All rights reserved. 2 | // SPDX-License-Identifier: BSD-3-Clause 3 | 4 | use std::sync::Arc; 5 | 6 | use axum::{ 7 | extract::{Path, Request, State}, 8 | http::StatusCode, 9 | middleware::{from_fn, Next}, 10 | response::{IntoResponse, Response}, 11 | routing::{get, post, put}, 12 | }; 13 | use daphne::{ 14 | constants::DapMediaType, 15 | error::DapAbort, 16 | messages::{self, request::CollectionPollReq}, 17 | roles::leader::{self, DapLeader}, 18 | DapError, DapVersion, 19 | }; 20 | use prio::codec::ParameterizedEncode; 21 | 22 | use super::{ 23 | extractor::dap_sender::FROM_COLLECTOR, AxumDapResponse, DapRequestExtractor, DaphneService, 24 | UnauthenticatedDapRequestExtractor, 25 | }; 26 | use futures::{future::BoxFuture, FutureExt}; 27 | use serde::Deserialize; 28 | 29 | #[derive(Deserialize)] 30 | struct PathVersion { 31 | #[serde(rename = "version")] 32 | presented_version: DapVersion, 33 | } 34 | 35 | fn require_version( 36 | expected_version: DapVersion, 37 | ) -> impl Copy + Fn(Path, Request, Next) -> BoxFuture<'static, Response> { 38 | move |Path(PathVersion { presented_version }), req, next| { 39 | async move { 40 | if presented_version != expected_version { 41 | return StatusCode::METHOD_NOT_ALLOWED.into_response(); 42 | } 43 | next.run(req).await 44 | } 45 | .boxed() 46 | } 47 | } 48 | 49 | pub(super) fn add_leader_routes(router: super::Router) -> super::Router 50 | where 51 | A: DapLeader + DaphneService + Send + Sync + 'static, 52 | { 53 | router 54 | .route( 55 | "/:version/tasks/:task_id/reports", 56 | put(upload).layer(from_fn(require_version(DapVersion::Draft09))), 57 | ) 58 | .route( 59 | "/:version/tasks/:task_id/reports", 60 | post(upload).layer(from_fn(require_version(DapVersion::Latest))), 61 | ) 62 | .route( 63 | "/:version/tasks/:task_id/collection_jobs/:collect_job_id", 64 | put(start_collection_job), 65 | ) 66 | .route( 67 | "/:version/tasks/:task_id/collection_jobs/:collect_job_id", 68 | post(poll_collect).layer(from_fn(require_version(DapVersion::Draft09))), 69 | ) 70 | .route( 71 | "/:version/tasks/:task_id/collection_jobs/:collect_job_id", 72 | get(poll_collect).layer(from_fn(require_version(DapVersion::Latest))), 73 | ) 74 | } 75 | 76 | #[tracing::instrument( 77 | skip_all, 78 | fields( 79 | task_id = ?req.task_id, 80 | version = ?req.version, 81 | ) 82 | )] 83 | async fn upload( 84 | State(app): State>, 85 | UnauthenticatedDapRequestExtractor(req): UnauthenticatedDapRequestExtractor, 86 | ) -> Response 87 | where 88 | A: DapLeader + DaphneService + Send + Sync, 89 | { 90 | match leader::handle_upload_req(&*app, req).await { 91 | Ok(()) => StatusCode::OK.into_response(), 92 | Err(e) => AxumDapResponse::new_error(e, app.server_metrics()).into_response(), 93 | } 94 | } 95 | 96 | #[tracing::instrument( 97 | skip_all, 98 | fields( 99 | task_id = ?req.task_id, 100 | version = ?req.version, 101 | ) 102 | )] 103 | async fn start_collection_job( 104 | State(app): State>, 105 | DapRequestExtractor(req): DapRequestExtractor, 106 | ) -> Response 107 | where 108 | A: DapLeader + DaphneService + Send + Sync, 109 | { 110 | match leader::handle_coll_job_req(&*app, &req).await { 111 | Ok(()) => StatusCode::CREATED.into_response(), 112 | Err(e) => AxumDapResponse::new_error(e, app.server_metrics()).into_response(), 113 | } 114 | } 115 | 116 | #[tracing::instrument( 117 | skip_all, 118 | fields( 119 | task_id = ?req.task_id, 120 | version = ?req.version, 
121 | ) 122 | )] 123 | async fn poll_collect( 124 | State(app): State>, 125 | DapRequestExtractor(req): DapRequestExtractor, 126 | ) -> Response 127 | where 128 | A: DapLeader + DaphneService + Send + Sync, 129 | { 130 | match app.poll_collect_job(&req.task_id, &req.resource_id).await { 131 | Ok(daphne::DapCollectionJob::Done(collect_resp)) => AxumDapResponse::new_success( 132 | daphne::DapResponse { 133 | version: req.version, 134 | media_type: DapMediaType::Collection, 135 | payload: match collect_resp.get_encoded_with_param(&req.version) { 136 | Ok(payload) => payload, 137 | Err(e) => { 138 | return AxumDapResponse::new_error( 139 | DapError::encoding(e), 140 | app.server_metrics(), 141 | ) 142 | .into_response() 143 | } 144 | }, 145 | }, 146 | app.server_metrics(), 147 | ) 148 | .into_response(), 149 | Ok(daphne::DapCollectionJob::Pending) => StatusCode::ACCEPTED.into_response(), 150 | Ok(daphne::DapCollectionJob::Unknown) => AxumDapResponse::new_error( 151 | DapAbort::BadRequest("unknown collection job id".into()), 152 | app.server_metrics(), 153 | ) 154 | .into_response(), 155 | Err(e) => AxumDapResponse::new_error(e, app.server_metrics()).into_response(), 156 | } 157 | } 158 | -------------------------------------------------------------------------------- /crates/daphne-worker/src/durable/agg_job_response_store.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2025 Cloudflare, Inc. All rights reserved. 2 | // SPDX-License-Identifier: BSD-3-Clause 3 | 4 | //! Durable Object for storing the result of an aggregation job. 5 | 6 | use super::{req_parse, GcDurableObject}; 7 | use crate::int_err; 8 | use daphne::protocol::ReadyAggregationJobResp; 9 | use daphne_service_utils::durable_requests::bindings::{ 10 | self, agg_job_response_store, DurableMethod as _, 11 | }; 12 | use std::{sync::OnceLock, time::Duration}; 13 | use worker::{js_sys, Env, Request, Response, Result, ScheduledTime, State}; 14 | 15 | const AGGREGATE_RESPONSE_CHUNK_KEY_PREFIX: &str = "dap_agg_response_chunk"; 16 | 17 | super::mk_durable_object! { 18 | struct AggregationJobResp { 19 | state: State, 20 | env: Env, 21 | agg_job_resp: Option, 22 | } 23 | } 24 | 25 | impl AggregationJobResp { 26 | async fn get_agg_job_response(&mut self) -> Result> { 27 | let agg_job_resp = if let Some(agg_job_resp) = self.agg_job_resp.take() { 28 | agg_job_resp 29 | } else { 30 | let Some(agg_job_resp) = self 31 | .load_chuncked_value(AGGREGATE_RESPONSE_CHUNK_KEY_PREFIX) 32 | .await? 33 | else { 34 | return Ok(None); 35 | }; 36 | agg_job_resp 37 | }; 38 | 39 | self.agg_job_resp = Some(agg_job_resp); 40 | 41 | Ok(self.agg_job_resp.as_ref()) 42 | } 43 | 44 | fn put_agg_job_response(&mut self, resp: ReadyAggregationJobResp) -> Result { 45 | let obj = self.serialize_chunked_value(AGGREGATE_RESPONSE_CHUNK_KEY_PREFIX, &resp, None)?; 46 | self.agg_job_resp = Some(resp); 47 | Ok(obj) 48 | } 49 | } 50 | 51 | impl GcDurableObject for AggregationJobResp { 52 | type DurableMethod = bindings::AggregateStore; 53 | 54 | fn with_state_and_env(state: State, env: Env) -> Self { 55 | Self { 56 | state, 57 | env, 58 | agg_job_resp: None, 59 | } 60 | } 61 | 62 | async fn handle(&mut self, mut req: Request) -> Result { 63 | match agg_job_response_store::Command::try_from_uri(&req.path()) { 64 | // Store an aggregate share and aggregation job response. 
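            // (The response is persisted in chunks under the `dap_agg_response_chunk` key prefix; see `put_agg_job_response`.)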
65 | // 66 | // Idempotent 67 | // Input: `agg_share_dellta: agg_job_result_store::FinishRequest` 68 | // Output: `agg_job_result_store::FinishResponse` 69 | Some(agg_job_response_store::Command::Put) => { 70 | let response = req_parse::(&mut req).await?; 71 | 72 | self.state 73 | .storage() 74 | .put_multiple_raw(self.put_agg_job_response(response)?) 75 | .await?; 76 | 77 | Response::from_json(&()) 78 | } 79 | 80 | // Get the AggregationJobResp 81 | // 82 | // Idempotent 83 | // Output: `Option` 84 | Some(agg_job_response_store::Command::Get) => { 85 | let response = self.get_agg_job_response().await?; 86 | Response::from_json(&response) 87 | } 88 | 89 | None => Err(int_err(format!( 90 | "AggregatesStore: unexpected request: method={:?}; path={:?}", 91 | req.method(), 92 | req.path() 93 | ))), 94 | } 95 | } 96 | 97 | fn should_cleanup_at(&self) -> Option { 98 | const VAR_NAME: &str = "DAP_DURABLE_AGGREGATE_STORE_GC_AFTER_SECS"; 99 | static SELF_DELETE_AFTER: OnceLock = OnceLock::new(); 100 | 101 | let duration = SELF_DELETE_AFTER.get_or_init(|| { 102 | Duration::from_secs( 103 | self.env 104 | .var(VAR_NAME) 105 | .map(|v| { 106 | v.to_string().parse().unwrap_or_else(|e| { 107 | panic!("{VAR_NAME} could not be parsed as a number of seconds: {e}") 108 | }) 109 | }) 110 | .unwrap_or(60 * 60 * 24 * 7), // one week 111 | ) 112 | }); 113 | 114 | Some(ScheduledTime::from(*duration)) 115 | } 116 | } 117 | -------------------------------------------------------------------------------- /crates/daphne-worker/src/durable/aggregation_job_store.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2025 Cloudflare, Inc. All rights reserved. 2 | // SPDX-License-Identifier: BSD-3-Clause 3 | 4 | use super::{req_parse, GcDurableObject}; 5 | use crate::int_err; 6 | use daphne::messages::AggregationJobId; 7 | use daphne_service_utils::durable_requests::bindings::{ 8 | aggregation_job_store::{self, NewJobResponse}, 9 | DurableMethod, 10 | }; 11 | use std::{collections::HashSet, sync::OnceLock, time::Duration}; 12 | use worker::{js_sys::Uint8Array, Request, Response}; 13 | 14 | super::mk_durable_object! { 15 | struct AggregationJobStore { 16 | state: State, 17 | env: Env, 18 | seen_agg_job_ids: Option>, 19 | } 20 | } 21 | 22 | const SEEN_AGG_JOB_IDS_KEY: &str = "agg-job-ids"; 23 | 24 | impl GcDurableObject for AggregationJobStore { 25 | type DurableMethod = aggregation_job_store::Command; 26 | 27 | fn with_state_and_env(state: worker::State, env: worker::Env) -> Self { 28 | Self { 29 | state, 30 | env, 31 | seen_agg_job_ids: None, 32 | } 33 | } 34 | 35 | async fn handle(&mut self, mut req: Request) -> worker::Result { 36 | match Self::DurableMethod::try_from_uri(&req.path()) { 37 | Some(aggregation_job_store::Command::NewJob) => { 38 | let aggregation_job_store::NewJobRequest { id, agg_job_hash } = 39 | req_parse(&mut req).await?; 40 | 41 | let key = &id.to_string(); 42 | let response = match self.get::>(key).await? 
{ 43 | Some(hash) if hash == *agg_job_hash => NewJobResponse::Ok, 44 | Some(_) => NewJobResponse::IllegalJobParameters, 45 | None => { 46 | self.state 47 | .storage() 48 | .put_raw(key, Uint8Array::from(agg_job_hash.as_ref())) 49 | .await?; 50 | let seen_agg_job_ids = self.load_seen_agg_job_ids().await?; 51 | seen_agg_job_ids.insert(id); 52 | self.store_seen_agg_job_ids().await?; 53 | NewJobResponse::Ok 54 | } 55 | }; 56 | 57 | Response::from_json(&response) 58 | } 59 | Some(aggregation_job_store::Command::ContainsJob) => { 60 | let agg_job_id = req_parse::(&mut req).await?; 61 | let has = self.has(&agg_job_id.to_string()).await?; 62 | Response::from_json(&has) 63 | } 64 | None => Err(int_err(format!( 65 | "AggregationJobStore: unexpected request: method={:?}; path={:?}", 66 | req.method(), 67 | req.path() 68 | ))), 69 | } 70 | } 71 | 72 | fn should_cleanup_at(&self) -> Option { 73 | const VAR_NAME: &str = "DO_AGGREGATION_JOB_STORE_GC_AFTER_SECS"; 74 | static SELF_DELETE_AFTER: OnceLock = OnceLock::new(); 75 | 76 | let duration = SELF_DELETE_AFTER.get_or_init(|| { 77 | Duration::from_secs( 78 | self.env 79 | .var(VAR_NAME) 80 | .map(|v| { 81 | v.to_string().parse().unwrap_or_else(|e| { 82 | panic!("{VAR_NAME} could not be parsed as a number of seconds: {e}") 83 | }) 84 | }) 85 | .unwrap_or(60 * 60 * 24 * 7), // one week 86 | ) 87 | }); 88 | 89 | Some(worker::ScheduledTime::from(*duration)) 90 | } 91 | } 92 | 93 | impl AggregationJobStore { 94 | async fn load_seen_agg_job_ids(&mut self) -> worker::Result<&mut HashSet> { 95 | let seen_agg_job_ids = if let Some(seen_agg_job_ids) = self.seen_agg_job_ids.take() { 96 | seen_agg_job_ids 97 | } else { 98 | self.get_or_default(SEEN_AGG_JOB_IDS_KEY).await? 99 | }; 100 | 101 | self.seen_agg_job_ids = Some(seen_agg_job_ids); 102 | 103 | Ok(self.seen_agg_job_ids.as_mut().unwrap()) 104 | } 105 | 106 | async fn store_seen_agg_job_ids(&mut self) -> worker::Result<()> { 107 | self.put( 108 | SEEN_AGG_JOB_IDS_KEY, 109 | self.seen_agg_job_ids.as_ref().unwrap(), 110 | ) 111 | .await?; 112 | Ok(()) 113 | } 114 | } 115 | -------------------------------------------------------------------------------- /crates/daphne-worker/src/lib.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2025 Cloudflare, Inc. All rights reserved. 2 | // SPDX-License-Identifier: BSD-3-Clause 3 | 4 | #![allow(clippy::semicolon_if_nothing_returned)] 5 | 6 | //! Workers backend for `daphne-server`. 
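// This crate backs a Cloudflare Worker in one of two modes, selected by the deployment: an
// "aggregator" mode served by the `aggregator` module and a "storage-proxy" mode served by
// `storage_proxy`. A rough sketch of the dispatch a `#[event(fetch)]` handler performs
// (modelled on crates/daphne-worker-test; the prometheus registry and the `ComputeOffload`
// implementation are assumed to be set up by the embedding worker):
//
//     match env.var("DAP_WORKER_MODE").map(|v| v.to_string()).ok().as_deref() {
//         Some("storage-proxy") | None => {
//             daphne_worker::storage_proxy::handle_request(req, env, &registry).await
//         }
//         Some("aggregator") => {
//             let app = App::new(env, &registry, None, Box::new(compute_offload)).unwrap();
//             daphne_worker::aggregator::handle_dap_request(app, req).await
//         }
//         Some(invalid) => {
//             return Err(worker::Error::RustError(format!("{invalid} is not a valid DAP_WORKER_MODE")))
//         }
//     }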
7 | 8 | pub mod aggregator; 9 | pub mod durable; 10 | pub mod storage; 11 | pub mod storage_proxy; 12 | pub mod tracing_utils; 13 | 14 | use tracing::error; 15 | use worker::Error; 16 | 17 | pub use crate::tracing_utils::initialize_tracing; 18 | pub use axum::{ 19 | body::Body, 20 | response::{IntoResponse, Response}, 21 | }; 22 | pub use daphne::DapRequest; 23 | use std::time::Duration; 24 | 25 | pub(crate) fn int_err<S: ToString>(s: S) -> Error { 26 | error!("internal error: {}", s.to_string()); 27 | Error::RustError("internalError".to_string()) 28 | } 29 | 30 | pub(crate) fn elapsed(date: &worker::Date) -> Duration { 31 | Duration::from_millis(worker::Date::now().as_millis() - date.as_millis()) 32 | } 33 | 34 | pub(crate) use daphne_service_utils::base_capnp; 35 | pub(crate) use daphne_service_utils::compute_offload_capnp; 36 | 37 | mod queue_messages_capnp { 38 | #![allow(dead_code)] 39 | #![allow(clippy::pedantic)] 40 | #![allow(clippy::needless_lifetimes)] 41 | #![allow(clippy::extra_unused_type_parameters)] 42 | include!(concat!( 43 | env!("OUT_DIR"), 44 | "/src/aggregator/queues/queue_messages_capnp.rs" 45 | )); 46 | } 47 |
-------------------------------------------------------------------------------- /crates/daphne-worker/src/storage/kv/cache.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2025 Cloudflare, Inc. All rights reserved. 2 | // SPDX-License-Identifier: BSD-3-Clause 3 | 4 | use std::{any::Any, collections::HashMap, time::Duration}; 5 | 6 | use mappable_rc::Marc; 7 | 8 | use super::KvPrefix; 9 | use crate::elapsed; 10 | use worker::send::SendWrapper; 11 | 12 | const CACHE_VALUE_LIFETIME: Duration = Duration::from_secs(60 * 5); 13 | 14 | struct CacheLine { 15 | /// Time at which the cache item was set. 16 | ts: SendWrapper<worker::Date>, 17 | 18 | /// Either the value or an indication that no value was found. 19 | entry: Option<Marc<dyn Any + Send + Sync>>, 20 | } 21 | 22 | #[derive(Default)] 23 | pub struct Cache { 24 | /// This map follows the same structure as KV queries. 25 | /// The first key (&'static str) is a [`KvPrefix::PREFIX`] 26 | /// The second key (String) is the key that is associated with this value 27 | kv: HashMap<&'static str, HashMap<String, CacheLine>>, 28 | } 29 | 30 | pub enum CacheResult<T> { 31 | /// Cache hit. 32 | /// 33 | /// `None` indicates that the value is known to not exist. 34 | Hit(Option<Marc<T>>), 35 | /// Cache Miss. It was never cached or it has expired. 36 | Miss, 37 | /// There is a value associated with this key, but its type is not [`T`]. 38 | MismatchedType, 39 | }
40 | 41 | impl Cache { 42 | pub fn get<P>(&self, key: &str) -> CacheResult<P::Value> 43 | where 44 | P: KvPrefix, 45 | { 46 | match self.kv.get(P::PREFIX) { 47 | Some(cache) => match cache.get(key) { 48 | // Cache hit 49 | Some(CacheLine { ts, entry }) if elapsed(ts) < CACHE_VALUE_LIFETIME => entry 50 | .as_ref() 51 | .map(|entry| Marc::try_map(entry.clone(), |v| v.downcast_ref::<P::Value>())) 52 | .transpose() // bring out the try_map error 53 | .map_or(CacheResult::MismatchedType, CacheResult::Hit), 54 | 55 | // Cache miss or the cached value is stale. 56 | Some(_) | None => CacheResult::Miss, 57 | }, 58 | 59 | // Cache miss 60 | None => CacheResult::Miss, 61 | } 62 | } 63 | 64 | pub(super) fn put<P>(&mut self, key: String, entry: Option<Marc<P::Value>>) 65 | where 66 | P: KvPrefix, 67 | { 68 | self.kv.entry(P::PREFIX).or_default().insert( 69 | key, 70 | CacheLine { 71 | ts: SendWrapper(worker::Date::now()), 72 | entry: entry.map(|value| Marc::map(value, |v| v as &(dyn Any + Send + Sync))), 73 | }, 74 | ); 75 | } 76 | 77 | #[allow(dead_code)] 78 | pub fn delete<P>(&mut self, key: &str) -> CacheResult<P::Value> 79 | where 80 | P: KvPrefix, 81 | { 82 | match self.kv.get_mut(P::PREFIX) { 83 | Some(cache) => match cache.remove(key) { 84 | // Cache hit 85 | Some(CacheLine { ts: _, entry }) => entry 86 | .map(|entry| Marc::try_map(entry, |v| v.downcast_ref::<P::Value>())) 87 | .transpose() // bring out the try_map error 88 | .map_or(CacheResult::MismatchedType, CacheResult::Hit), 89 | 90 | None => CacheResult::Miss, 91 | }, 92 | 93 | // Cache miss 94 | None => CacheResult::Miss, 95 | } 96 | } 97 | }
98 | -------------------------------------------------------------------------------- /crates/daphne-worker/src/storage/mod.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2025 Cloudflare, Inc. All rights reserved. 2 | // SPDX-License-Identifier: BSD-3-Clause 3 | 4 | pub(crate) mod kv; 5 | 6 | use crate::storage_proxy; 7 | use axum::http::StatusCode; 8 | use daphne_service_utils::{ 9 | capnproto::{CapnprotoPayloadEncode, CapnprotoPayloadEncodeExt}, 10 | durable_requests::{bindings::DurableMethod, DurableRequest, ObjectIdFrom}, 11 | }; 12 | pub(crate) use kv::Kv; 13 | use serde::de::DeserializeOwned; 14 | use std::fmt::Debug; 15 | use worker::Env; 16 | 17 | #[derive(Debug, thiserror::Error)] 18 | pub(crate) enum Error { 19 | #[error("serialization error: {0}")] 20 | Serde(#[from] serde_json::Error), 21 | #[error("worker error: {0}")] 22 | Worker(#[from] worker::Error), 23 | #[error("http error. request returned status code {status} with the body {body}")] 24 | Http { status: StatusCode, body: String }, 25 | } 26 | 27 | #[derive(Clone, Copy)] 28 | pub(crate) struct Do<'h> { 29 | env: &'h Env, 30 | retry: bool, 31 | } 32 | 33 | impl<'h> Do<'h> { 34 | pub fn new(env: &'h Env) -> Self { 35 | Self { env, retry: false } 36 | } 37 | 38 | pub fn with_retry(self) -> Self { 39 | Self { 40 | retry: true, 41 | ..self 42 | } 43 | } 44 | } 45 | 46 | pub struct RequestBuilder<'d, B: DurableMethod, P: AsRef<[u8]>> { 47 | durable: &'d Do<'d>, 48 | path: B, 49 | request: DurableRequest<P>, 50 | }
51 | 52 | impl<B: DurableMethod + Debug, P: AsRef<[u8]>> RequestBuilder<'_, B, P> { 53 | #[tracing::instrument(skip_all, fields(path = ?self.path))] 54 | pub async fn send<R>(self) -> Result<R, Error> 55 | where 56 | R: DeserializeOwned, 57 | { 58 | let resp = storage_proxy::handle_do_request( 59 | self.durable.env, 60 | Default::default(), 61 | self.path.to_uri(), 62 | self.request, 63 | |_, _, _| {}, // retry metric 64 | ) 65 | .await?; 66 | 67 | use http_body_util::BodyExt; 68 | let (resp, body) = resp.into_parts(); 69 | let body = body.collect().await?.to_bytes(); 70 | if resp.status.is_success() { 71 | Ok(serde_json::from_slice(&body)?) 72 | } else { 73 | Err(Error::Http { 74 | status: resp.status, 75 | body: String::from_utf8_lossy(&body).into_owned(), 76 | }) 77 | } 78 | } 79 | } 80 | 81 | impl<'d, B: DurableMethod> RequestBuilder<'d, B, [u8; 0]> { 82 | pub fn encode<T: CapnprotoPayloadEncode>(self, payload: &T) -> RequestBuilder<'d, B, Vec<u8>> { 83 | self.with_body(payload.encode_to_bytes()) 84 | } 85 | 86 | pub fn with_body<T: AsRef<[u8]>>(self, payload: T) -> RequestBuilder<'d, B, T> { 87 | RequestBuilder { 88 | durable: self.durable, 89 | path: self.path, 90 | request: self.request.with_body(payload), 91 | } 92 | } 93 | } 94 | 95 | impl Do<'_> { 96 | pub fn request<B: DurableMethod>( 97 | &self, 98 | path: B, 99 | params: B::NameParameters<'_>, 100 | ) -> RequestBuilder<'_, B, [u8; 0]> { 101 | let (request, _) = DurableRequest::new(path, params); 102 | RequestBuilder { 103 | durable: self, 104 | path, 105 | request: if self.retry { 106 | request.with_retry() 107 | } else { 108 | request 109 | }, 110 | } 111 | } 112 | 113 | #[allow(dead_code)] 114 | pub fn request_with_id<B: DurableMethod>( 115 | &self, 116 | path: B, 117 | object_id: ObjectIdFrom, 118 | ) -> RequestBuilder<'_, B, [u8; 0]> { 119 | let (request, _) = DurableRequest::new_with_id(path, object_id); 120 | RequestBuilder { 121 | durable: self, 122 | path, 123 | request: if self.retry { 124 | request.with_retry() 125 | } else { 126 | request 127 | }, 128 | } 129 | } 130 | }
131 | -------------------------------------------------------------------------------- /crates/daphne-worker/src/storage_proxy/metrics.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2024 Cloudflare, Inc. All rights reserved. 2 | // SPDX-License-Identifier: BSD-3-Clause 3 | 4 | use prometheus::{ 5 | register_histogram_vec_with_registry, register_int_counter_vec_with_registry, HistogramVec, 6 | IntCounterVec, Registry, 7 | }; 8 | use std::time::Duration; 9 | 10 | pub struct Metrics { 11 | /// Number of retries done in durable object requests before returning (whether by success 12 | /// or failure). 13 | durable_request_retry_count: IntCounterVec, 14 | 15 | durable_request_time_seconds: HistogramVec, 16 | 17 | kv_request_time_seconds: HistogramVec, 18 | } 19 | 20 | impl Metrics { 21 | pub fn new(registry: &Registry) -> Self { 22 | let durable_request_retry_count = register_int_counter_vec_with_registry!( 23 | "durable_request_retry_count", 24 | "The number of times a request to a durable object was retried. 
count = -1 means the number of retries was exhausted", 25 | &["count", "object", "path"], 26 | registry, 27 | ).unwrap(); 28 | 29 | let durable_request_time_seconds = register_histogram_vec_with_registry!( 30 | "durable_request_time_seconds", 31 | "Histogram of durable object request timings", 32 | &["uri", "outcome"], 33 | registry, 34 | ) 35 | .unwrap(); 36 | 37 | let kv_request_time_seconds = register_histogram_vec_with_registry!( 38 | "kv_request_time_seconds", 39 | "Histogram of KV request timings", 40 | &["op", "status"], 41 | registry, 42 | ) 43 | .unwrap(); 44 | 45 | Self { 46 | durable_request_retry_count, 47 | durable_request_time_seconds, 48 | kv_request_time_seconds, 49 | } 50 | } 51 | 52 | pub fn durable_request_retry_count_inc(&self, number_of_retries: i8, object: &str, path: &str) { 53 | self.durable_request_retry_count 54 | .with_label_values(&[&number_of_retries.to_string(), object, path]) 55 | .inc(); 56 | } 57 | 58 | pub fn durable_request_time_seconds_observe(&self, uri: &str, status: &str, time: Duration) { 59 | self.durable_request_time_seconds 60 | .with_label_values(&[uri, status]) 61 | .observe(time.as_secs_f64()); 62 | } 63 | 64 | pub fn kv_request_time_seconds_observe(&self, op: &str, status: &str, time: Duration) { 65 | self.kv_request_time_seconds 66 | .with_label_values(&[op, status]) 67 | .observe(time.as_secs_f64()); 68 | } 69 | } 70 | -------------------------------------------------------------------------------- /crates/daphne-worker/src/storage_proxy/middleware.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2025 Cloudflare, Inc. All rights reserved. 2 | // SPDX-License-Identifier: BSD-3-Clause 3 | 4 | use std::sync::{Arc, OnceLock}; 5 | 6 | use axum::{ 7 | extract::{Path, State}, 8 | middleware::Next, 9 | response::IntoResponse, 10 | }; 11 | use axum_extra::{ 12 | headers::{authorization::Bearer, Authorization}, 13 | TypedHeader, 14 | }; 15 | use daphne::messages::constant_time_eq; 16 | use http::{Method, StatusCode}; 17 | use tower_service::Service; 18 | 19 | use super::RequestContext; 20 | use crate::elapsed; 21 | 22 | /// Performs bearer token auth of a request. 
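/// 
/// The trusted token is read once from the `DAPHNE_SERVER_AUTH_TOKEN` variable and cached in a
/// `OnceLock`. If the variable is not configured the middleware fails closed with a 500; a
/// request whose `Authorization: Bearer` token does not match is rejected with a 401. The
/// comparison uses `constant_time_eq` so the check does not leak the token through timing.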
23 | pub async fn bearer_auth(
24 |     ctx: State<Arc<RequestContext>>,
25 |     bearer: TypedHeader<Authorization<Bearer>>,
26 |     request: axum::extract::Request,
27 |     mut next: Next,
28 | ) -> axum::response::Response {
29 |     static TRUSTED_TOKEN: OnceLock<Option<String>> = OnceLock::new();
30 | 
31 |     let Some(trusted_token) = TRUSTED_TOKEN.get_or_init(|| {
32 |         ctx.env
33 |             .var("DAPHNE_SERVER_AUTH_TOKEN")
34 |             .ok()
35 |             .map(|t| t.to_string())
36 |     }) else {
37 |         tracing::warn!("trusted bearer token not configured");
38 |         return (
39 |             StatusCode::INTERNAL_SERVER_ERROR,
40 |             "Authorization token for storage proxy is not configured",
41 |         )
42 |             .into_response();
43 |     };
44 | 
45 |     if !constant_time_eq(bearer.token().as_bytes(), trusted_token.as_bytes()) {
46 |         return (StatusCode::UNAUTHORIZED, "Incorrect authorization token").into_response();
47 |     }
48 | 
49 |     match next.call(request.map(axum::body::Body::new)).await {
50 |         Ok(r) => r,
51 |         Err(infallible) => match infallible {},
52 |     }
53 | }
54 | 
55 | #[worker::send]
56 | pub async fn time_kv_requests(
57 |     ctx: State<Arc<RequestContext>>,
58 |     method: Method,
59 |     request: axum::extract::Request,
60 |     mut next: Next,
61 | ) -> axum::response::Response {
62 |     let start = worker::Date::now();
63 |     let response = match next.call(request).await {
64 |         Ok(r) => r,
65 |         Err(infallible) => match infallible {},
66 |     };
67 |     let elapsed = elapsed(&start);
68 | 
69 |     let op = match method {
70 |         Method::GET => "kv_get",
71 |         Method::POST => "kv_put",
72 |         Method::PUT => "kv_put_if_not_exists",
73 |         Method::DELETE => "kv_delete",
74 |         method => {
75 |             tracing::warn!(?method, status = ?response.status(), "unexpected method in kv request");
76 |             "unknown"
77 |         }
78 |     };
79 |     let status = if response.status().is_success() {
80 |         "success"
81 |     } else {
82 |         "error"
83 |     };
84 |     ctx.metrics
85 |         .kv_request_time_seconds_observe(op, status, elapsed);
86 | 
87 |     response
88 | }
89 | 
90 | #[worker::send]
91 | pub async fn time_do_requests(
92 |     ctx: State<Arc<RequestContext>>,
93 |     Path(uri): Path<String>,
94 |     request: axum::extract::Request,
95 |     mut next: Next,
96 | ) -> axum::response::Response {
97 |     let start = worker::Date::now();
98 |     let response = match next.call(request).await {
99 |         Ok(r) => r,
100 |         Err(infallible) => match infallible {},
101 |     };
102 |     let elapsed = elapsed(&start);
103 |     ctx.metrics.durable_request_time_seconds_observe(
104 |         &uri,
105 |         if response.status().is_success() {
106 |             "success"
107 |         } else {
108 |             "error"
109 |         },
110 |         elapsed,
111 |     );
112 |     response
113 | }
114 | --------------------------------------------------------------------------------
/crates/daphne-worker/src/tracing_utils/fields_recording_layer.rs:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2023 Cloudflare, Inc. All rights reserved.
2 | // SPDX-License-Identifier: BSD-3-Clause
3 | 
4 | use std::collections::HashMap;
5 | 
6 | use tracing::{span::Attributes, Id, Subscriber};
7 | use tracing_subscriber::{layer::Context as LayerContext, registry, Layer};
8 | 
9 | use super::{JsonFields, JsonVisitor};
10 | 
11 | /// Tracing subscriber layer that records the fields of the created spans.
12 | ///
13 | /// Fields from spans are flattened into the JSON; duplicates are prioritized by their closeness to
14 | /// the leaf (e.g. a leaf field overrides a root field).
15 | ///
16 | /// Timestamps are derived from calling `Date.now()`. In the Workers runtime, time only progresses
17 | /// when IO occurs.
18 | pub(super) struct SpanFieldsRecorderLayer;
19 | 
20 | impl<S> Layer<S> for SpanFieldsRecorderLayer
21 | where
22 |     S: Subscriber + for<'a> registry::LookupSpan<'a>,
23 | {
24 |     fn on_new_span(&self, attrs: &Attributes<'_>, id: &Id, ctx: LayerContext<'_, S>) {
25 |         let span = ctx.span(id).expect("span should exist");
26 |         let mut fields = HashMap::new();
27 |         let mut visitor = JsonVisitor(&mut fields);
28 |         attrs.record(&mut visitor);
29 | 
30 |         let mut extensions = span.extensions_mut();
31 |         if extensions.get_mut::<JsonFields>().is_none() {
32 |             extensions.insert(fields);
33 |         }
34 |     }
35 | }
36 | --------------------------------------------------------------------------------
/crates/daphne-worker/src/tracing_utils/workers_json_layer.rs:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2023 Cloudflare, Inc. All rights reserved.
2 | // SPDX-License-Identifier: BSD-3-Clause
3 | 
4 | use std::collections::HashMap;
5 | 
6 | use tracing::Subscriber;
7 | use tracing_subscriber::{layer::Context as LayerContext, registry, Layer};
8 | use worker::console_log;
9 | 
10 | use super::{shorten_paths, JsonFields, JsonVisitor};
11 | 
12 | #[derive(serde::Serialize)]
13 | struct LogLine {
14 |     timestamp: u64,
15 |     log_level: &'static str,
16 |     #[serde(flatten)]
17 |     fields: HashMap<String, serde_json::Value>,
18 | }
19 | 
20 | /// Tracing subscriber layer that writes JSON to the Cloudflare Workers console.
21 | ///
22 | /// Fields from spans are flattened into the JSON; duplicates are prioritized by their closeness to
23 | /// the leaf (e.g. a leaf field overrides a root field).
24 | ///
25 | /// Timestamps are derived from calling `Date.now()`. In the Workers runtime, time only progresses
26 | /// when IO occurs.
27 | ///
28 | /// Logs all messages to `console.log` with their tracing log level in the `log_level` field of the JSON.
29 | pub struct WorkersJsonLayer;
30 | 
31 | impl<S> Layer<S> for WorkersJsonLayer
32 | where
33 |     S: Subscriber + for<'a> registry::LookupSpan<'a>,
34 | {
35 |     fn on_event(&self, event: &tracing::Event<'_>, ctx: LayerContext<'_, S>) {
36 |         let timestamp = worker::Date::now().as_millis();
37 | 
38 |         let mut fields = HashMap::new();
39 |         let mut visitor = JsonVisitor(&mut fields);
40 | 
41 |         // Prioritize any fields in the message itself over those in the span tree.
42 |         event.record(&mut visitor);
43 | 
44 |         if let Some(current) = ctx.event_span(event) {
45 |             // current context --> root context
46 |             if let Some(spans) = ctx.span_scope(&current.id()) {
47 |                 for span in spans {
48 |                     let ext = span.extensions();
49 |                     let data = ext.get::<JsonFields>();
50 | 
51 |                     fields
52 |                         .entry("current_span".to_owned())
53 |                         .or_insert(serde_json::json!(span.name()));
54 | 
55 |                     for f in span.fields() {
56 |                         if let Some(value) = data.and_then(|d| d.get(f.name())) {
57 |                             // As we are going from the current span to the root, prioritize existing values.
58 |                             fields.entry(f.name().to_owned()).or_insert(value.clone());
59 |                         }
60 |                     }
61 |                 }
62 |             }
63 |         }
64 | 
65 |         let metadata = event.metadata();
66 |         if let (Some(file), Some(line)) = (metadata.file(), metadata.line()) {
67 |             // We need to keep log lines as short as possible, otherwise Logpush will truncate them.
68 |             let file_parts = shorten_paths(file.trim_start_matches("daphne_").split('/'));
69 |             fields.insert(
70 |                 "at".to_owned(),
71 |                 format!("{}:{}", file_parts.display(), line).into(),
72 |             );
73 |         }
74 | 
75 |         // If there is no `message`, repurpose the error message (if there is one) or the
76 |         // `current_span` as the `message`. This helps normalize the `WasmTimingLayer` events.
77 | const MSG_KEY: &str = "message"; 78 | if !fields.contains_key(MSG_KEY) { 79 | if let Some(error) = fields.get("error") { 80 | fields.insert(MSG_KEY.into(), error.clone()); 81 | } else { 82 | fields.insert(MSG_KEY.to_owned(), "(no message)".into()); 83 | } 84 | } else if matches!(fields.get(MSG_KEY).unwrap().as_str(), Some(m) if m.trim().is_empty()) { 85 | fields.insert(MSG_KEY.to_owned(), "(no message)".into()); 86 | } 87 | 88 | let log_line = LogLine { 89 | timestamp, 90 | log_level: metadata.level().as_str(), 91 | fields, 92 | }; 93 | 94 | if let Ok(log) = serde_json::to_string(&log_line) { 95 | console_log!("{}", log); 96 | } 97 | } 98 | } 99 | -------------------------------------------------------------------------------- /crates/daphne/Cargo.toml: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2024 Cloudflare, Inc. All rights reserved. 2 | # SPDX-License-Identifier: BSD-3-Clause 3 | 4 | [package] 5 | name = "daphne" 6 | version.workspace = true 7 | authors.workspace = true 8 | edition.workspace = true 9 | license.workspace = true 10 | readme.workspace = true 11 | homepage.workspace = true 12 | repository.workspace = true 13 | description = "Implementation of the DAP specification" 14 | 15 | [lib] 16 | crate-type = ["cdylib", "rlib"] 17 | 18 | [dependencies] 19 | async-trait.workspace = true 20 | base64.workspace = true 21 | deepsize = { workspace = true, optional = true } 22 | futures.workspace = true 23 | hex.workspace = true 24 | hpke-rs = { workspace = true, features = ["hazmat", "serialization"] } 25 | hpke-rs-crypto.workspace = true 26 | hpke-rs-rust-crypto.workspace = true 27 | prio_draft09 = { workspace = true, features = ["experimental"] } 28 | prio = { workspace = true, features = ["experimental"] } 29 | prometheus = { workspace = true, optional = true } 30 | rand.workspace = true 31 | ring.workspace = true 32 | serde.workspace = true 33 | serde_json.workspace = true 34 | thiserror.workspace = true 35 | tracing.workspace = true 36 | url.workspace = true 37 | tokio = { workspace = true, optional = true } 38 | rayon = { workspace = true } 39 | subtle = { workspace = true } 40 | 41 | [dev-dependencies] 42 | assert_matches.workspace = true 43 | criterion.workspace = true 44 | deepsize.workspace = true 45 | matchit.workspace = true 46 | paste.workspace = true 47 | prio_draft09 = { workspace = true, features = ["test-util"] } 48 | prio = { workspace = true, features = ["test-util"] } 49 | prometheus.workspace = true 50 | rayon.workspace = true 51 | regex.workspace = true 52 | strum.workspace = true 53 | tokio.workspace = true 54 | 55 | [features] 56 | experimental = [] 57 | test-utils = ["dep:deepsize", "dep:prometheus"] 58 | report-generator = ["test-utils", "dep:tokio", "tokio/sync"] 59 | default = [] 60 | prometheus = ["dep:prometheus"] 61 | 62 | [[bench]] 63 | name = "vdaf" 64 | harness = false 65 | 66 | [[bench]] 67 | name = "aggregation" 68 | harness = false 69 | required-features = ["test-utils"] 70 | 71 | [[bench]] 72 | name = "pine" 73 | harness = false 74 | 75 | [lints] 76 | workspace = true 77 | -------------------------------------------------------------------------------- /crates/daphne/benches/aggregation.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2024 Cloudflare, Inc. All rights reserved. 
2 | // SPDX-License-Identifier: BSD-3-Clause 3 | 4 | #![allow(clippy::cast_possible_truncation)] 5 | 6 | use std::{ 7 | hint::black_box, 8 | iter::repeat, 9 | time::{Duration, Instant}, 10 | }; 11 | 12 | use criterion::{criterion_group, criterion_main, Bencher, BenchmarkId, Criterion, Throughput}; 13 | use daphne::{ 14 | hpke::HpkeKemId, 15 | messages::AggregationJobInitReq, 16 | testing::AggregationJobTest, 17 | vdaf::{Prio3Config, VdafConfig}, 18 | DapAggregationParam, DapVersion, 19 | }; 20 | 21 | macro_rules! function { 22 | () => {{ 23 | fn f() {} 24 | let name = std::any::type_name_of_val(&f); 25 | 26 | // Find and cut the rest of the path 27 | match &name[..name.len() - 3].rfind(':') { 28 | Some(pos) => &name[pos + 1..name.len() - 3], 29 | None => &name[..name.len() - 3], 30 | } 31 | }}; 32 | } 33 | 34 | fn consume_reports_vary_vdaf_dimension(c: &mut Criterion) { 35 | const NUM_REPORTS: u64 = 1000; 36 | let vdaf_lengths = [10, 100, 1_000, 10_000, 100_000]; 37 | let mut test = AggregationJobTest::new( 38 | &VdafConfig::Prio2 { dimension: 0 }, 39 | HpkeKemId::P256HkdfSha256, 40 | DapVersion::Draft09, 41 | ); 42 | test.disable_replay_protection(); 43 | 44 | let mut g = c.benchmark_group(function!()); 45 | for vdaf_length in vdaf_lengths { 46 | let vdaf = VdafConfig::Prio3( 47 | Prio3Config::Draft09SumVecField64MultiproofHmacSha256Aes128 { 48 | bits: 1, 49 | length: vdaf_length, 50 | chunk_length: 320, 51 | num_proofs: 2, 52 | }, 53 | ); 54 | test.change_vdaf(vdaf); 55 | let reports = test 56 | .produce_repeated_reports(vdaf.gen_measurement().unwrap()) 57 | .take(NUM_REPORTS as _); 58 | 59 | let (_, init) = test.produce_agg_job_req(&DapAggregationParam::Empty, reports); 60 | 61 | g.throughput(Throughput::Bytes(vdaf_length as _)); 62 | g.bench_with_input( 63 | BenchmarkId::new("consume_agg_job_req", vdaf_length), 64 | &init, 65 | |b, init| bench(b, &test, init), 66 | ); 67 | } 68 | } 69 | 70 | fn consume_reports_vary_num_reports(c: &mut Criterion) { 71 | const VDAF: VdafConfig = VdafConfig::Prio3( 72 | Prio3Config::Draft09SumVecField64MultiproofHmacSha256Aes128 { 73 | bits: 1, 74 | length: 1000, 75 | chunk_length: 320, 76 | num_proofs: 2, 77 | }, 78 | ); 79 | 80 | let mut test = AggregationJobTest::new(&VDAF, HpkeKemId::P256HkdfSha256, DapVersion::Draft09); 81 | test.disable_replay_protection(); 82 | 83 | let mut g = c.benchmark_group(function!()); 84 | for report_counts in [10, 100, 1_000, 10_000] { 85 | let reports = test 86 | .produce_repeated_reports(VDAF.gen_measurement().unwrap()) 87 | .take(report_counts); 88 | 89 | let (_, init) = test.produce_agg_job_req(&DapAggregationParam::Empty, reports); 90 | 91 | g.throughput(Throughput::Elements(report_counts as _)); 92 | g.bench_with_input( 93 | BenchmarkId::new("consume_agg_job_req", report_counts), 94 | &init, 95 | |b, init| bench(b, &test, init), 96 | ); 97 | } 98 | } 99 | 100 | fn bench(b: &mut Bencher, test: &AggregationJobTest, init: &AggregationJobInitReq) { 101 | b.iter_custom(|iters| { 102 | let mut total = Duration::ZERO; 103 | for init in repeat(init).take(iters as _).cloned() { 104 | let now = Instant::now(); 105 | let ret = black_box(test.handle_agg_job_req(init)); 106 | total += now.elapsed(); 107 | drop(ret); 108 | } 109 | total 110 | }); 111 | } 112 | 113 | criterion_group!( 114 | benches, 115 | consume_reports_vary_num_reports, 116 | consume_reports_vary_vdaf_dimension 117 | ); 118 | criterion_main!(benches); 119 | -------------------------------------------------------------------------------- 
/crates/daphne/benches/pine.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2024 Cloudflare, Inc. All rights reserved. 2 | // SPDX-License-Identifier: BSD-3-Clause 3 | 4 | use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion}; 5 | use daphne::pine::Pine; 6 | use prio_draft09::{ 7 | field::random_vector, 8 | flp::Type, 9 | vdaf::{ 10 | xof::{Seed, XofTurboShake128}, 11 | Aggregator, Client, 12 | }, 13 | }; 14 | 15 | fn pine(c: &mut Criterion) { 16 | // NOTE We ignore this clippy warning because we may want to benchmark more parameters later. 17 | #[allow(clippy::single_element_loop)] 18 | for (dimension, chunk_len, chunk_len_sq_norm_equal) in [(200_000, 150 * 2, 447 * 18)] { 19 | let pine = 20 | Pine::new_64(1 << 15, dimension, 15, chunk_len, chunk_len_sq_norm_equal).unwrap(); 21 | let measurement = vec![0.0; dimension]; 22 | let wr_joint_rand_seed = Seed::generate().unwrap(); 23 | let nonce = [0; 16]; 24 | let verify_key = [0; 16]; 25 | 26 | c.bench_with_input( 27 | BenchmarkId::new("pine/encode", dimension), 28 | &dimension, 29 | |b, &_d| { 30 | b.iter(|| { 31 | pine.flp 32 | .encode_with_wr_joint_rand::( 33 | measurement.iter().copied(), 34 | &wr_joint_rand_seed, 35 | ) 36 | .unwrap() 37 | }); 38 | }, 39 | ); 40 | 41 | { 42 | let (mut input, wr_test_results) = pine 43 | .flp 44 | .encode_with_wr_joint_rand::( 45 | measurement.iter().copied(), 46 | &wr_joint_rand_seed, 47 | ) 48 | .unwrap(); 49 | input.extend_from_slice(&wr_test_results); 50 | let prove_rand = random_vector(pine.flp_sq_norm_equal.prove_rand_len()).unwrap(); 51 | 52 | c.bench_with_input( 53 | BenchmarkId::new("pine/prove", dimension), 54 | &dimension, 55 | |b, &_d| { 56 | b.iter(|| { 57 | pine.flp_sq_norm_equal 58 | .prove(&input, &prove_rand, &[]) 59 | .unwrap() 60 | }); 61 | }, 62 | ); 63 | 64 | let query_rand = random_vector(pine.flp_sq_norm_equal.query_rand_len()).unwrap(); 65 | let proof = pine 66 | .flp_sq_norm_equal 67 | .prove(&input, &prove_rand, &[]) 68 | .unwrap(); 69 | 70 | c.bench_with_input( 71 | BenchmarkId::new("pine/query", dimension), 72 | &dimension, 73 | |b, &_d| { 74 | b.iter(|| { 75 | pine.flp_sq_norm_equal 76 | .query(&input, &proof, &query_rand, &[], 1) 77 | .unwrap() 78 | }); 79 | }, 80 | ); 81 | } 82 | 83 | c.bench_with_input( 84 | BenchmarkId::new("pine/shard", dimension), 85 | &dimension, 86 | |b, &_d| { 87 | b.iter(|| pine.shard(&measurement, &nonce).unwrap()); 88 | }, 89 | ); 90 | 91 | let (public_share, input_shares) = pine.shard(&measurement, &nonce).unwrap(); 92 | 93 | c.bench_with_input( 94 | BenchmarkId::new("pine/prep_init", dimension), 95 | &dimension, 96 | |b, &_d| { 97 | b.iter(|| { 98 | pine.prepare_init(&verify_key, 1, &(), &nonce, &public_share, &input_shares[1]) 99 | }); 100 | }, 101 | ); 102 | } 103 | } 104 | 105 | criterion_group!(benches, pine); 106 | criterion_main!(benches); 107 | -------------------------------------------------------------------------------- /crates/daphne/benches/vdaf.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2024 Cloudflare, Inc. All rights reserved. 
2 | // SPDX-License-Identifier: BSD-3-Clause 3 | 4 | use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion}; 5 | use prio_draft09::{ 6 | field::Field64, 7 | flp::{ 8 | gadgets::{Mul, ParallelSum}, 9 | types::SumVec, 10 | }, 11 | vdaf::{prio2::Prio2, prio3::Prio3, xof::XofHmacSha256Aes128, Aggregator, Client}, 12 | }; 13 | 14 | fn count_vec(c: &mut Criterion) { 15 | for dimension in [100, 1_000, 10_000, 100_000] { 16 | let nonce = [0; 16]; 17 | #[allow(clippy::cast_possible_truncation)] 18 | #[allow(clippy::cast_sign_loss)] 19 | let chunk_length = (dimension as f64).sqrt() as usize; // asymptotically optimal 20 | 21 | // Prio2 22 | { 23 | let prio2 = Prio2::new(dimension).unwrap(); 24 | let verify_key = [0; 32]; 25 | let measurement = vec![0; dimension]; 26 | c.bench_with_input( 27 | BenchmarkId::new("Prio2/shard", dimension), 28 | &dimension, 29 | |b, &_d| { 30 | b.iter(|| prio2.shard(&measurement, &nonce).unwrap()); 31 | }, 32 | ); 33 | 34 | let (public_share, input_shares) = prio2.shard(&measurement, &nonce).unwrap(); 35 | 36 | c.bench_with_input( 37 | BenchmarkId::new("Prio2/prep_init", dimension), 38 | &dimension, 39 | |b, &_d| { 40 | b.iter(|| { 41 | prio2.prepare_init( 42 | &verify_key, 43 | 1, 44 | &(), 45 | &nonce, 46 | &public_share, 47 | &input_shares[1], 48 | ) 49 | }); 50 | }, 51 | ); 52 | } 53 | 54 | // Prio3SumVec 55 | { 56 | let prio3 = Prio3::new_sum_vec(2, 1, dimension, chunk_length).unwrap(); 57 | let verify_key = [0; 16]; 58 | let measurement = vec![0; dimension]; 59 | c.bench_with_input( 60 | BenchmarkId::new("Prio3SumVec/shard", dimension), 61 | &dimension, 62 | |b, &_d| { 63 | b.iter(|| prio3.shard(&measurement, &nonce).unwrap()); 64 | }, 65 | ); 66 | 67 | let (public_share, input_shares) = prio3.shard(&measurement, &nonce).unwrap(); 68 | 69 | c.bench_with_input( 70 | BenchmarkId::new("Prio3SumVec/prep_init", dimension), 71 | &dimension, 72 | |b, &_d| { 73 | b.iter(|| { 74 | prio3.prepare_init( 75 | &verify_key, 76 | 1, 77 | &(), 78 | &nonce, 79 | &public_share, 80 | &input_shares[1], 81 | ) 82 | }); 83 | }, 84 | ); 85 | } 86 | 87 | // Prio3SumVecField64MultiproofHmacSha256Aes128 88 | { 89 | type Prio3SumVecField64MultiproofHmacSha256Aes128 = 90 | Prio3>>, XofHmacSha256Aes128, 32>; 91 | let typ = SumVec::new(1, dimension, chunk_length).unwrap(); 92 | let alg_id = 0; // arbitrary algorithm ID 93 | let prio3 = 94 | Prio3SumVecField64MultiproofHmacSha256Aes128::new(2, 3, alg_id, typ).unwrap(); 95 | 96 | let verify_key = [0; 32]; 97 | let measurement = vec![0; dimension]; 98 | c.bench_with_input( 99 | BenchmarkId::new( 100 | "Prio3SumVecField64MultiproofHmacSha256Aes128/shard", 101 | dimension, 102 | ), 103 | &dimension, 104 | |b, &_d| { 105 | b.iter(|| prio3.shard(&measurement, &nonce).unwrap()); 106 | }, 107 | ); 108 | 109 | let (public_share, input_shares) = prio3.shard(&measurement, &nonce).unwrap(); 110 | 111 | c.bench_with_input( 112 | BenchmarkId::new( 113 | "Prio3SumVecField64MultiproofHmacSha256Aes128/prep_init", 114 | dimension, 115 | ), 116 | &dimension, 117 | |b, &_d| { 118 | b.iter(|| { 119 | prio3.prepare_init( 120 | &verify_key, 121 | 1, 122 | &(), 123 | &nonce, 124 | &public_share, 125 | &input_shares[1], 126 | ) 127 | }); 128 | }, 129 | ); 130 | } 131 | } 132 | } 133 | 134 | criterion_group!(benches, count_vec); 135 | criterion_main!(benches); 136 | -------------------------------------------------------------------------------- /crates/daphne/src/audit_log.rs: 
-------------------------------------------------------------------------------- 1 | // Copyright (c) 2023 Cloudflare, Inc. All rights reserved. 2 | // SPDX-License-Identifier: BSD-3-Clause 3 | 4 | use crate::{messages::TaskId, DapTaskConfig}; 5 | 6 | pub trait AuditLog { 7 | fn on_aggregation_job( 8 | &self, 9 | task_id: &TaskId, 10 | task_config: &DapTaskConfig, 11 | report_count: u64, 12 | vdaf_step: u8, 13 | ); 14 | } 15 | 16 | /// Default implementation of the trait, which is a no-op. 17 | pub struct NoopAuditLog; 18 | 19 | impl AuditLog for NoopAuditLog { 20 | fn on_aggregation_job( 21 | &self, 22 | _task_id: &TaskId, 23 | _task_config: &DapTaskConfig, 24 | _report_count: u64, 25 | _vdaf_step: u8, 26 | ) { 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /crates/daphne/src/messages/request.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2025 Cloudflare, Inc. All rights reserved. 2 | // SPDX-License-Identifier: BSD-3-Clause 3 | 4 | use std::ops::Deref; 5 | 6 | use super::{ 7 | taskprov::TaskprovAdvertisement, AggregateShareReq, AggregationJobId, AggregationJobInitReq, 8 | CollectionJobId, CollectionReq, Report, 9 | }; 10 | use crate::{ 11 | constants::DapMediaType, error::DapAbort, messages::TaskId, 12 | roles::helper::HashedAggregationJobReq, DapVersion, 13 | }; 14 | 15 | pub trait RequestBody { 16 | type ResourceId; 17 | } 18 | 19 | /// A poll request has no body, but requires a `AggregationJobId`. 20 | pub struct PollAggregationJob; 21 | 22 | /// A poll request has no body, but requires a `CollectionJobId`. 23 | pub struct CollectionPollReq; 24 | 25 | macro_rules! impl_req_body { 26 | ($($body:tt | $id:tt)*) => { 27 | $(impl RequestBody for $body { 28 | type ResourceId = $id; 29 | })* 30 | }; 31 | } 32 | 33 | impl_req_body! { 34 | // body type | id type 35 | // --------------------| ---------------- 36 | Report | () 37 | AggregationJobInitReq | AggregationJobId 38 | HashedAggregationJobReq | AggregationJobId 39 | PollAggregationJob | AggregationJobId 40 | AggregateShareReq | () 41 | CollectionReq | CollectionJobId 42 | CollectionPollReq | CollectionJobId 43 | () | () 44 | } 45 | 46 | /// Fields common to all DAP requests. 47 | #[derive(Debug)] 48 | #[cfg_attr(test, derive(Default))] 49 | pub struct DapRequestMeta { 50 | /// Protocol version indicated by the request. 51 | pub version: DapVersion, 52 | 53 | /// Request media type, sent in the "content-type" header of the HTTP request. 54 | pub media_type: Option, 55 | 56 | /// ID of the task with which the request is associated. 57 | pub task_id: TaskId, 58 | 59 | /// taskprov: The task advertisement, sent in the `dap-taskprov` header. 60 | pub taskprov_advertisement: Option, 61 | } 62 | 63 | impl DapRequestMeta { 64 | /// Checks the request content type against the expected content type. 65 | pub fn get_checked_media_type(&self, expected: DapMediaType) -> Result { 66 | self.media_type 67 | .filter(|m| *m == expected) 68 | .ok_or_else(|| DapAbort::content_type(self, expected)) 69 | } 70 | } 71 | 72 | /// DAP request. 73 | #[derive(Debug)] 74 | #[cfg_attr(test, derive(Default))] 75 | pub struct DapRequest { 76 | pub meta: DapRequestMeta, 77 | 78 | /// The resource with which this request is associated. 79 | pub resource_id: B::ResourceId, 80 | 81 | /// Request payload. 
82 | pub payload: B, 83 | } 84 | 85 | impl DapRequest { 86 | pub fn map(self, mapper: F) -> DapRequest 87 | where 88 | F: FnOnce(B) -> O, 89 | O: RequestBody, 90 | { 91 | DapRequest { 92 | meta: self.meta, 93 | resource_id: self.resource_id, 94 | payload: mapper(self.payload), 95 | } 96 | } 97 | } 98 | 99 | impl AsRef for DapRequest { 100 | fn as_ref(&self) -> &DapRequestMeta { 101 | &self.meta 102 | } 103 | } 104 | 105 | impl Deref for DapRequest { 106 | type Target = DapRequestMeta; 107 | fn deref(&self) -> &Self::Target { 108 | &self.meta 109 | } 110 | } 111 | 112 | /// DAP response. 113 | #[derive(Debug)] 114 | pub struct DapResponse { 115 | pub version: DapVersion, 116 | pub media_type: DapMediaType, 117 | pub payload: Vec, 118 | } 119 | -------------------------------------------------------------------------------- /crates/daphne/src/protocol/collector.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2024 Cloudflare, Inc. All rights reserved. 2 | // SPDX-License-Identifier: BSD-3-Clause 3 | 4 | use crate::{ 5 | constants::DapAggregatorRole, 6 | fatal_error, 7 | hpke::{info_and_aad, HpkeDecrypter}, 8 | messages::{BatchSelector, HpkeCiphertext, TaskId}, 9 | DapAggregateResult, DapAggregationParam, DapError, DapVersion, VdafConfig, 10 | }; 11 | 12 | impl VdafConfig { 13 | /// Decrypt and unshard a sequence of aggregate shares. This method is run by the Collector 14 | /// after completing a collect request. 15 | /// 16 | /// # Inputs 17 | /// 18 | /// * `decrypter` is used to decrypt the aggregate shares. 19 | /// 20 | /// * `task_id` is the DAP task ID. 21 | /// 22 | /// * `batch_interval` is the batch interval for the aggregate share. 23 | /// 24 | /// * `encrypted_agg_shares` is the set of encrypted aggregate shares produced by the 25 | /// Aggregators. The first encrypted aggregate shares must be the Leader's. 26 | /// 27 | /// * `version` is the `DapVersion` to use. 
28 | #[allow(clippy::too_many_arguments)] 29 | pub fn consume_encrypted_agg_shares( 30 | &self, 31 | decrypter: &impl HpkeDecrypter, 32 | task_id: &TaskId, 33 | batch_sel: &BatchSelector, 34 | report_count: u64, 35 | agg_param: &DapAggregationParam, 36 | encrypted_agg_shares: Vec, 37 | version: DapVersion, 38 | ) -> Result { 39 | if encrypted_agg_shares.len() != 2 { 40 | return Err(fatal_error!( 41 | err = "unexpected number of encrypted aggregate shares" 42 | )); 43 | } 44 | 45 | let mut info = info_and_aad::AggregateShare { 46 | version, 47 | sender: DapAggregatorRole::Leader, // placeholder 48 | task_id, 49 | agg_param, 50 | batch_selector: batch_sel, 51 | }; 52 | 53 | let mut agg_shares = Vec::with_capacity(encrypted_agg_shares.len()); 54 | for (i, agg_share_ciphertext) in encrypted_agg_shares.iter().enumerate() { 55 | info.sender = if i == 0 { 56 | DapAggregatorRole::Leader 57 | } else { 58 | DapAggregatorRole::Helper 59 | }; 60 | 61 | let agg_share_data = decrypter.hpke_decrypt(info, agg_share_ciphertext)?; 62 | agg_shares.push(agg_share_data); 63 | } 64 | 65 | if agg_shares.len() != encrypted_agg_shares.len() { 66 | return Err(fatal_error!( 67 | err = "one or more HPKE ciphertexts with unrecognized config ID", 68 | )); 69 | } 70 | 71 | let num_measurements = usize::try_from(report_count).unwrap(); 72 | self.unshard(version, agg_param, num_measurements, agg_shares) 73 | .map_err(|e| fatal_error!(err = ?e, "failed to unshard agg_shares")) 74 | } 75 | } 76 | -------------------------------------------------------------------------------- /crates/daphne/src/testing/report_generator.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2024 Cloudflare, Inc. All rights reserved. 2 | // SPDX-License-Identifier: BSD-3-Clause 3 | 4 | use std::{ 5 | sync::{ 6 | atomic::{AtomicUsize, Ordering}, 7 | mpsc, Mutex, OnceLock, 8 | }, 9 | time::Instant, 10 | }; 11 | 12 | use crate::{ 13 | fatal_error, 14 | hpke::HpkeConfig, 15 | messages::{self, Report, TaskId, Time}, 16 | vdaf::VdafConfig, 17 | DapError, DapMeasurement, DapVersion, 18 | }; 19 | use deepsize::DeepSizeOf; 20 | use rand::{ 21 | distributions::{Distribution, Uniform}, 22 | thread_rng, 23 | }; 24 | 25 | pub struct ReportGenerator { 26 | len: usize, 27 | ch: mpsc::Receiver, 28 | } 29 | 30 | impl Iterator for ReportGenerator { 31 | type Item = messages::Report; 32 | 33 | fn next(&mut self) -> Option { 34 | self.ch.recv().ok() 35 | } 36 | } 37 | 38 | impl ReportGenerator { 39 | #[allow(clippy::too_many_arguments)] 40 | pub fn new( 41 | vdaf: &VdafConfig, 42 | hpke_config_list: &[HpkeConfig; 2], 43 | task_id: TaskId, 44 | reports_per_batch: usize, 45 | measurement: &DapMeasurement, 46 | version: DapVersion, 47 | now: Time, 48 | public_extensions: Option>, 49 | private_extensions: Vec, 50 | replay_reports: bool, 51 | ) -> Self { 52 | let (tx, rx) = mpsc::channel(); 53 | rayon::spawn({ 54 | let hpke_config_list = hpke_config_list.clone(); 55 | let measurement = measurement.clone(); 56 | let vdaf = *vdaf; 57 | move || { 58 | // perf measurements 59 | static GENERATED_REPORT_COUNTER: AtomicUsize = AtomicUsize::new(0); 60 | static LAST_INSTANT: Mutex> = Mutex::new(None); 61 | // -- 62 | 63 | let report_time_dist = Uniform::from(now - (60 * 60 * 36)..now - (60 * 60 * 24)); 64 | let error = (0..reports_per_batch).try_for_each(move |_| { 65 | // perf measurements 66 | let last_instant = *LAST_INSTANT 67 | .lock() 68 | .unwrap() 69 | .get_or_insert_with(Instant::now); 70 | let now = Instant::now(); 
71 | // ---- 72 | 73 | static LAST_REPORT: OnceLock = OnceLock::new(); 74 | let report = if replay_reports { 75 | LAST_REPORT 76 | .get_or_init(|| { 77 | vdaf.produce_report_with_extensions( 78 | &hpke_config_list, 79 | report_time_dist.sample(&mut thread_rng()), 80 | &task_id, 81 | measurement.clone(), 82 | public_extensions.clone(), 83 | private_extensions.clone(), 84 | version, 85 | ) 86 | .expect("we have to panic here since we can't return the error") 87 | }) 88 | .clone() 89 | } else { 90 | vdaf.produce_report_with_extensions( 91 | &hpke_config_list, 92 | report_time_dist.sample(&mut thread_rng()), 93 | &task_id, 94 | measurement.clone(), 95 | public_extensions.clone(), 96 | private_extensions.clone(), 97 | version, 98 | )? 99 | }; 100 | 101 | // perf measurements 102 | let count = GENERATED_REPORT_COUNTER.fetch_add(1, Ordering::SeqCst) + 1; 103 | if count % 1000 == 0 { 104 | tracing::debug!( 105 | "generated {count} reports in {:?}. Each of size {}. Last one in {:?}", 106 | last_instant.elapsed(), 107 | report.deep_size_of() as f64 / 1000., 108 | now.elapsed(), 109 | ); 110 | *LAST_INSTANT.lock().unwrap() = Some(Instant::now()); 111 | } 112 | // -- 113 | 114 | tx.send(report) 115 | .map_err(|_| fatal_error!(err = "failed to send report, channel closed"))?; 116 | Ok::<_, DapError>(()) 117 | }); 118 | if let Err(error) = error { 119 | tracing::error!(?error, "failed to generate a report"); 120 | } 121 | } 122 | }); 123 | 124 | Self { 125 | len: reports_per_batch, 126 | ch: rx, 127 | } 128 | } 129 | 130 | #[must_use] 131 | pub fn len(&self) -> usize { 132 | self.len 133 | } 134 | 135 | #[must_use] 136 | pub fn is_empty(&self) -> bool { 137 | self.len() == 0 138 | } 139 | } 140 | -------------------------------------------------------------------------------- /interop/Dockerfile.interop_helper: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2024 Cloudflare, Inc. All rights reserved. 2 | # SPDX-License-Identifier: BSD-3-Clause 3 | 4 | # Prepare dependencies common to both services. 5 | # 6 | # NOTE: We must use debian (bookworm). We cannot use alpine because building 7 | # the service proxy requires OpenSSL, which is not compatible with the musl 8 | # target required by alpine. 9 | FROM rust:1.84.1-bookworm AS build-deps-common 10 | RUN apt update && apt install -y capnproto clang 11 | RUN capnp --version 12 | 13 | # Prepare dependencies for building the storage proxy. 14 | FROM build-deps-common AS build-deps-storage-proxy 15 | RUN rustup target add wasm32-unknown-unknown 16 | RUN echo ola 17 | RUN cargo install worker-build@0.1.2 --locked 18 | 19 | # Build the service. 20 | FROM build-deps-common AS builder-service 21 | WORKDIR /build 22 | COPY Cargo.toml Cargo.lock /build/ 23 | COPY crates/daphne /build/crates/daphne 24 | COPY crates/daphne-server /build/crates/daphne-server 25 | COPY crates/daphne-service-utils /build/crates/daphne-service-utils 26 | RUN cargo build --example service --features test-utils --release 27 | 28 | # Build the storage proxy. 
29 | FROM build-deps-storage-proxy AS builder-storage-proxy 30 | WORKDIR /build 31 | COPY Cargo.toml Cargo.lock /build/ 32 | COPY crates/daphne /build/crates/daphne 33 | COPY crates/daphne-service-utils /build/crates/daphne-service-utils 34 | COPY crates/daphne-worker /build/crates/daphne-worker 35 | COPY crates/daphne-worker-test /build/crates/daphne-worker-test 36 | WORKDIR /build/crates/daphne-worker-test 37 | RUN worker-build --dev 38 | 39 | # Prepare the environment in which the service and storage proxy will run. 40 | FROM node:bookworm AS final 41 | RUN apt update && apt install -y colorized-logs 42 | RUN npm install -g wrangler@3.50.0 && npm cache clean --force 43 | COPY --from=builder-service /build/target/release/examples/service / 44 | COPY --from=builder-storage-proxy /build/crates/daphne-worker-test/build /build 45 | COPY crates/daphne-worker-test/wrangler.storage-proxy.toml / 46 | COPY crates/daphne-server/examples/configuration-helper.toml / 47 | COPY interop/run_interop_helper.sh / 48 | WORKDIR / 49 | RUN wrangler deploy --dry-run -c wrangler.storage-proxy.toml 50 | 51 | # Expose the port for the service. The test runner does not need direct access 52 | # to the storage proxy. 53 | EXPOSE 8788 54 | ENTRYPOINT ["/bin/bash", "/run_interop_helper.sh"] 55 | -------------------------------------------------------------------------------- /interop/run_interop_helper.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Copyright (c) 2024 Cloudflare, Inc. All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | 5 | set -e 6 | 7 | mkdir /logs 8 | 9 | echo "Starting storage proxy" 10 | # Start storage proxy. 11 | nohup wrangler dev --config wrangler.storage-proxy.toml --port 4001 | ansi2txt \ 12 | > /logs/storage_proxy.log 2>&1 & 13 | 14 | # Wait for the storage proxy to come up. 15 | curl --retry 10 --retry-delay 1 --retry-all-errors -s http://localhost:4001 16 | 17 | printf "\nStarting service\n" 18 | 19 | # Start service. 20 | nohup env RUST_LOG=info ./service -c configuration-helper.toml | ansi2txt \ 21 | > /logs/service.log 2>&1 & 22 | 23 | # Wait for the service to come up. 24 | curl --retry 10 --retry-delay 1 --retry-all-errors -s -X POST http://localhost:8788/internal/test/ready 25 | 26 | echo "Ready to receive requests!" 27 | 28 | wait -n 29 | exit $? 30 | --------------------------------------------------------------------------------
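A minimal sketch of how the interop helper image and script above might be exercised locally. Only the Dockerfile path, the exposed port 8788, and the readiness endpoint come from the files above; the image tag, the host port mapping, and the use of `docker` (rather than another OCI runtime) are assumptions for illustration.

# Build from the repository root: the Dockerfile copies Cargo.toml, Cargo.lock, and
# crates/ from the build context, so the context must be the repo root.
docker build -t daphne-interop-helper -f interop/Dockerfile.interop_helper .

# Run the container, publishing the service port declared by EXPOSE (8788). The
# entrypoint runs run_interop_helper.sh, which starts the storage proxy and the helper.
docker run --rm -p 8788:8788 daphne-interop-helper

# Once the container logs "Ready to receive requests!", the readiness endpoint used by
# the script should also respond from the host:
curl -s -X POST http://localhost:8788/internal/test/ready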