├── .cargo-ok ├── rust ├── .gitignore ├── Cargo.toml ├── holochain_bootstrap_wasm │ ├── Cargo.toml │ └── src │ │ ├── host.rs │ │ ├── lib.rs │ │ └── kv.rs └── holochain_bootstrap_core │ ├── src │ ├── handlers.rs │ ├── metrics.rs │ ├── lib.rs │ ├── handlers │ │ ├── post_put.rs │ │ ├── get_metrics.rs │ │ ├── post_proxy_list.rs │ │ └── post_trigger_scheduled.rs │ ├── dispatcher.rs │ ├── exec_scheduled.rs │ ├── types.rs │ └── agent_info.rs │ └── Cargo.toml ├── packed.data ├── src ├── bindings.d.ts ├── base64 │ └── base64.ts ├── op │ ├── now.ts │ ├── put.ts │ └── random.ts ├── index.ts ├── io │ └── io.ts ├── kv │ ├── kv.ts │ ├── get.ts │ ├── list.ts │ ├── random.ts │ └── put.ts ├── ctx.ts ├── request │ ├── post.ts │ └── dispatch.ts ├── msgpack │ └── msgpack.ts ├── kitsune │ └── kitsune.ts ├── crypto │ └── crypto.ts └── agent_info │ ├── signed.ts │ └── info.ts ├── .gitignore ├── .dockerignore ├── test ├── fixture │ ├── crypto.ts │ ├── spaces.ts │ └── agents.ts ├── crypto │ └── crypto.ts ├── io │ └── io.ts ├── agent_info │ ├── signed.ts │ └── info.ts └── kitsune │ └── kitsune.ts ├── .prettierrc ├── .eslintrc.yml ├── scripts ├── cf_worker_entry.js ├── run-mininet.mjs ├── set-proxy-pool.cjs ├── run-integration-test.mjs └── run-build-worker.cjs ├── shell.nix ├── tsconfig.json ├── .github └── workflows │ ├── test.yml │ └── deploy.yml ├── test-webpack.config.cjs ├── LICENSE_MIT ├── wrangler.toml ├── Dockerfile ├── webpack.config.cjs ├── package.json ├── integration └── integration.ts ├── LICENSE_APACHE └── README.md /.cargo-ok: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /rust/.gitignore: -------------------------------------------------------------------------------- 1 | Cargo.lock 2 | target 3 | -------------------------------------------------------------------------------- /packed.data: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/holochain/bootstrap/HEAD/packed.data -------------------------------------------------------------------------------- /src/bindings.d.ts: -------------------------------------------------------------------------------- 1 | export {} 2 | 3 | declare global { 4 | const BOOTSTRAP: KVNamespace 5 | } 6 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .nix-node 2 | node_modules 3 | .cargo 4 | dist 5 | test-dist 6 | transpiled 7 | worker 8 | -------------------------------------------------------------------------------- /.dockerignore: -------------------------------------------------------------------------------- 1 | Dockerfile 2 | .github 3 | node_modules 4 | .gitignore 5 | Makefile 6 | README.md 7 | shell.nix 8 | -------------------------------------------------------------------------------- /test/fixture/crypto.ts: -------------------------------------------------------------------------------- 1 | import * as NaCl from 'tweetnacl' 2 | 3 | export const keypair = () => NaCl.sign.keyPair() 4 | -------------------------------------------------------------------------------- /.prettierrc: -------------------------------------------------------------------------------- 1 | { 2 | "singleQuote": true, 3 | "semi": false, 4 | "trailingComma": "all", 5 | "tabWidth": 2, 6 | "printWidth": 80 7 | } 
-------------------------------------------------------------------------------- /.eslintrc.yml: -------------------------------------------------------------------------------- 1 | env: 2 | browser: true 3 | es2021: true 4 | extends: 5 | - standard 6 | parser: '@typescript-eslint/parser' 7 | parserOptions: 8 | ecmaVersion: 12 9 | sourceType: module 10 | plugins: 11 | - '@typescript-eslint' 12 | rules: {} 13 | -------------------------------------------------------------------------------- /rust/Cargo.toml: -------------------------------------------------------------------------------- 1 | [workspace] 2 | members = [ 3 | "holochain_bootstrap_core", 4 | "holochain_bootstrap_wasm", 5 | ] 6 | 7 | [profile.release] 8 | # Tell `rustc` to optimize for small code size. 9 | opt-level = "s" 10 | # also include link-time optimizations 11 | lto = true 12 | -------------------------------------------------------------------------------- /rust/holochain_bootstrap_wasm/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "holochain_bootstrap_wasm" 3 | version = "0.0.1" 4 | edition = "2021" 5 | 6 | [lib] 7 | crate-type = ["cdylib", "rlib"] 8 | 9 | [dependencies] 10 | js-sys = "0.3.61" 11 | wasm-bindgen = "0.2.84" 12 | wasm-bindgen-futures = "0.4.34" 13 | holochain_bootstrap_core = { path = "../holochain_bootstrap_core" } 14 | -------------------------------------------------------------------------------- /src/base64/base64.ts: -------------------------------------------------------------------------------- 1 | import * as base64 from 'base64-js' 2 | 3 | export type Value = string 4 | 5 | // Convert a Uint8Array to base64. 6 | export function fromBytes(a: Uint8Array): Value { 7 | return base64.fromByteArray(a) 8 | } 9 | 10 | // Convert base64 string to a Uint8Array. 11 | export function toBytes(b: Value): Uint8Array { 12 | return base64.toByteArray(b) 13 | } 14 | -------------------------------------------------------------------------------- /scripts/cf_worker_entry.js: -------------------------------------------------------------------------------- 1 | import bootstrapWasm from './holochain_bootstrap_wasm_bg.js' 2 | import workerIndex from './worker.js' 3 | 4 | export default { 5 | fetch: async (request, env) => { 6 | return await workerIndex.fetch(request, env, bootstrapWasm) 7 | }, 8 | scheduled: async (_event, env, _ctx) => { 9 | await workerIndex.scheduled(env, bootstrapWasm) 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /rust/holochain_bootstrap_core/src/handlers.rs: -------------------------------------------------------------------------------- 1 | //! 
Built-in common bootstrap handlers 2 | 3 | const METHOD_GET: &str = "GET"; 4 | const METHOD_POST: &str = "POST"; 5 | 6 | mod get_metrics; 7 | pub use get_metrics::*; 8 | 9 | mod post_put; 10 | pub use post_put::*; 11 | 12 | mod post_proxy_list; 13 | pub use post_proxy_list::*; 14 | 15 | mod post_trigger_scheduled; 16 | pub use post_trigger_scheduled::*; 17 | -------------------------------------------------------------------------------- /rust/holochain_bootstrap_core/src/metrics.rs: -------------------------------------------------------------------------------- 1 | /// one week in seconds 2 | pub const ONE_WEEK_S: f64 = 60.0 * 60.0 * 24.0 * 7.0; 3 | 4 | /// aggregate metrics key 5 | pub const METRICS_AGG: &str = "METRICS_AGG"; 6 | 7 | /// last time the metrics aggregation was run 8 | pub const MA_LAST_RUN: &str = "MA_LAST_RUN"; 9 | 10 | /// kv prefix for metric entries 11 | pub const METRIC_PREFIX: &str = "bootstrap_metric:"; 12 | -------------------------------------------------------------------------------- /test/fixture/spaces.ts: -------------------------------------------------------------------------------- 1 | import * as Kitsune from '../../src/kitsune/kitsune' 2 | 3 | export const vaporChatSpace: Kitsune.Space = Uint8Array.from( 4 | Array(Kitsune.kitsuneBinLength).fill(0), 5 | ) 6 | 7 | export const wikiSpace: Kitsune.Space = Uint8Array.from( 8 | Array(Kitsune.kitsuneBinLength).fill(1), 9 | ) 10 | 11 | export const emptySpace: Kitsune.Space = Uint8Array.from( 12 | Array(Kitsune.kitsuneBinLength).fill(2), 13 | ) 14 | -------------------------------------------------------------------------------- /shell.nix: -------------------------------------------------------------------------------- 1 | let 2 | pkgs = import <nixpkgs> {}; 3 | in 4 | pkgs.stdenv.mkDerivation { 5 | name = "shell"; 6 | buildInputs = [ 7 | # deps from upstream 8 | pkgs.nodejs-14_x 9 | pkgs.cargo 10 | pkgs.rustc 11 | pkgs.cloudflared 12 | ]; 13 | 14 | shellHook = '' 15 | export CARGO_HOME=$PWD/.cargo 16 | export PATH=$( npm bin ):$PATH 17 | export PATH=$CARGO_HOME/bin:$PATH 18 | npm install 19 | cargo install wrangler 20 | ''; 21 | } 22 | -------------------------------------------------------------------------------- /tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "outDir": "./dist", 4 | "target": "esnext", 5 | "module": "commonjs", 6 | "sourceMap": true, 7 | "lib": ["esnext"], 8 | "alwaysStrict": true, 9 | "strict": true, 10 | "preserveConstEnums": true, 11 | "moduleResolution": "node", 12 | "esModuleInterop": true, 13 | "types": ["@cloudflare/workers-types/2022-08-04"] 14 | }, 15 | "include": ["./src/*.ts", "./src/**/*.ts"], 16 | "exclude": ["node_modules/", "dist/"] 17 | } 18 | -------------------------------------------------------------------------------- /rust/holochain_bootstrap_core/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "holochain_bootstrap_core" 3 | version = "0.0.1" 4 | edition = "2021" 5 | 6 | ## NOTE!
We must be very careful with our dependencies 7 | ## lest we blow up the WASM size for the cloudflare worker 8 | 9 | [dependencies] 10 | base64 = { version = "0.13.0", default-features = false, features = [ "alloc" ] } 11 | ed25519-dalek = { version = "1.0.1", default-features = false, features = [ "u32_backend" ] } 12 | msgpackin_core = { version = "0.0.3" } 13 | 14 | [dev-dependencies] 15 | futures = "0.3.25" 16 | -------------------------------------------------------------------------------- /src/op/now.ts: -------------------------------------------------------------------------------- 1 | import { Ctx } from '../ctx' 2 | import * as MP from '../msgpack/msgpack' 3 | 4 | // The `now` op does not interact with the kv store at all. 5 | // We can encompass all the logic as a native call, messagepack encoded. 6 | export const now = async ( 7 | _: MP.MessagePackData, 8 | _ctx: Ctx, 9 | ): Promise => { 10 | try { 11 | return MP.encode(Date.now()) 12 | } catch (e) { 13 | if (e instanceof Error) { 14 | return e 15 | } else { 16 | return new Error(JSON.stringify(e)) 17 | } 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /rust/holochain_bootstrap_core/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![no_std] 2 | #![deny(unsafe_code)] 3 | #![deny(missing_docs)] 4 | #![deny(warnings)] 5 | //! Holochain Bootstrap Core logic to be shared with cloudflare worker 6 | //! and standalone rust binary. 7 | 8 | #[macro_use] 9 | extern crate alloc; 10 | 11 | pub(crate) const PROXY_PREFIX: &str = "proxy_pool:"; 12 | 13 | pub(crate) mod metrics; 14 | 15 | pub mod types; 16 | 17 | mod dispatcher; 18 | pub use dispatcher::*; 19 | 20 | pub mod agent_info; 21 | 22 | pub mod handlers; 23 | 24 | mod exec_scheduled; 25 | pub use exec_scheduled::*; 26 | -------------------------------------------------------------------------------- /src/index.ts: -------------------------------------------------------------------------------- 1 | import { BootstrapWasm, Ctx, wasmHost } from './ctx' 2 | import { requestDispatch } from './request/dispatch' 3 | 4 | export default { 5 | async fetch( 6 | request: Request, 7 | env: { BOOTSTRAP: KVNamespace }, 8 | bootstrapWasm: BootstrapWasm, 9 | ): Promise { 10 | const ctx = new Ctx(request, env.BOOTSTRAP, bootstrapWasm) 11 | return await requestDispatch(ctx) 12 | }, 13 | 14 | async scheduled( 15 | env: { BOOTSTRAP: KVNamespace }, 16 | bootstrapWasm: BootstrapWasm, 17 | ): Promise { 18 | await bootstrapWasm.handle_scheduled(env.BOOTSTRAP, wasmHost) 19 | }, 20 | } 21 | -------------------------------------------------------------------------------- /.github/workflows/test.yml: -------------------------------------------------------------------------------- 1 | name: npm test 2 | 3 | on: push 4 | 5 | jobs: 6 | test: 7 | name: test npm 8 | runs-on: ubuntu-latest 9 | strategy: 10 | matrix: 11 | node: ['18'] 12 | 13 | steps: 14 | # Checkout code 15 | - uses: actions/checkout@v2 16 | 17 | # Need to do the javascript builds for the wrangler to have compiled 18 | # typescript to work with. 19 | - uses: actions/setup-node@v2 20 | with: 21 | node-version: ${{ matrix.node }} 22 | 23 | # Install node dependencies 24 | - run: npm ci 25 | 26 | # Execute test script that runs miniflare in the background 27 | # as a local cloudflare api simulator target. 
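      # For reference, the package.json "test" script chains (roughly):
      #   test:rust        - cargo fmt --check, cargo clippy, cargo test
      #   format:check     - prettier --check over src/ and test/
      #   test:unit        - ts-mocha over test/**/*.ts
      #   test:integration - webpack test build, then the miniflare-backed
      #                      integration run in scripts/run-integration-test.mjs
      # so this single step exercises both the Rust core and the TypeScript worker.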
28 | - run: npm test 29 | -------------------------------------------------------------------------------- /src/io/io.ts: -------------------------------------------------------------------------------- 1 | import * as D from 'io-ts/Decoder' 2 | import { pipe } from 'fp-ts/lib/pipeable' 3 | 4 | // Decoder for a Uint8Array of any length. 5 | export const Uint8ArrayDecoder: D.Decoder = { 6 | decode: (a: unknown) => { 7 | if (a instanceof Uint8Array) { 8 | return D.success(a) 9 | } 10 | return D.failure( 11 | a, 12 | JSON.stringify(a) + ' cannot be decoded as a Uint8Array', 13 | ) 14 | }, 15 | } 16 | 17 | // Decoder factory for Uint8Array data with a fixed size. 18 | // The fixed size is constant per-factory. 19 | export const FixedSizeUint8ArrayDecoderBuilder = (n: number) => 20 | pipe( 21 | Uint8ArrayDecoder, 22 | D.refine( 23 | (input): input is Uint8Array => input.length === n, 24 | `length must be exactly ${n}`, 25 | ), 26 | ) 27 | -------------------------------------------------------------------------------- /src/op/put.ts: -------------------------------------------------------------------------------- 1 | import { Ctx } from '../ctx' 2 | import * as MP from '../msgpack/msgpack' 3 | import * as E from 'fp-ts/lib/Either' 4 | import * as KVPut from '../kv/put' 5 | 6 | // put literally puts the raw MessagePackData to the kv store if it validates. 7 | // the key is derived from the raw data. 8 | // Returns messagepack null if successful or the error if there is an error. 9 | export async function put( 10 | input: MP.MessagePackData, 11 | ctx: Ctx, 12 | ): Promise { 13 | try { 14 | let p = await KVPut.put(input, ctx) 15 | if (E.isLeft(p)) { 16 | return p.left 17 | } else { 18 | return MP.encode(p.right) 19 | } 20 | } catch (e) { 21 | if (e instanceof Error) { 22 | return e 23 | } else { 24 | return new Error(JSON.stringify(e)) 25 | } 26 | } 27 | } 28 | -------------------------------------------------------------------------------- /test-webpack.config.cjs: -------------------------------------------------------------------------------- 1 | // vim: set syntax=javascript: 2 | 3 | const path = require('path') 4 | const webpack = require('webpack') 5 | 6 | let mode = 'development' 7 | let devtool = 'inline-source-map' 8 | 9 | module.exports = { 10 | mode, 11 | devtool, 12 | target: 'node', 13 | entry: './integration/integration.ts', 14 | output: { 15 | publicPath: './', 16 | filename: `integration.cjs`, 17 | path: path.join(__dirname, 'test-dist'), 18 | }, 19 | resolve: { 20 | extensions: ['.ts', '.tsx', '.mjs', '.js'], 21 | plugins: [], 22 | }, 23 | module: { 24 | rules: [ 25 | { 26 | test: /\.tsx?$/, 27 | loader: 'ts-loader', 28 | options: { 29 | // transpileOnly is useful to skip typescript checks occasionally: 30 | transpileOnly: true, 31 | }, 32 | }, 33 | ], 34 | }, 35 | } 36 | -------------------------------------------------------------------------------- /scripts/run-mininet.mjs: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env -S node --experimental-vm-modules 2 | 3 | // vim: set syntax=javascript: 4 | 5 | import { Miniflare, Log, LogLevel } from 'miniflare' 6 | 7 | async function main() { 8 | const mf = new Miniflare({ 9 | log: new Log(LogLevel.INFO), 10 | kvNamespaces: ['BOOTSTRAP'], 11 | wranglerConfigPath: true, 12 | wranglerConfigEnv: 'local', 13 | host: '127.0.0.1', 14 | port: 8787 15 | }) 16 | 17 | const ns = await mf.getKVNamespace('BOOTSTRAP') 18 | await ns.put('proxy_pool:https://test.holo.host/this/is/a/test?noodle=true', 
'1') 19 | await ns.put('proxy_pool:https://test2.holo.host/another/test/this/is?a=b#yada', '1') 20 | 21 | const server = await mf.startServer() 22 | 23 | await new Promise(() => {}) 24 | } 25 | 26 | // entrypoint 27 | main().then( 28 | () => { 29 | console.error('[test-exec]: done') 30 | }, 31 | (err) => { 32 | console.error('[test-exec]:', err) 33 | process.exitCode = 1 34 | }, 35 | ) 36 | -------------------------------------------------------------------------------- /rust/holochain_bootstrap_core/src/handlers/post_put.rs: -------------------------------------------------------------------------------- 1 | use crate::agent_info::*; 2 | use crate::types::*; 3 | 4 | const OP_PUT: &str = "put"; 5 | 6 | /// Handler for method: "POST", op: "put". 7 | /// Validate and store an agent_info_signed struct in the bootstrap kv store. 8 | pub struct PostPut; 9 | 10 | impl AsRequestHandler for PostPut { 11 | fn handles_method(&self) -> &'static str { 12 | super::METHOD_POST 13 | } 14 | 15 | fn handles_op(&self) -> &'static str { 16 | OP_PUT 17 | } 18 | 19 | fn handle<'a>( 20 | &'a self, 21 | _kv: &'a dyn AsKV, 22 | _host: &'a dyn AsFromHost, 23 | input: &'a [u8], 24 | ) -> BCoreFut<'a, BCoreResult> { 25 | bcore_fut(async move { 26 | let sig = AgentInfoSignedRef::decode(input)?; 27 | let info = sig.verify_and_decode_agent_info()?; 28 | Err(format!("wasm put disabled: but decoded: {info:?}").into()) 29 | }) 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /rust/holochain_bootstrap_wasm/src/host.rs: -------------------------------------------------------------------------------- 1 | use super::*; 2 | 3 | pub struct Host(JsValue); 4 | 5 | impl Host { 6 | pub fn new(host: JsValue) -> JsResult { 7 | Ok(Host(host)) 8 | } 9 | 10 | /// Internal helper for getting a specific function from the KV object 11 | fn get_func_prop(&self, name: &str) -> BCoreResult { 12 | let func: JsValue = 13 | js_sys::Reflect::get(&self.0, &name.into()).map_err(|e| bcore_err!("{:?}", e))?; 14 | if !func.is_function() { 15 | return Err(format!("{name} is not a function").into()); 16 | } 17 | Ok(func.into()) 18 | } 19 | } 20 | 21 | impl AsFromHost for Host { 22 | fn get_timestamp_millis(&self) -> BCoreResult { 23 | let func = self.get_func_prop("get_timestamp_millis")?; 24 | let res = func.call0(&self.0).map_err(|e| bcore_err!("{:?}", e))?; 25 | let res = res 26 | .as_f64() 27 | .ok_or_else(|| BCoreError::from("expected num"))?; 28 | Ok(res as i64) 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /src/op/random.ts: -------------------------------------------------------------------------------- 1 | import { Ctx } from '../ctx' 2 | import * as D from 'io-ts/Decoder' 3 | import * as MP from '../msgpack/msgpack' 4 | import { pipe } from 'fp-ts/lib/pipeable' 5 | import * as E from 'fp-ts/lib/Either' 6 | import * as KVRandom from '../kv/random' 7 | 8 | // Returns random agents according to the input query. 9 | // Returns any errors, or a messagepack array of signed agent info data. 10 | // @see random as documented under kv. 
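// For reference, a sketch of the wire format (per the Query type in kv/random.ts):
// the request body is a msgpack-encoded map like { space: <36 byte Uint8Array>, limit: 5 },
// and the response is a msgpack array of the stored signed agent_info blobs for up to
// `limit` randomly selected agents in that space.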
11 | export async function random( 12 | input: MP.MessagePackData, 13 | ctx: Ctx, 14 | ): Promise { 15 | try { 16 | let result = await pipe( 17 | KVRandom.QuerySafe.decode(input), 18 | E.mapLeft((v) => Error(JSON.stringify(v))), 19 | E.map(async (queryValue) => { 20 | return MP.encode(await KVRandom.random(queryValue, ctx)) 21 | }), 22 | ) 23 | if (E.isLeft(result)) { 24 | return result.left 25 | } else { 26 | return await result.right 27 | } 28 | } catch (e) { 29 | if (e instanceof Error) { 30 | return e 31 | } else { 32 | return new Error(JSON.stringify(e)) 33 | } 34 | } 35 | } 36 | -------------------------------------------------------------------------------- /LICENSE_MIT: -------------------------------------------------------------------------------- 1 | Copyright (c) 2020 Cloudflare, Inc. 2 | 3 | Permission is hereby granted, free of charge, to any 4 | person obtaining a copy of this software and associated 5 | documentation files (the "Software"), to deal in the 6 | Software without restriction, including without 7 | limitation the rights to use, copy, modify, merge, 8 | publish, distribute, sublicense, and/or sell copies of 9 | the Software, and to permit persons to whom the Software 10 | is furnished to do so, subject to the following 11 | conditions: 12 | 13 | The above copyright notice and this permission notice 14 | shall be included in all copies or substantial portions 15 | of the Software. 16 | 17 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF 18 | ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED 19 | TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A 20 | PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT 21 | SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY 22 | CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 23 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR 24 | IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 25 | DEALINGS IN THE SOFTWARE. 
-------------------------------------------------------------------------------- /scripts/set-proxy-pool.cjs: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env node 2 | 3 | // vim: set syntax=javascript: 4 | 5 | const { execSync } = require('child_process') 6 | 7 | function wrangler(env, args) { 8 | for (let i = 0; i < args.length; ++i) { 9 | args[i] = "'" + args[i] + "'" 10 | } 11 | args.push('--binding') 12 | args.push("'BOOTSTRAP'") 13 | args.unshift("'" + env + "'") 14 | args.unshift('-e') 15 | args.unshift("'./node_modules/.bin/wrangler'") 16 | args.unshift('node') 17 | 18 | const cmd = args.join(' ') 19 | console.log(cmd) 20 | return execSync(cmd, { 21 | shell: false, 22 | encoding: 'utf8', 23 | maxBuffer: 1024 * 1024 * 1024, 24 | }) 25 | } 26 | 27 | async function main () { 28 | const env = process.argv[2] 29 | const list = JSON.parse(wrangler(env, ['kv:key', 'list'])) 30 | for (const item of list) { 31 | if (item.name.startsWith('proxy_pool:')) { 32 | console.log(wrangler(env, ['kv:key', 'delete', item.name])) 33 | } 34 | } 35 | for (const poolItem of process.argv.slice(3)) { 36 | console.log(wrangler(env, ['kv:key', 'put', 'proxy_pool:' + poolItem, '1'])) 37 | } 38 | } 39 | 40 | main().then(() => {}, err => { 41 | console.error(err) 42 | process.exit(1) 43 | }) 44 | -------------------------------------------------------------------------------- /src/kv/kv.ts: -------------------------------------------------------------------------------- 1 | import * as Base64 from '../base64/base64' 2 | import * as Kitsune from '../kitsune/kitsune' 3 | import * as D from 'io-ts/Decoder' 4 | import { FixedSizeUint8ArrayDecoderBuilder } from '../io/io' 5 | 6 | // The key _bytes_ are different to the base64 encoded representation. 7 | // The byte representation is simply the space and pubkey bytes concatenated. 8 | // The base64 representation has the space and pubkey separately encoded 9 | // _before_ they are concatenated as two strings. 10 | // @see agentKey() 11 | export const keyLength = Kitsune.spaceLength + Kitsune.agentLength 12 | export const Key = FixedSizeUint8ArrayDecoderBuilder(keyLength) 13 | export type Key = D.TypeOf 14 | 15 | // Constructs a key for a space and agent pair that makes sense for cloudflare 16 | // prefix lookups. 17 | // i.e. concatenates two _separate_ base64 encoded binaries of space/agent 18 | // which is different to the base64 encoding of the concatenated binaries 19 | // i.e. 
we concatenate the strings to preserve a 'prefix' that matches the space 20 | export function agentKey( 21 | space: Kitsune.Space, 22 | agent: Kitsune.Agent, 23 | ): Base64.Value { 24 | return '' + Base64.fromBytes(space) + Base64.fromBytes(agent) 25 | } 26 | -------------------------------------------------------------------------------- /src/ctx.ts: -------------------------------------------------------------------------------- 1 | export interface WasmHost { 2 | get_timestamp_millis: () => number 3 | } 4 | 5 | export interface BootstrapWasm { 6 | handle_request: ( 7 | kv: KVNamespace, 8 | host: WasmHost, 9 | method: string, 10 | op: string, 11 | net: string, 12 | input: Uint8Array, 13 | ) => Promise<{ 14 | status: number 15 | headers: Array<[string, string]> 16 | body: Uint8Array 17 | }> 18 | 19 | handle_scheduled: (kv: KVNamespace, host: WasmHost) => Promise 20 | } 21 | 22 | export const wasmHost = { 23 | get_timestamp_millis: Date.now.bind(Date), 24 | } 25 | 26 | export class Ctx { 27 | public wasmError?: string = undefined 28 | public wasmHost: WasmHost = wasmHost 29 | public net: string = 'tx2' 30 | 31 | constructor( 32 | public request: Request, 33 | public BOOTSTRAP: KVNamespace, 34 | public bootstrapWasm: BootstrapWasm, 35 | ) {} 36 | 37 | newResponse(bodyInit?: BodyInit | null, maybeInit?: ResponseInit): Response { 38 | maybeInit = maybeInit || {} 39 | const headers = new Headers(maybeInit.headers) 40 | if (this.wasmError) { 41 | headers.append('X-WasmError', this.wasmError) 42 | } 43 | maybeInit.headers = headers 44 | return new Response(bodyInit, maybeInit) 45 | } 46 | } 47 | -------------------------------------------------------------------------------- /src/kv/get.ts: -------------------------------------------------------------------------------- 1 | import { Ctx } from '../ctx' 2 | import * as MP from '../msgpack/msgpack' 3 | import { Key, agentKey } from '../kv/kv' 4 | import * as Kitsune from '../kitsune/kitsune' 5 | import * as E from 'fp-ts/lib/Either' 6 | import { pipe } from 'fp-ts/lib/pipeable' 7 | import { Uint8ArrayDecoder } from '../io/io' 8 | import * as D from 'io-ts/Decoder' 9 | 10 | // Get a single signed agent info by its space+agent key in the kv store. 11 | // Returns: 12 | // - The signed agent info data, as signed by the agent, as messagepack data OR 13 | // - null encoded as messagepack if the key does not exist OR 14 | // - an error if there is some error 15 | export async function get( 16 | key: Key, 17 | ctx: Ctx, 18 | ): Promise { 19 | try { 20 | let space = key.slice(0, Kitsune.spaceLength) 21 | let agent = key.slice(Kitsune.spaceLength) 22 | let pkey = '' 23 | if (ctx.net === 'tx5') { 24 | pkey += 'tx5:' 25 | } 26 | pkey += agentKey(space, agent) 27 | let value = await ctx.BOOTSTRAP.get(pkey, 'arrayBuffer') 28 | // Found values are already messagepack encoded but null won't be so we have to 29 | // manually encode it here. 30 | return value === null ? 
MP.encode(null) : new Uint8Array(value) 31 | } catch (e) { 32 | if (e instanceof Error) { 33 | return e 34 | } else { 35 | return new Error(JSON.stringify(e)) 36 | } 37 | } 38 | } 39 | -------------------------------------------------------------------------------- /src/request/post.ts: -------------------------------------------------------------------------------- 1 | import { Ctx } from '../ctx' 2 | import { put } from '../op/put' 3 | import { random } from '../op/random' 4 | import { now } from '../op/now' 5 | import * as MP from '../msgpack/msgpack' 6 | 7 | const OP_PUT: string = 'put' 8 | const OP_RANDOM: string = 'random' 9 | const OP_NOW: string = 'now' 10 | 11 | async function handle( 12 | f: (bytes: Uint8Array, ctx: Ctx) => Promise<MP.MessagePackData | Error>, 13 | input: MP.MessagePackData, 14 | ctx: Ctx, 15 | ): Promise<Response> { 16 | // Every f needs to handle messagepack decoding itself so that the deserialized 17 | // object can sanity check itself. 18 | let tryF = await f(input, ctx) 19 | 20 | if (tryF instanceof Error) { 21 | console.error('messagepack input:', input.toString()) 22 | console.error('error:', '' + tryF) 23 | return ctx.newResponse('' + tryF, { status: 500 }) 24 | } 25 | 26 | return ctx.newResponse(tryF) 27 | } 28 | 29 | export async function postHandler( 30 | ctx: Ctx, 31 | op: string, 32 | input: Uint8Array, 33 | ): Promise<Response> { 34 | switch (op) { 35 | case OP_PUT: 36 | return handle(put, input, ctx) 37 | case OP_RANDOM: 38 | return handle(random, input, ctx) 39 | case OP_NOW: 40 | return handle(now, input, ctx) 41 | default: 42 | return ctx.newResponse(MP.encode('unknown op'), { status: 500 }) 43 | } 44 | throw new Error('broken dispatch switch') 45 | } 46 | -------------------------------------------------------------------------------- /rust/holochain_bootstrap_core/src/handlers/get_metrics.rs: -------------------------------------------------------------------------------- 1 | use crate::metrics::*; 2 | use crate::types::*; 3 | 4 | const OP_METRICS: &str = "metrics"; 5 | 6 | /// Handler for method: "GET", op: "metrics". 7 | pub struct GetMetrics; 8 | 9 | impl AsRequestHandler for GetMetrics { 10 | fn handles_method(&self) -> &'static str { 11 | super::METHOD_GET 12 | } 13 | 14 | fn handles_op(&self) -> &'static str { 15 | OP_METRICS 16 | } 17 | 18 | fn handle<'a>( 19 | &'a self, 20 | kv: &'a dyn AsKV, 21 | _host: &'a dyn AsFromHost, 22 | input: &'a [u8], 23 | ) -> BCoreFut<'a, BCoreResult> { 24 | bcore_fut(async move { 25 | if !input.is_empty() { 26 | return Err("body must be empty for 'GET/metrics'".into()); 27 | } 28 | 29 | let mut body = alloc::vec::Vec::new(); 30 | body.extend_from_slice( 31 | br#"{ 32 | "header": ["timestamp", "total_agent_count", "total_space_count", "total_proxy_count"], 33 | "data": [ 34 | "#, 35 | ); 36 | 37 | if let Ok(agg) = kv.get(METRICS_AGG).await { 38 | body.extend_from_slice(&agg); 39 | } 40 | 41 | body.extend_from_slice(b"\n ]\n}\n"); 42 | 43 | Ok(HttpResponse { 44 | status: 200, 45 | headers: vec![("content-type".into(), "application/json".into())], 46 | body, 47 | }) 48 | }) 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /src/msgpack/msgpack.ts: -------------------------------------------------------------------------------- 1 | // Wrapper around whatever backend we choose for messagepack handling. 2 | // This allows us to hot-swap the encoding and decoding lib if we need to. 3 | // Includes an io-ts compatibility wrapper for use in piped data wrangling.
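// (kv/random.ts shows the intended pattern: its QuerySafe decoder pipes
// Uint8ArrayDecoder into messagePackDecoder and then into its Query decoder.)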
4 | // The exact format of messagepack data produced by the underlying lib is not 5 | // important because the bootstrap service always performs cryptographic 6 | // verification against the exact bytes produced by the signing agent. 7 | // i.e. the service never attempts to 'recreate' messagepack bytes from 8 | // structured data in order to perform cryptographic operations on it. 9 | // All that matters is that we can deserialize what we need to _after_ all 10 | // signatures are determined to be valid, and serialize anything reasonably for 11 | // downstream. 12 | import * as Lib from '@msgpack/msgpack' 13 | import { Uint8ArrayDecoder } from '../io/io' 14 | import * as D from 'io-ts/Decoder' 15 | 16 | export function encode(data: unknown): MessagePackData { 17 | return Lib.encode(data) 18 | } 19 | 20 | export function decode(bytes: Uint8Array): any { 21 | return Lib.decode(bytes) 22 | } 23 | 24 | export const messagePackData = Uint8ArrayDecoder 25 | export type MessagePackData = D.TypeOf 26 | 27 | export const messagePackDecoder: D.Decoder = { 28 | decode: (a: Uint8Array) => { 29 | try { 30 | return D.success(Lib.decode(a)) 31 | } catch (e) { 32 | return D.failure(a, JSON.stringify(e)) 33 | } 34 | }, 35 | } 36 | -------------------------------------------------------------------------------- /test/crypto/crypto.ts: -------------------------------------------------------------------------------- 1 | import * as Crypto from '../../src/crypto/crypto' 2 | import * as Base64 from '../../src/base64/base64' 3 | import { strict as assert } from 'assert' 4 | import * as Agents from '../fixture/agents' 5 | import * as NaCl from 'tweetnacl' 6 | 7 | describe('base64 handling', () => { 8 | it('should convert base64 strings to u8 int arrays', () => { 9 | const base64 = '123ABC==' 10 | const bytes = Uint8Array.from([215, 109, 192, 4]) 11 | 12 | assert.deepEqual(Base64.toBytes(base64), bytes) 13 | }) 14 | }) 15 | 16 | describe('validate signatures', () => { 17 | it('should validate detached messages', () => { 18 | const message = Uint8Array.from([1, 2, 3]) 19 | const validSignature: Crypto.Signature = Crypto.sign( 20 | message, 21 | Agents.alice.secretKey, 22 | ) 23 | const expectedSignature: Crypto.Signature = NaCl.sign.detached( 24 | message, 25 | Agents.alice.secretKey, 26 | ) 27 | assert.deepEqual(validSignature, expectedSignature) 28 | 29 | // Should verify true when everything lines up. 30 | assert.ok(Crypto.verify(message, validSignature, Agents.alice.publicKey)) 31 | 32 | // None of these are valid. 
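    // Each tuple is [message, signature, publicKey]:
    //  - a truncated message checked against an otherwise valid signature,
    //  - a valid signature checked against the wrong (bob's) public key,
    //  - a signature made with bob's secret key checked against alice's public key.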
33 | for (let [m, sig, pub] of [ 34 | [Uint8Array.from([1, 2]), validSignature, Agents.alice.publicKey], 35 | [message, validSignature, Agents.bob.publicKey], 36 | [ 37 | message, 38 | Crypto.sign(message, Agents.bob.secretKey), 39 | Agents.alice.publicKey, 40 | ], 41 | ]) { 42 | assert.ok(!Crypto.verify(m, sig, pub)) 43 | } 44 | }) 45 | }) 46 | -------------------------------------------------------------------------------- /.github/workflows/deploy.yml: -------------------------------------------------------------------------------- 1 | name: deploy 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | 8 | jobs: 9 | deploy: 10 | name: deploy 11 | runs-on: ubuntu-latest 12 | strategy: 13 | matrix: 14 | include: 15 | #- cf-env: neonphog 16 | # token: CF_API_TOKEN_NEONPHOG 17 | # proxy-pool: >- 18 | # "kitsune-proxy://SYVd4CF3BdJ4DS7KwLLgeU3_DbHoZ34Y-qroZ79DOs8/kitsune-quic/h/165.22.32.11/p/5779/--" 19 | # "kitsune-proxy://nFCWLsuRC0X31UMv8cJxioL-lBRFQ74UQAsb8qL4XyM/kitsune-quic/h/165.22.32.11/p/5778/--" 20 | # "kitsune-proxy://f3gH2VMkJ4qvZJOXx0ccL_Zo5n-s_CnBjSzAsEHHDCA/kitsune-quic/h/137.184.142.208/p/5788/--" 21 | - cf-env: dev 22 | token: CF_API_TOKEN 23 | proxy-pool: >- 24 | "kitsune-proxy://SYVd4CF3BdJ4DS7KwLLgeU3_DbHoZ34Y-qroZ79DOs8/kitsune-quic/h/165.22.32.11/p/5779/--" 25 | - cf-env: production 26 | token: CF_API_TOKEN 27 | proxy-pool: >- 28 | "kitsune-proxy://SYVd4CF3BdJ4DS7KwLLgeU3_DbHoZ34Y-qroZ79DOs8/kitsune-quic/h/165.22.32.11/p/5779/--" 29 | 30 | steps: 31 | - uses: actions/checkout@v2 32 | 33 | - uses: actions/setup-node@v2 34 | with: 35 | node-version: 18 36 | 37 | - run: npm ci 38 | 39 | - name: Deploy 40 | run: npx wrangler publish -e ${{ matrix.cf-env }} 41 | env: 42 | CF_API_TOKEN: ${{ secrets[matrix.token] }} 43 | 44 | - name: ProxyPool 45 | run: node ./scripts/set-proxy-pool.cjs ${{ matrix.cf-env }} ${{ matrix.proxy-pool }} 46 | env: 47 | CF_API_TOKEN: ${{ secrets[matrix.token] }} 48 | -------------------------------------------------------------------------------- /test/io/io.ts: -------------------------------------------------------------------------------- 1 | import { isRight, isLeft } from 'fp-ts/lib/Either' 2 | import { 3 | Uint8ArrayDecoder, 4 | FixedSizeUint8ArrayDecoderBuilder, 5 | } from '../../src/io/io' 6 | import { strict as assert } from 'assert' 7 | 8 | describe('ts-io', () => { 9 | it('Uint8Array decodes correctly', () => { 10 | for (let decodes of [ 11 | // Uint8Array of any length will decode. 12 | Uint8Array.from([1, 2, 3]), 13 | // Buffer will be decoded to a Uint8Array. 14 | Buffer.from([1, 2, 3]), 15 | ]) { 16 | assert.ok(isRight(Uint8ArrayDecoder.decode(decodes))) 17 | } 18 | 19 | for (let notDecodes of [ 20 | // Obviously bad. 21 | null, 22 | 'foo', 23 | '', 24 | // Arrays are not Uint8Arrays. 25 | [1, 2, 3], 26 | ]) { 27 | assert.ok(isLeft(Uint8ArrayDecoder.decode(notDecodes))) 28 | } 29 | }) 30 | 31 | it('FixedSizeUint8ArrayDecoder decodes correctly', () => { 32 | let decoder = FixedSizeUint8ArrayDecoderBuilder(5) 33 | 34 | for (let decodes of [ 35 | // Both of these need to be exactly 5 bytes. 36 | Uint8Array.from([1, 2, 3, 4, 5]), 37 | Buffer.from([1, 2, 3, 4, 5]), 38 | ]) { 39 | assert.ok(isRight(decoder.decode(decodes))) 40 | } 41 | 42 | for (let notDecodes of [ 43 | // This is not 5 bytes so it won't decode. 44 | Uint8Array.from([1, 2, 3]), 45 | // These can't decode because Uint8Array doesn't decode. 
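      // (they are rejected by the underlying Uint8ArrayDecoder before the
      // fixed-length refinement is even consulted)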
46 | null, 47 | 'foo', 48 | '', 49 | [1, 2, 3], 50 | ]) { 51 | assert.ok(isLeft(decoder.decode(notDecodes))) 52 | } 53 | }) 54 | }) 55 | -------------------------------------------------------------------------------- /src/request/dispatch.ts: -------------------------------------------------------------------------------- 1 | import { Ctx } from '../ctx' 2 | import { postHandler } from './post' 3 | 4 | function opFromPath(url: string): string { 5 | const res = new RegExp('^\\/([^\\/]*)').exec(new URL(url).pathname) 6 | return Array.isArray(res) && typeof res[1] === 'string' ? res[1] : '' 7 | } 8 | 9 | function netFromUrl(url: string): string { 10 | if (new URL(url).searchParams.get('net') === 'tx5') { 11 | return 'tx5' 12 | } else { 13 | return 'tx2' 14 | } 15 | } 16 | 17 | export async function requestDispatch(ctx: Ctx): Promise { 18 | const method = ctx.request.method 19 | const op = ctx.request.headers.get('X-Op') || opFromPath(ctx.request.url) 20 | ctx.net = netFromUrl(ctx.request.url) 21 | const input = new Uint8Array(await ctx.request.arrayBuffer()) 22 | 23 | try { 24 | const response = await ctx.bootstrapWasm.handle_request( 25 | ctx.BOOTSTRAP, 26 | ctx.wasmHost, 27 | method, 28 | op, 29 | ctx.net, 30 | input, 31 | ) 32 | return new Response(response.body, { 33 | status: response.status, 34 | headers: new Headers(response.headers), 35 | }) 36 | } catch (e) { 37 | ctx.wasmError = ('' + e).replace(/\r/g, '').replace(/\n/g, '') 38 | console.error('@wasm:error@', e) 39 | // for now, ignore errors and fall back to legacy logic 40 | } 41 | 42 | if (method === 'POST') { 43 | return postHandler(ctx, op, input) 44 | } 45 | 46 | // Respond with a simple pong for any GET to help with smoke testing. 47 | if (method === 'GET') { 48 | return ctx.newResponse('OK') 49 | } 50 | 51 | return ctx.newResponse('unhandled request', { status: 500 }) 52 | } 53 | -------------------------------------------------------------------------------- /wrangler.toml: -------------------------------------------------------------------------------- 1 | name = "bootstrap-local" 2 | type = "javascript" 3 | zone_id = "" 4 | account_id = "" 5 | route = "" 6 | workers_dev = true 7 | vars = { ENVIRONMENT = "dev" } 8 | compatibility_date = "2022-08-04" 9 | 10 | [triggers] 11 | # trigger scheduled tasks like metrics aggregation 12 | # internally, only one aggregation will happen per hour 13 | # setting this to run at 15 to make sure we're within the hour bucket 14 | crons = ["15 * * * *"] 15 | 16 | [build] 17 | command = "npm ci && npm run build" 18 | [build.upload] 19 | dir = "./dist" 20 | format = "modules" 21 | main = "./cf_worker_entry.js" 22 | [[build.upload.rules]] 23 | globs = ["**/*.js"] 24 | type = "ESModule" 25 | [[build.upload.rules]] 26 | globs = ["**/*.wasm"] 27 | type = "CompiledWasm" 28 | 29 | [env.dev] 30 | name = "bootstrap-dev" 31 | account_id = "18ff2b4e6205b938652998cfca0d8cff" 32 | kv_namespaces = [ 33 | { binding = "BOOTSTRAP", id = "95561bdb2dbc41e8bf6e2975e49a1f6c" } 34 | ] 35 | vars = { ENVIRONMENT = "dev" } 36 | 37 | [env.production] 38 | name = "bootstrap" 39 | workers_dev = false 40 | route = "bootstrap.holo.host/*" 41 | account_id = "18ff2b4e6205b938652998cfca0d8cff" 42 | zone_id = "35f34e8f9d04ef8c87283ea9fb812989" 43 | kv_namespaces = [ 44 | { binding = "BOOTSTRAP", id = "95561bdb2dbc41e8bf6e2975e49a1f6c" } 45 | ] 46 | vars = { ENVIRONMENT = "production" } 47 | 48 | [env.neonphog] 49 | name = "neonphog" 50 | workers_dev = false 51 | route = "worker.neonphog.com/*" 52 | account_id = 
"2628b4b23d00d785c1d177aaeb9d8ccb" 53 | zone_id = "44b36a22d05cc4dddc9617eeadbd7f33" 54 | kv_namespaces = [ 55 | { binding = "BOOTSTRAP", id = "99df026852624e44b31e7ef004f4e141" } 56 | ] 57 | vars = { ENVIRONMENT = "production" } 58 | -------------------------------------------------------------------------------- /rust/holochain_bootstrap_core/src/handlers/post_proxy_list.rs: -------------------------------------------------------------------------------- 1 | use crate::types::*; 2 | 3 | const OP_PROXY_LIST: &str = "proxy_list"; 4 | 5 | /// Handler for method: "POST", op: "proxy_list". 6 | /// List all entries in the kv with a prefix of "proxy_pool:". 7 | pub struct PostProxyList; 8 | 9 | impl AsRequestHandler for PostProxyList { 10 | fn handles_method(&self) -> &'static str { 11 | super::METHOD_POST 12 | } 13 | 14 | fn handles_op(&self) -> &'static str { 15 | OP_PROXY_LIST 16 | } 17 | 18 | fn handle<'a>( 19 | &'a self, 20 | kv: &'a dyn AsKV, 21 | _host: &'a dyn AsFromHost, 22 | input: &'a [u8], 23 | ) -> BCoreFut<'a, BCoreResult> { 24 | bcore_fut(async move { 25 | if !input.is_empty() { 26 | return Err("body must be empty for 'POST/proxy_list'".into()); 27 | } 28 | 29 | let entries = kv.list(Some(crate::PROXY_PREFIX)).await?; 30 | 31 | let mut enc = msgpackin_core::encode::Encoder::new(); 32 | let mut body = alloc::vec::Vec::new(); 33 | 34 | body.extend_from_slice(&enc.enc_arr_len(entries.len() as u32)); 35 | 36 | for key in entries { 37 | let key = key.into_bytes(); 38 | let key = &key[11..]; 39 | body.extend_from_slice(&enc.enc_str_len(key.len() as u32)); 40 | body.extend_from_slice(key); 41 | } 42 | 43 | Ok(HttpResponse { 44 | status: 200, 45 | headers: vec![("content-type".into(), "application/octet-stream".into())], 46 | body, 47 | }) 48 | }) 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /src/kitsune/kitsune.ts: -------------------------------------------------------------------------------- 1 | import * as Crypto from '../crypto/crypto' 2 | import * as D from 'io-ts/Decoder' 3 | import { Uint8ArrayDecoder, FixedSizeUint8ArrayDecoderBuilder } from '../io/io' 4 | 5 | // kitsuneBin is the concatenation of: 6 | // - 4 byte location 7 | // - 32 byte hash 8 | // Total is 36 bytes. 9 | export const kitsuneBinLength = 36 10 | // If kitsuneBin data has a prefix we need to slice it out. 11 | export const kitsuneBinPrefixLength = 0 12 | // If kitsuneBin data as a suffix we need to slice it out. 13 | export const kitsuneBinSuffixLength = 4 14 | export const Bin = Uint8ArrayDecoder 15 | export type Bin = D.TypeOf 16 | 17 | // kitsuneSpace is a standard kitsuneBin that is the DNA hash for a DHT network. 18 | export const spaceLength = kitsuneBinLength 19 | export const Space = FixedSizeUint8ArrayDecoderBuilder(spaceLength) 20 | export type Space = D.TypeOf 21 | 22 | // kitsuneAgent is a standard kitsuneBin that is the agent public key. 23 | export const agentLength = kitsuneBinLength 24 | export const Agent = FixedSizeUint8ArrayDecoderBuilder(agentLength) 25 | export type Agent = D.TypeOf 26 | 27 | // kitsuneSignature is a non-standard kitsuneBin. 28 | // It is 64 literal bytes for an Ed25519 signature WITHOUT location bytes. 29 | export const signatureLength = Crypto.signatureLength 30 | export const Signature = FixedSizeUint8ArrayDecoderBuilder(signatureLength) 31 | export type Signature = D.TypeOf 32 | 33 | // Extracting the public key from an Agent means stripping the additional 34 | // location bytes and hash prefix. 
35 | export const toPublicKey = (bin: Bin): Uint8Array => 36 | bin.slice(kitsuneBinPrefixLength, -kitsuneBinSuffixLength) 37 | -------------------------------------------------------------------------------- /src/kv/list.ts: -------------------------------------------------------------------------------- 1 | import { Ctx } from '../ctx' 2 | import * as Kitsune from '../kitsune/kitsune' 3 | import * as Base64 from '../base64/base64' 4 | import { pipe } from 'fp-ts/lib/pipeable' 5 | import * as MP from '../msgpack/msgpack' 6 | import * as E from 'fp-ts/lib/Either' 7 | import { Uint8ArrayDecoder } from '../io/io' 8 | import * as D from 'io-ts/Decoder' 9 | 10 | // Restores a pubkey given a base64 prefix 11 | function agentFromKey(prefix: Base64.Value, key: string): Kitsune.Agent { 12 | if (key.indexOf(prefix) === 0) { 13 | return Base64.toBytes(key.slice(prefix.length)) 14 | } 15 | throw new Error(`${prefix} prefix not found at start of key ${key}`) 16 | } 17 | 18 | // Paginates through the kv list API using the space as a prefix. 19 | // Returns all pubkeys for all agents currently registered in the space. 20 | export async function list( 21 | space: Kitsune.Space, 22 | ctx: Ctx, 23 | ): Promise<Array<Kitsune.Agent>> { 24 | let prefix = '' 25 | if (ctx.net === 'tx5') { 26 | prefix += 'tx5:' 27 | } 28 | prefix += Base64.fromBytes(space) 29 | let keys: any[] = [] 30 | let more = true 31 | let cursor 32 | 33 | while (more) { 34 | let options: { 35 | prefix: string 36 | cursor: any 37 | } = { 38 | prefix: prefix, 39 | cursor: undefined, 40 | } 41 | if (cursor) { 42 | options.cursor = cursor 43 | } 44 | 45 | // This comes from cloudflare in the kv binding. 46 | const list = await ctx.BOOTSTRAP.list(options) 47 | 48 | if (list.list_complete) { 49 | more = false 50 | } else { 51 | more = true 52 | cursor = list.cursor 53 | } 54 | 55 | keys = keys.concat(list.keys.map((k) => agentFromKey(prefix, k.name))) 56 | } 57 | return keys 58 | } 59 | -------------------------------------------------------------------------------- /test/agent_info/signed.ts: -------------------------------------------------------------------------------- 1 | import { Urls, AgentInfo, AgentInfoPacked } from '../../src/agent_info/info' 2 | import { 3 | AgentInfoSignedRaw, 4 | AgentInfoSignedRawSafe, 5 | AgentInfoSignedSafe, 6 | } from '../../src/agent_info/signed' 7 | import * as Kitsune from '../../src/kitsune/kitsune' 8 | import * as Agents from '../fixture/agents' 9 | import { strict as assert } from 'assert' 10 | import * as MP from '../../src/msgpack/msgpack' 11 | import { isRight, isLeft } from 'fp-ts/lib/Either' 12 | import * as _ from 'lodash' 13 | import * as Crypto from '../../src/crypto/crypto' 14 | 15 | describe('agent info signed', () => { 16 | it('should decode AgentInfoSignedRaw correctly', () => { 17 | // Round trips must work.
18 | assert.deepEqual( 19 | MP.decode(MP.encode(Agents.aliceAgentVaporSignedRaw)), 20 | Agents.aliceAgentVaporSignedRaw, 21 | ) 22 | 23 | assert.ok( 24 | isRight( 25 | AgentInfoSignedRawSafe.decode( 26 | MP.encode(Agents.aliceAgentVaporSignedRaw), 27 | ), 28 | ), 29 | ) 30 | 31 | // any bad signature must not be valid 32 | let badSignature = _.cloneDeep(Agents.aliceAgentVaporSignedRaw) 33 | badSignature.signature = new Uint8Array(Array(36)).fill(1) 34 | assert.ok(isLeft(AgentInfoSignedRawSafe.decode(MP.encode(badSignature)))) 35 | }) 36 | 37 | it('should decode agentInfoSigned correctly', () => { 38 | assert.ok( 39 | isRight( 40 | AgentInfoSignedSafe.decode(MP.encode(Agents.aliceAgentVaporSignedRaw)), 41 | ), 42 | ) 43 | 44 | // bob must not be allowed to sign alice's info 45 | let bobSignedAlice = _.cloneDeep(Agents.aliceAgentVaporSignedRaw) 46 | bobSignedAlice.signature = Crypto.sign( 47 | bobSignedAlice.agent_info, 48 | Agents.bob.secretKey, 49 | ) 50 | assert.ok(isLeft(AgentInfoSignedSafe.decode(MP.encode(bobSignedAlice)))) 51 | }) 52 | }) 53 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM alpine:3.12 2 | 3 | # RUN mkdir -p /worker 4 | # 5 | # WORKDIR /worker 6 | # 7 | # # install node on alpine 8 | # # adapted from https://github.com/alpine-docker/node/blob/master/Dockerfile 9 | # 10 | # # pin params 11 | # ENV VERSION=v14.13.1 NPM_VERSION=6.14.8 12 | # # For base builds 13 | # ENV CONFIG_FLAGS="--fully-static" DEL_PKGS="libstdc++" RM_DIRS=/usr/include 14 | # 15 | # # linux deps 16 | # RUN apk add --no-cache curl gnupg python3 gcc make libstdc++ g++ binutils-gold linux-headers 17 | # 18 | # # node dev pub keys 19 | # RUN for server in ipv4.pool.sks-keyservers.net keyserver.pgp.com ha.pool.sks-keyservers.net; \ 20 | # do \ 21 | # gpg --keyserver $server --recv-keys \ 22 | # 4ED778F539E3634C779C87C6D7062848A1AB005C \ 23 | # 94AE36675C464D64BAFA68DD7434390BDBE9B9C5 \ 24 | # FD3A5288F042B6850C66B31F09FE44734EB7990E \ 25 | # 71DCFD284A79C3B38668286BC97EC7A07EDE3FC1 \ 26 | # DD8F2338BAE7501E3DD5AC78C273792F7D83545D \ 27 | # C4F0DFFF4E8C1A8236409D08E73BC641CC11F4C8 \ 28 | # B9AE9905FFD7803F25714661B63B535A4C206CA9 \ 29 | # 56730D5401028683275BD23C23EFEFE93C4CFFFE \ 30 | # 77984A986EBC2AA786BC0F66B01FBB92821C587A && break; \ 31 | # done 32 | # 33 | # # fetch node tarball 34 | # RUN curl -sfSLO https://nodejs.org/dist/${VERSION}/node-${VERSION}.tar.xz 35 | # 36 | # # verify node tarball 37 | # RUN curl -sfSL https://nodejs.org/dist/${VERSION}/SHASUMS256.txt.asc | gpg --batch --decrypt | grep " node-${VERSION}.tar.xz\$" | sha256sum -c | grep ': OK$' 38 | # 39 | # # extract node tarball 40 | # RUN tar -xf node-${VERSION}.tar.xz 41 | # 42 | # # build node 43 | # WORKDIR node-${VERSION} 44 | # RUN ./configure --prefix=/usr ${CONFIG_FLAGS} \ 45 | # && make -j$(getconf _NPROCESSORS_ONLN) \ 46 | # && make install 47 | # RUN npm install -g npm@${NPM_VERSION} 48 | # 49 | # WORKDIR /worker 50 | # RUN rm node-${VERSION}.tar.xz 51 | # 52 | # ADD . . 
53 | # RUN npm install @cloudflare/wrangler 54 | -------------------------------------------------------------------------------- /test/kitsune/kitsune.ts: -------------------------------------------------------------------------------- 1 | import { isRight, isLeft } from 'fp-ts/lib/Either' 2 | import * as Kitsune from '../../src/kitsune/kitsune' 3 | import { strict as assert } from 'assert' 4 | 5 | describe('kitsune ts-io', () => { 6 | it('KitsuneBin decodes correctly', () => { 7 | // Any number of bytes is a valid KitsuneBin. 8 | assert.ok(isRight(Kitsune.Bin.decode(Uint8Array.from(Array(5))))) 9 | }) 10 | 11 | it('KitsuneSpace decodes correctly', () => { 12 | // KitsuneSpace must be the correct length. 13 | assert.ok( 14 | isRight( 15 | Kitsune.Space.decode(Uint8Array.from(Array(Kitsune.spaceLength))), 16 | ), 17 | ) 18 | 19 | assert.ok( 20 | isLeft( 21 | Kitsune.Space.decode(Uint8Array.from(Array(Kitsune.spaceLength - 1))), 22 | ), 23 | ) 24 | assert.ok( 25 | isLeft( 26 | Kitsune.Space.decode(Uint8Array.from(Array(Kitsune.spaceLength + 1))), 27 | ), 28 | ) 29 | }) 30 | 31 | it('KitsuneAgent decodes correctly', () => { 32 | // KitsuneAgent must be the correct length. 33 | assert.ok( 34 | isRight( 35 | Kitsune.Agent.decode(Uint8Array.from(Array(Kitsune.agentLength))), 36 | ), 37 | ) 38 | 39 | assert.ok( 40 | isLeft( 41 | Kitsune.Agent.decode(Uint8Array.from(Array(Kitsune.agentLength - 1))), 42 | ), 43 | ) 44 | assert.ok( 45 | isLeft( 46 | Kitsune.Agent.decode(Uint8Array.from(Array(Kitsune.agentLength + 1))), 47 | ), 48 | ) 49 | }) 50 | 51 | it('KitsuneSignature decodes correctly', () => { 52 | // KitsuneSignature must be the correct length. 53 | assert.ok( 54 | isRight( 55 | Kitsune.Signature.decode( 56 | Uint8Array.from(Array(Kitsune.signatureLength)), 57 | ), 58 | ), 59 | ) 60 | 61 | // The normal kitsuneBin length does NOT work for signatures. 
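    // (a signature is signatureLength = 64 raw Ed25519 bytes, so a 39 byte
    // input, like any other length, must be rejected)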
62 | assert.ok(isLeft(Kitsune.Signature.decode(Uint8Array.from(Array(39))))) 63 | }) 64 | }) 65 | -------------------------------------------------------------------------------- /webpack.config.cjs: -------------------------------------------------------------------------------- 1 | // vim: set syntax=javascript: 2 | 3 | const path = require('path') 4 | const webpack = require('webpack') 5 | const CopyPlugin = require('copy-webpack-plugin') 6 | 7 | let mode = 'production' 8 | let devtool = false 9 | if (process.env.NODE_ENV === 'development') { 10 | mode = 'development' 11 | devtool = 'inline-source-map' 12 | } 13 | 14 | const wasmBuild = path.join(__dirname, 'rust', 'target', 'wasm-build') 15 | const scripts = path.join(__dirname, 'scripts') 16 | 17 | module.exports = { 18 | mode, 19 | devtool, 20 | target: 'web', 21 | output: { 22 | publicPath: './', 23 | module: true, 24 | filename: `worker.js`, 25 | path: path.join(__dirname, 'dist'), 26 | library: { 27 | type: 'module', 28 | }, 29 | }, 30 | resolve: { 31 | extensions: ['.ts', '.tsx', '.mjs', '.js'], 32 | plugins: [], 33 | fallback: { 'crypto': false }, 34 | }, 35 | plugins: [ 36 | new CopyPlugin({ 37 | patterns: [ 38 | { 39 | from: path.join(wasmBuild, 'holochain_bootstrap_wasm_bg.wasm'), 40 | to: 'holochain_bootstrap_wasm_bg.wasm', 41 | }, 42 | { 43 | from: path.join(wasmBuild, 'holochain_bootstrap_wasm_bg.js'), 44 | to: 'holochain_bootstrap_wasm_bg.js', 45 | }, 46 | { 47 | from: path.join(wasmBuild, 'holochain_bootstrap_wasm_export.js'), 48 | to: 'holochain_bootstrap_wasm_export.js', 49 | }, 50 | { 51 | from: path.join(scripts, 'cf_worker_entry.js'), 52 | to: 'cf_worker_entry.js', 53 | }, 54 | ], 55 | }) 56 | ], 57 | module: { 58 | rules: [ 59 | { 60 | test: /\.tsx?$/, 61 | loader: 'ts-loader', 62 | options: { 63 | // transpileOnly is useful to skip typescript checks occasionally: 64 | // transpileOnly: true, 65 | }, 66 | }, 67 | ], 68 | }, 69 | experiments: { 70 | outputModule: true, 71 | }, 72 | } 73 | -------------------------------------------------------------------------------- /rust/holochain_bootstrap_core/src/dispatcher.rs: -------------------------------------------------------------------------------- 1 | use crate::types::*; 2 | 3 | use alloc::boxed::Box; 4 | use alloc::string::ToString; 5 | use alloc::vec::Vec; 6 | 7 | /// Maps request method/ops to correct handlers 8 | pub struct HandlerDispatcher { 9 | // reference to KV store 10 | kv: Box, 11 | 12 | // reference to host interface 13 | host: Box, 14 | 15 | // rather than using a true map type, we shouldn't have 16 | // very many handlers, so it results in smaller wasm 17 | // and not bad performance to just search them each time. 
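    // handlers are checked in the order they were attached; the first handler
    // whose method and op both match wins (see `handle` below)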
18 | map: Vec>, 19 | } 20 | 21 | impl HandlerDispatcher { 22 | /// construct a new handler dispatcher 23 | pub fn new(kv: KV, host: H) -> Self { 24 | let kv: Box = Box::new(kv); 25 | let host: Box = Box::new(host); 26 | Self { 27 | kv, 28 | host, 29 | map: Vec::new(), 30 | } 31 | } 32 | 33 | /// attach an additional handler instance to this dispatcher 34 | pub fn attach_handler(&mut self, h: H) { 35 | let h: Box = Box::new(h); 36 | self.map.push(h); 37 | } 38 | 39 | /// dispatch a request to appropriate handler and return response 40 | pub async fn handle(&self, method: &str, op: &str, input: &[u8]) -> BCoreResult { 41 | let Self { kv, host, map } = self; 42 | 43 | for h in map.iter() { 44 | if h.handles_method() != method { 45 | continue; 46 | } 47 | if h.handles_op() != op { 48 | continue; 49 | } 50 | return h.handle(&**kv, &**host, input).await; 51 | } 52 | 53 | Err(BCoreError::EBadOp { 54 | method: method.to_string(), 55 | op: op.to_string(), 56 | }) 57 | } 58 | } 59 | -------------------------------------------------------------------------------- /src/kv/random.ts: -------------------------------------------------------------------------------- 1 | import { Ctx } from '../ctx' 2 | import * as D from 'io-ts/Decoder' 3 | import * as E from 'fp-ts/lib/Either' 4 | import * as Kitsune from '../kitsune/kitsune' 5 | import * as MP from '../msgpack/msgpack' 6 | import * as IO from '../io/io' 7 | import * as List from './list' 8 | import * as Get from './get' 9 | import { pipe } from 'fp-ts/lib/pipeable' 10 | 11 | // One query for random agents for a specific space. 12 | // Up to `limit` agents will be returned without pagination. 13 | export const Query = D.type({ 14 | space: Kitsune.Space, 15 | limit: D.number, 16 | }) 17 | export type Query = D.TypeOf 18 | 19 | // Decoded query from messagepack data. 20 | export const QuerySafe: D.Decoder = { 21 | decode: (a: MP.MessagePackData) => { 22 | return pipe( 23 | IO.Uint8ArrayDecoder.decode(a), 24 | E.chain((value) => MP.messagePackDecoder.decode(value)), 25 | E.chain((value) => Query.decode(value)), 26 | ) 27 | }, 28 | } 29 | 30 | // Shuffle with a lazy generator because the list may be very long internally. 31 | // Allows us to stop shuffling after `limit` entries have been returned. 32 | function* shuffle(array: any) { 33 | var i = array.length 34 | while (i--) { 35 | yield array.splice(Math.floor(Math.random() * (i + 1)), 1)[0] 36 | } 37 | } 38 | 39 | export async function random( 40 | query: Query, 41 | ctx: Ctx, 42 | ): Promise> { 43 | let { space, limit } = query 44 | // Need to be random over the complete list for the whole space even if we use 45 | // a generator to shuffle, otherwise we won't ever return agents after the 46 | // first page (1000 items on cloudflare). 47 | let everyone = shuffle(await List.list(space, ctx)) 48 | let keys = [] 49 | let i = 0 50 | let k: Kitsune.Agent 51 | while (i < limit) { 52 | k = everyone.next().value 53 | if (k) { 54 | keys[i] = k 55 | i++ 56 | } else { 57 | break 58 | } 59 | } 60 | return await Promise.all( 61 | keys.map((k) => Get.get(Uint8Array.from([...space, ...k]), ctx)), 62 | ) 63 | } 64 | -------------------------------------------------------------------------------- /src/crypto/crypto.ts: -------------------------------------------------------------------------------- 1 | import * as D from 'io-ts/Decoder' 2 | import * as NaCl from 'tweetnacl' 3 | import { FixedSizeUint8ArrayDecoderBuilder, Uint8ArrayDecoder } from '../io/io' 4 | 5 | // Defer to tweetnacl for signing pubkey length. 
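// (for the Ed25519 keys used here this is 32 bytes)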
6 | export const publicKeyLength: number = NaCl.sign.publicKeyLength 7 | export const PublicKey = FixedSizeUint8ArrayDecoderBuilder(publicKeyLength) 8 | export type PublicKey = D.TypeOf 9 | 10 | // Seed is for deterministic secret generation in bytes. 11 | // Strongly recommended to not use this directly without some kind of key 12 | // stretching algorithm, e.g. scrypt or argon2id. 13 | export const seedLength: number = NaCl.sign.seedLength 14 | export const Seed = FixedSizeUint8ArrayDecoderBuilder(seedLength) 15 | export type Seed = D.TypeOf 16 | 17 | // Defer to tweetnacl for signing private key length. 18 | // This should never be used outside of testing because we only want to verify 19 | // signatures in production, never store private keys or sign anything. 20 | // @todo this may change in the future if the bootstrap service is expected to 21 | // sign its own responses to ops for agent-centric auditing. 22 | export const secretKeyLength: number = NaCl.sign.secretKeyLength 23 | export const SecretKey = FixedSizeUint8ArrayDecoderBuilder(secretKeyLength) 24 | export type SecretKey = D.TypeOf 25 | 26 | // Defer to tweetnacl for the length of a signature. 27 | export const signatureLength: number = NaCl.sign.signatureLength 28 | export const Signature = FixedSizeUint8ArrayDecoderBuilder(signatureLength) 29 | export type Signature = D.TypeOf 30 | 31 | // Messages can be any length but they must be binary data. 32 | export const Message = Uint8ArrayDecoder 33 | export type Message = D.TypeOf 34 | 35 | // Sign a message. 36 | // NOT used by the server but useful for testing. 37 | export function sign(message: Message, secret: SecretKey): Signature { 38 | return NaCl.sign.detached(message, secret) 39 | } 40 | 41 | // Verify a message. 42 | // The main workhorse for server security. 43 | // This is the only cryptography used in production. 
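// Illustrative call, mirroring the check in agent_info/signed.ts:
//   verify(agentInfoBytes, signatureBytes, Kitsune.toPublicKey(agent))
// returns true only if the signature was produced over exactly these bytes by
// the holder of the corresponding secret key.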
44 | export function verify( 45 | message: Message, 46 | signature: Signature, 47 | pubkey: PublicKey, 48 | ): boolean { 49 | return NaCl.sign.detached.verify(message, signature, pubkey) 50 | } 51 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "holochain-bootstrap", 3 | "version": "1.0.0", 4 | "description": "holochain bootstrap cloudflare worker", 5 | "main": "dist/worker.js", 6 | "scripts": { 7 | "start": "node --experimental-vm-modules ./scripts/run-mininet.mjs", 8 | "rust:fmt": "cd rust && cargo fmt -- --check", 9 | "rust:clippy": "cd rust && cargo clippy", 10 | "rust:test": "cd rust && cargo test", 11 | "rust:build": "node ./scripts/run-build-worker.cjs", 12 | "build": "npm run rust:build && webpack", 13 | "dev": "NODE_ENV=development npm run build", 14 | "format": "prettier --write 'src/**/*.ts' 'test/**/*.ts'", 15 | "format:check": "prettier --check 'src/**/*.ts' 'test/**/*.ts'", 16 | "test:rust": "npm run rust:fmt && npm run rust:clippy && npm run rust:test", 17 | "test:unit": "npx ts-mocha 'test/**/*.ts'", 18 | "test:integration": "webpack -c test-webpack.config.cjs && node --experimental-vm-modules ./scripts/run-integration-test.mjs", 19 | "test": "npm run test:rust && npm run format:check && npm run test:unit && npm run test:integration" 20 | }, 21 | "author": "author", 22 | "license": "MIT OR Apache-2.0", 23 | "type": "module", 24 | "devDependencies": { 25 | "@cloudflare/workers-types": "^4.20221111.1", 26 | "wrangler": "^2.6.2", 27 | "@types/chai": "^4.3.0", 28 | "@types/lodash": "^4.14.178", 29 | "@types/mocha": "^9.0.0", 30 | "@types/node": "^17.0.10", 31 | "@types/service-worker-mock": "^2.0.1", 32 | "@typescript-eslint/eslint-plugin": "^5.10.0", 33 | "@typescript-eslint/parser": "^5.10.0", 34 | "chai": "^4.3.4", 35 | "copy-webpack-plugin": "^10.2.1", 36 | "eslint": "^7.32.0", 37 | "eslint-config-standard": "^16.0.3", 38 | "eslint-plugin-import": "^2.25.4", 39 | "eslint-plugin-node": "^11.1.0", 40 | "eslint-plugin-promise": "^5.2.0", 41 | "miniflare": "^2.11.0", 42 | "mocha": "^10.2.0", 43 | "node-fetch": "^3.3.0", 44 | "prettier": "^2.8.1", 45 | "rimraf": "^3.0.2", 46 | "service-worker-mock": "^2.0.5", 47 | "ts-loader": "^9.4.2", 48 | "ts-mocha": "^10.0.0", 49 | "ts-node": "^10.9.1", 50 | "typescript": "^4.9.4", 51 | "webpack": "^5.75.0", 52 | "webpack-cli": "^5.0.1" 53 | }, 54 | "dependencies": { 55 | "@msgpack/msgpack": "^2.8.0", 56 | "base64-js": "^1.5.1", 57 | "fp-ts": "^2.13.1", 58 | "io-ts": "^2.2.20", 59 | "lodash": "^4.17.21", 60 | "tweetnacl": "^1.0.3" 61 | } 62 | } 63 | -------------------------------------------------------------------------------- /rust/holochain_bootstrap_core/src/handlers/post_trigger_scheduled.rs: -------------------------------------------------------------------------------- 1 | use crate::types::*; 2 | 3 | const OP_TRIGGER_SCHEDULED: &str = "trigger_scheduled"; 4 | const OP_TRIGGER_SCHEDULED_FORCE: &str = "trigger_scheduled_force"; 5 | 6 | /// Handler for method: "POST", op: "trigger_scheduled". 
7 | /// Manually trigger the "scheduled" cron event 8 | pub struct PostTriggerScheduled; 9 | 10 | impl AsRequestHandler for PostTriggerScheduled { 11 | fn handles_method(&self) -> &'static str { 12 | super::METHOD_POST 13 | } 14 | 15 | fn handles_op(&self) -> &'static str { 16 | OP_TRIGGER_SCHEDULED 17 | } 18 | 19 | fn handle<'a>( 20 | &'a self, 21 | kv: &'a dyn AsKV, 22 | host: &'a dyn AsFromHost, 23 | input: &'a [u8], 24 | ) -> BCoreFut<'a, BCoreResult> { 25 | bcore_fut(async move { 26 | if !input.is_empty() { 27 | return Err("body must be empty for 'POST/trigger_scheduled'".into()); 28 | } 29 | 30 | let res = crate::exec_scheduled(kv, host, false).await?; 31 | 32 | Ok(HttpResponse { 33 | status: 200, 34 | headers: vec![("content-type".into(), "text/plain".into())], 35 | body: res.into_bytes(), 36 | }) 37 | }) 38 | } 39 | } 40 | 41 | /// Handler for method: "POST", op: "trigger_scheduled_force". 42 | /// Manually trigger the "scheduled" cron event 43 | pub struct PostTriggerScheduledForce; 44 | 45 | impl AsRequestHandler for PostTriggerScheduledForce { 46 | fn handles_method(&self) -> &'static str { 47 | super::METHOD_POST 48 | } 49 | 50 | fn handles_op(&self) -> &'static str { 51 | OP_TRIGGER_SCHEDULED_FORCE 52 | } 53 | 54 | fn handle<'a>( 55 | &'a self, 56 | kv: &'a dyn AsKV, 57 | host: &'a dyn AsFromHost, 58 | input: &'a [u8], 59 | ) -> BCoreFut<'a, BCoreResult> { 60 | bcore_fut(async move { 61 | if !input.is_empty() { 62 | return Err("body must be empty for 'POST/trigger_scheduled_force'".into()); 63 | } 64 | 65 | let res = crate::exec_scheduled(kv, host, true).await?; 66 | 67 | Ok(HttpResponse { 68 | status: 200, 69 | headers: vec![("content-type".into(), "text/plain".into())], 70 | body: res.into_bytes(), 71 | }) 72 | }) 73 | } 74 | } 75 | -------------------------------------------------------------------------------- /scripts/run-integration-test.mjs: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env -S node --experimental-vm-modules 2 | 3 | // vim: set syntax=javascript: 4 | 5 | import { spawn } from 'child_process' 6 | import { Miniflare, Log, LogLevel } from 'miniflare' 7 | 8 | let server = null 9 | 10 | async function main() { 11 | console.log('@@@@@-start') 12 | 13 | const mf = new Miniflare({ 14 | log: new Log(LogLevel.INFO), 15 | kvNamespaces: ['BOOTSTRAP'], 16 | wranglerConfigPath: true, 17 | wranglerConfigEnv: 'local', 18 | host: '127.0.0.1', 19 | port: 8787 20 | }) 21 | 22 | const ns = await mf.getKVNamespace('BOOTSTRAP') 23 | await ns.put('proxy_pool:https://test.holo.host/this/is/a/test?noodle=true', '1') 24 | await ns.put('proxy_pool:https://test2.holo.host/another/test/this/is?a=b#yada', '1') 25 | 26 | server = await mf.startServer() 27 | 28 | console.log('@@@@@-test') 29 | 30 | await execTest() 31 | 32 | console.log('@@@@@-done') 33 | } 34 | 35 | function execTest() { 36 | return new Promise((resolve, reject) => { 37 | try { 38 | const proc = spawn('./node_modules/.bin/mocha', ['test-dist/integration.cjs'], { 39 | shell: false, 40 | stdio: ['pipe', 'inherit', 'inherit'], 41 | }) 42 | 43 | // close the sub-process stdin 44 | proc.stdin.end() 45 | 46 | // set up events to handle exit conditions 47 | proc.on('close', (code) => { 48 | console.log('[test-exec]: npm test closed', code) 49 | if (typeof code === 'number' && code === 0) { 50 | resolve() 51 | } else { 52 | reject(new Error('npm test bad exit code: ' + code)) 53 | } 54 | }) 55 | proc.on('disconnect', () => { 56 | console.log('[test-exec]: npm test 
disconnected') 57 | reject(new Error('npm test disconnect')) 58 | }) 59 | proc.on('exit', (code) => { 60 | console.log('[test-exec]: npm test exited', code) 61 | if (typeof code === 'number' && code === 0) { 62 | resolve() 63 | } else { 64 | reject(new Error('npm test bad exit code: ' + code)) 65 | } 66 | }) 67 | proc.on('error', (err) => { 68 | console.error('[test-exec]: npm test errored', err) 69 | reject(err) 70 | }) 71 | } catch (e) { 72 | reject(e) 73 | } 74 | }) 75 | } 76 | 77 | // entrypoint 78 | main().then( 79 | () => { 80 | if (server) { 81 | server.close() 82 | } 83 | console.error('[test-exec]: done') 84 | }, 85 | (err) => { 86 | if (server) { 87 | server.close() 88 | } 89 | console.error('[test-exec]:', err) 90 | process.exitCode = 1 91 | }, 92 | ) 93 | -------------------------------------------------------------------------------- /src/kv/put.ts: -------------------------------------------------------------------------------- 1 | import { Ctx } from '../ctx' 2 | import * as MP from '../msgpack/msgpack' 3 | import * as AgentInfo from '../agent_info/info' 4 | import * as AgentSigned from '../agent_info/signed' 5 | import * as KV from './kv' 6 | import * as D from 'io-ts/Decoder' 7 | import * as E from 'fp-ts/lib/Either' 8 | import { pipe } from 'fp-ts/lib/pipeable' 9 | 10 | // The maximum number of milliseconds that agent info will be held by the 11 | // bootstrap service. 12 | // Equal to 1 hour. 13 | export const MAX_HOLD = 60 * 60 * 1000 14 | 15 | // Store an AgentInfoSignedRaw under the relevant key. 16 | // Returns error if the AgentInfoSignedRaw does not decode to a safe AgentInfo. 17 | // This includes validation issues such as invalid cryptographic signatures. 18 | // Returns null if everything works and the put is successful. 19 | export async function put( 20 | agentInfoSignedRawData: MP.MessagePackData, 21 | ctx: Ctx, 22 | ): Promise> { 23 | try { 24 | let doPut = async ( 25 | agentInfoSigned: AgentSigned.AgentInfoSigned, 26 | ): Promise> => { 27 | try { 28 | let key = '' 29 | if (ctx.net === 'tx5') { 30 | key += 'tx5:' 31 | } 32 | key += KV.agentKey( 33 | agentInfoSigned.agent_info.space, 34 | agentInfoSigned.agent_info.agent, 35 | ) 36 | let value = agentInfoSignedRawData 37 | // Info expires relative to the time they were signed to enforce that agents 38 | // produce freshly signed info for each put. 39 | // Agents MUST explicitly set an expiry time relative to their signature time. 40 | let expires = Math.min( 41 | Math.floor( 42 | (agentInfoSigned.agent_info.expires_after_ms + 43 | agentInfoSigned.agent_info.signed_at_ms) / 44 | 1000, 45 | ), 46 | Date.now() + MAX_HOLD, 47 | ) 48 | 49 | // Cloudflare binds this global to the kv store. 
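        // Note: Workers KV interprets `expiration` as an absolute unix
        // timestamp in seconds, whereas `expirationTtl` would be a relative
        // TTL in seconds.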
50 | await ctx.BOOTSTRAP.put(key, value, { expiration: expires }) 51 | return E.right(null) 52 | } catch (e) { 53 | if (e instanceof Error) { 54 | return E.left(e) 55 | } else { 56 | return E.left(new Error(JSON.stringify(e))) 57 | } 58 | } 59 | } 60 | 61 | let res: E.Either>> = pipe( 62 | AgentSigned.AgentInfoSignedSafe.decode(agentInfoSignedRawData), 63 | E.mapLeft((v) => new Error(JSON.stringify(v))), 64 | E.map(async (agentInfoSignedValue) => { 65 | return await doPut(agentInfoSignedValue) 66 | }), 67 | ) 68 | 69 | if (E.isLeft(res)) { 70 | return res 71 | } else { 72 | return await res.right 73 | } 74 | } catch (e) { 75 | if (e instanceof Error) { 76 | return E.left(e) 77 | } else { 78 | return E.left(new Error(JSON.stringify(e))) 79 | } 80 | } 81 | } 82 | -------------------------------------------------------------------------------- /rust/holochain_bootstrap_wasm/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![no_std] 2 | #![allow(clippy::unused_unit)] // doesn't pick up #[wasm_bindgen]... 3 | #![deny(unsafe_code)] 4 | #![deny(missing_docs)] 5 | #![deny(warnings)] 6 | //! Holochain Bootstrap Code Cloudflare Typescript / Rust Wasm FFI Bindings 7 | 8 | #[macro_use] 9 | extern crate alloc; 10 | 11 | use holochain_bootstrap_core::types::*; 12 | use holochain_bootstrap_core::*; 13 | use wasm_bindgen::prelude::*; 14 | use wasm_bindgen::JsCast; 15 | 16 | /// Generic Javascript Result Type 17 | pub type JsResult = core::result::Result; 18 | 19 | mod kv; 20 | use kv::*; 21 | 22 | mod host; 23 | use host::*; 24 | 25 | /// Handle a scheduled event 26 | #[wasm_bindgen] 27 | pub async fn handle_scheduled(kv: JsValue, host: JsValue) -> JsResult<()> { 28 | let kv = KV::new(kv)?; 29 | let host = Host::new(host)?; 30 | exec_scheduled(&kv, &host, false) 31 | .await 32 | .map_err(|e| format!("{e:?}").into()) 33 | .map(|_| ()) 34 | } 35 | 36 | /// Handle an incoming request building up a response 37 | #[wasm_bindgen] 38 | pub async fn handle_request( 39 | kv: JsValue, 40 | host: JsValue, 41 | method: JsValue, 42 | op: JsValue, 43 | _net: JsValue, 44 | input: JsValue, 45 | ) -> JsResult { 46 | let kv = KV::new(kv)?; 47 | let host = Host::new(host)?; 48 | let mut dispatch = HandlerDispatcher::new(kv, host); 49 | dispatch.attach_handler(handlers::GetMetrics); 50 | dispatch.attach_handler(handlers::PostPut); 51 | dispatch.attach_handler(handlers::PostProxyList); 52 | dispatch.attach_handler(handlers::PostTriggerScheduled); 53 | dispatch.attach_handler(handlers::PostTriggerScheduledForce); 54 | 55 | let method = method 56 | .as_string() 57 | .ok_or_else(|| JsValue::from(format!("expect method as string: {method:?}")))?; 58 | let op = op 59 | .as_string() 60 | .ok_or_else(|| JsValue::from(format!("expect op as string: {op:?}")))?; 61 | if !input.is_instance_of::() { 62 | return Err("input must be a Uint8Array".into()); 63 | } 64 | let input = js_sys::Uint8Array::from(input).to_vec(); 65 | 66 | match dispatch.handle(&method, &op, &input).await { 67 | Ok(res) => { 68 | let out = js_sys::Object::new(); 69 | let status = res.status.into(); 70 | js_sys::Reflect::set(&out, &"status".into(), &status)?; 71 | let headers = js_sys::Array::new_with_length(res.headers.len() as u32); 72 | for (i, (key, val)) in res.headers.iter().enumerate() { 73 | let item = js_sys::Array::new_with_length(2); 74 | item.set(0, key.into()); 75 | item.set(1, val.into()); 76 | headers.set(i as u32, item.into()); 77 | } 78 | let headers = headers.into(); 79 | js_sys::Reflect::set(&out, 
&"headers".into(), &headers)?; 80 | let body = js_sys::Uint8Array::from(res.body.as_slice()).into(); 81 | js_sys::Reflect::set(&out, &"body".into(), &body)?; 82 | Ok(out.into()) 83 | } 84 | Err(err) => Err(format!("{err:?}").into()), 85 | } 86 | } 87 | -------------------------------------------------------------------------------- /src/agent_info/signed.ts: -------------------------------------------------------------------------------- 1 | import * as Kitsune from '../kitsune/kitsune' 2 | import { AgentInfo, AgentInfoSafe } from './info' 3 | import * as Crypto from '../crypto/crypto' 4 | import * as MP from '../msgpack/msgpack' 5 | import * as D from 'io-ts/Decoder' 6 | import { pipe } from 'fp-ts/lib/pipeable' 7 | import { Uint8ArrayDecoder } from '../io/io' 8 | import * as E from 'fp-ts/lib/Either' 9 | import * as _ from 'lodash' 10 | 11 | export const AgentInfoSignedRaw = D.type({ 12 | agent: Kitsune.Agent, 13 | signature: Kitsune.Signature, 14 | agent_info: MP.messagePackData, 15 | }) 16 | export type AgentInfoSignedRaw = D.TypeOf 17 | 18 | export const AgentInfoSignedRawSafe: D.Decoder< 19 | MP.MessagePackData, 20 | AgentInfoSignedRaw 21 | > = { 22 | decode: (a: MP.MessagePackData) => { 23 | return pipe( 24 | Uint8ArrayDecoder.decode(a), 25 | E.chain((value) => MP.messagePackDecoder.decode(value)), 26 | E.chain((value) => AgentInfoSignedRaw.decode(value)), 27 | E.chain((agentInfoSignedRawValue) => { 28 | // The signature must be valid for the agent's pubkey. 29 | if ( 30 | Crypto.verify( 31 | agentInfoSignedRawValue.agent_info, 32 | agentInfoSignedRawValue.signature, 33 | Kitsune.toPublicKey(agentInfoSignedRawValue.agent), 34 | ) 35 | ) { 36 | return D.success(agentInfoSignedRawValue) 37 | } else { 38 | return D.failure( 39 | a, 40 | 'Signature does not verify for agent and agent_info data.', 41 | ) 42 | } 43 | }), 44 | ) 45 | }, 46 | } 47 | 48 | export const AgentInfoSigned = D.type({ 49 | signature: Kitsune.Signature, 50 | agent: Kitsune.Agent, 51 | agent_info: AgentInfo, 52 | }) 53 | export type AgentInfoSigned = D.TypeOf 54 | 55 | export const AgentInfoSignedSafe: D.Decoder< 56 | MP.MessagePackData, 57 | AgentInfoSigned 58 | > = { 59 | decode: (a: MP.MessagePackData) => { 60 | return pipe( 61 | AgentInfoSignedRawSafe.decode(a), 62 | E.fold( 63 | (errors) => D.failure(a, JSON.stringify(errors)), 64 | (agentInfoSignedRawSafeValue) => 65 | pipe( 66 | AgentInfoSafe.decode(agentInfoSignedRawSafeValue.agent_info), 67 | E.fold( 68 | (errors) => D.failure(a, JSON.stringify(errors)), 69 | (agentInfoValue) => { 70 | // The inner and outer agent bytes need to be the same. 
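              // Otherwise an agent could wrap and republish agent_info that
              // was signed for a different agent (the bobSignedAliceRaw
              // fixture exercises exactly this case).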
71 | if ( 72 | !_.isEqual( 73 | agentInfoSignedRawSafeValue.agent, 74 | agentInfoValue.agent, 75 | ) 76 | ) { 77 | return D.failure( 78 | a, 79 | `Outer signed agent ${agentInfoSignedRawSafeValue.agent} does not match signed inner agent ${agentInfoValue.agent}.`, 80 | ) 81 | } 82 | 83 | return D.success({ 84 | signature: agentInfoSignedRawSafeValue.signature, 85 | agent: agentInfoSignedRawSafeValue.agent, 86 | agent_info: agentInfoValue, 87 | }) 88 | }, 89 | ), 90 | ), 91 | ), 92 | ) 93 | }, 94 | } 95 | -------------------------------------------------------------------------------- /test/fixture/agents.ts: -------------------------------------------------------------------------------- 1 | import * as Crypto from '../../src/crypto/crypto' 2 | import { vaporChatSpace, wikiSpace } from './spaces' 3 | import { AgentInfo } from '../../src/agent_info/info' 4 | import { AgentInfoSignedRaw } from '../../src/agent_info/signed' 5 | import * as Kitsune from '../../src/kitsune/kitsune' 6 | import { encode } from '../../src/msgpack/msgpack' 7 | import { keypair } from './crypto' 8 | 9 | export const alice = { 10 | publicKey: Uint8Array.from([ 11 | 95, 62, 138, 155, 147, 98, 254, 130, 27, 90, 189, 22, 214, 159, 53, 71, 110, 12 | 8, 222, 90, 16, 252, 179, 208, 115, 252, 10, 63, 244, 211, 125, 115, 13 | ]), 14 | secretKey: Uint8Array.from([ 15 | 185, 17, 98, 189, 195, 23, 240, 235, 171, 51, 178, 214, 33, 25, 217, 20, 16 | 250, 197, 248, 164, 162, 36, 218, 17, 6, 152, 241, 29, 72, 36, 246, 155, 95, 17 | 62, 138, 155, 147, 98, 254, 130, 27, 90, 189, 22, 214, 159, 53, 71, 110, 8, 18 | 222, 90, 16, 252, 179, 208, 115, 252, 10, 63, 244, 211, 125, 115, 19 | ]), 20 | } 21 | 22 | export const bob = { 23 | publicKey: Uint8Array.from([ 24 | 208, 28, 22, 215, 187, 154, 60, 168, 229, 6, 79, 163, 128, 143, 17, 156, 25 | 124, 230, 192, 4, 137, 124, 84, 121, 212, 49, 14, 156, 25, 120, 4, 129, 26 | ]), 27 | secretKey: Uint8Array.from([ 28 | 145, 37, 159, 254, 196, 64, 171, 49, 149, 7, 17, 253, 171, 58, 253, 214, 8, 29 | 76, 23, 4, 162, 194, 57, 130, 150, 208, 107, 148, 95, 253, 168, 61, 208, 28, 30 | 22, 215, 187, 154, 60, 168, 229, 6, 79, 163, 128, 143, 17, 156, 124, 230, 31 | 192, 4, 137, 124, 84, 121, 212, 49, 14, 156, 25, 120, 4, 129, 32 | ]), 33 | } 34 | 35 | export const publicKeyToKitsuneAgent = (publicKey: Uint8Array): Kitsune.Agent => 36 | Uint8Array.from([...publicKey, ...new Uint8Array(Array(4))]) 37 | // publicKey 38 | 39 | export const aliceAgentVapor: AgentInfo = { 40 | space: vaporChatSpace, 41 | agent: publicKeyToKitsuneAgent(alice.publicKey), 42 | urls: ['https://example.com', 'https://foo.com'], 43 | signed_at_ms: Date.now(), 44 | expires_after_ms: 100000, 45 | meta_info: new Uint8Array(0), 46 | } 47 | export const aliceAgentVaporSignedRaw: AgentInfoSignedRaw = { 48 | signature: Crypto.sign(encode(aliceAgentVapor), alice.secretKey), 49 | agent: publicKeyToKitsuneAgent(alice.publicKey), 50 | agent_info: encode(aliceAgentVapor), 51 | } 52 | 53 | export const aliceAgentWiki: AgentInfo = { 54 | space: wikiSpace, 55 | agent: publicKeyToKitsuneAgent(alice.publicKey), 56 | urls: ['https://alice.com'], 57 | signed_at_ms: Date.now(), 58 | expires_after_ms: 150000, 59 | meta_info: new Uint8Array(0), 60 | } 61 | export const aliceAgentWikiSignedRaw: AgentInfoSignedRaw = { 62 | signature: Crypto.sign(encode(aliceAgentWiki), alice.secretKey), 63 | agent: publicKeyToKitsuneAgent(alice.publicKey), 64 | agent_info: encode(aliceAgentWiki), 65 | } 66 | 67 | export const bobAgentVapor: AgentInfo = { 68 | space: 
vaporChatSpace, 69 | agent: publicKeyToKitsuneAgent(bob.publicKey), 70 | urls: ['https://bob.com'], 71 | signed_at_ms: Date.now(), 72 | expires_after_ms: 100000, 73 | meta_info: new Uint8Array(0), 74 | } 75 | export const bobAgentVaporSignedRaw: AgentInfoSignedRaw = { 76 | signature: Crypto.sign(encode(bobAgentVapor), bob.secretKey), 77 | agent: publicKeyToKitsuneAgent(bob.publicKey), 78 | agent_info: encode(bobAgentVapor), 79 | } 80 | 81 | // this is bad, bob must not be allowed to sign alice 82 | export const bobSignedAliceRaw: AgentInfoSignedRaw = { 83 | signature: Crypto.sign(encode(aliceAgentVapor), bob.secretKey), 84 | agent: publicKeyToKitsuneAgent(bob.publicKey), 85 | agent_info: encode(aliceAgentVapor), 86 | } 87 | -------------------------------------------------------------------------------- /scripts/run-build-worker.cjs: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env node 2 | 3 | // vim: set syntax=javascript: 4 | 5 | // this logic is base loosely on cloudflare's worker-build tool 6 | // https://github.com/cloudflare/workers-rs/blob/main/worker-build/src/main.rs 7 | 8 | const path = require('path') 9 | const fs = require('fs') 10 | const childProcess = require('child_process') 11 | 12 | async function main () { 13 | await checkWasmPackInstalled() 14 | await wasmPackBuild() 15 | await cleanBuildDir() 16 | await writeExportWasm() 17 | await replaceGeneratedImportWithCustomImpl() 18 | } 19 | 20 | async function execCmd (cmd, cwd) { 21 | cwd = cwd || process.cwd() 22 | console.log('[rbw:exec]:', cmd) 23 | childProcess.execSync(cmd, { 24 | cwd, 25 | shell: false, 26 | stdio: ['ignore', 'inherit', 'inherit'], 27 | }) 28 | } 29 | 30 | async function checkWasmPackInstalled () { 31 | console.log('[rbw:checkWasmPackInstalled]') 32 | try { 33 | await execCmd('wasm-pack --version') 34 | } catch (_err) { 35 | await execCmd('cargo install wasm-pack') 36 | await execCmd('wasm-pack --version') 37 | } 38 | } 39 | 40 | async function wasmPackBuild () { 41 | const crate = path.resolve('.', 'rust', 'holochain_bootstrap_wasm') 42 | const buildDir = path.resolve('.', 'rust', 'target', 'wasm-build') 43 | console.log('[rbw:wasmPackBuild]') 44 | if (process.env.NODE_ENV === 'development') { 45 | await execCmd(`wasm-pack build --dev --no-typescript --out-dir ${buildDir} --out-name holochain_bootstrap_wasm`, crate) 46 | } else { 47 | await execCmd(`wasm-pack build --release --no-typescript --out-dir ${buildDir} --out-name holochain_bootstrap_wasm`, crate) 48 | } 49 | } 50 | 51 | async function cleanBuildDir () { 52 | console.log('[rbw:cleanBuildDir]') 53 | const buildDir = path.resolve('.', 'rust', 'target', 'wasm-build') 54 | fs.unlinkSync(path.resolve(buildDir, '.gitignore')) 55 | fs.unlinkSync(path.resolve(buildDir, 'package.json')) 56 | //fs.unlinkSync(path.resolve(buildDir, 'README.md')) 57 | fs.unlinkSync(path.resolve(buildDir, 'holochain_bootstrap_wasm.js')) 58 | } 59 | 60 | async function writeExportWasm () { 61 | console.log('[rbw:writeExportWasm]') 62 | const fn = path.resolve('.', 'rust', 'target', 'wasm-build', 'holochain_bootstrap_wasm_export.js') 63 | fs.writeFileSync(fn, ` 64 | import * as rust_to_wasm from "./holochain_bootstrap_wasm_bg.js"; 65 | import _wasm from "./holochain_bootstrap_wasm_bg.wasm"; 66 | 67 | const _wasm_memory = new WebAssembly.Memory({initial: 512}) 68 | const _imports_obj = { 69 | env: { memory: _wasm_memory }, 70 | './holochain_bootstrap_wasm_bg.js': rust_to_wasm, 71 | } 72 | export default new 
WebAssembly.Instance(_wasm, _imports_obj).exports 73 | `) 74 | } 75 | 76 | async function replaceGeneratedImportWithCustomImpl () { 77 | console.log('[rbw:replaceGeneratedImportWithCustomImpl]') 78 | const fn = path.resolve('.', 'rust', 'target', 'wasm-build', 'holochain_bootstrap_wasm_bg.js') 79 | const data = fs.readFileSync(fn).toString() 80 | const customData = data.replace( 81 | `let wasm; 82 | export function __wbg_set_wasm(val) { 83 | wasm = val; 84 | }`, 85 | "import wasm from './holochain_bootstrap_wasm_export.js';", 86 | ) 87 | + "\nimport * as myself from './holochain_bootstrap_wasm_bg.js';" 88 | + "\nexport default myself;" 89 | + "\n" 90 | fs.unlinkSync(fn) 91 | fs.writeFileSync(fn, customData) 92 | } 93 | 94 | main().then(() => { 95 | console.log('[rbw:done]') 96 | }, (err) => { 97 | console.error(err) 98 | process.exit(1) 99 | }) 100 | -------------------------------------------------------------------------------- /test/agent_info/info.ts: -------------------------------------------------------------------------------- 1 | import * as AgentInfo from '../../src/agent_info/info' 2 | import { aliceAgentVapor } from '../fixture/agents' 3 | import { strict as assert } from 'assert' 4 | import { isRight, isLeft, right } from 'fp-ts/lib/Either' 5 | import * as MP from '../../src/msgpack/msgpack' 6 | import * as _ from 'lodash' 7 | import * as Agents from '../fixture/agents' 8 | 9 | describe('agent info ts-io', () => { 10 | it('should decode url', () => { 11 | assert.ok(isRight(AgentInfo.Url.decode('foo'))) 12 | assert.ok(isRight(AgentInfo.Url.decode(''))) 13 | 14 | assert.ok(isLeft(AgentInfo.Url.decode(null))) 15 | assert.ok(isLeft(AgentInfo.Url.decode(1))) 16 | // There is a max size limit on a url. 17 | assert.ok(isLeft(AgentInfo.Url.decode('a'.repeat(5000)))) 18 | // é is a multibyte character so the string length is more restricted than the 19 | // utf8 byte length. 20 | assert.ok(isLeft(AgentInfo.Url.decode('é'.repeat(2000)))) 21 | }) 22 | 23 | it('should decode urls', () => { 24 | assert.ok(isRight(AgentInfo.Urls.decode([]))) 25 | assert.ok(isRight(AgentInfo.Urls.decode(['foo']))) 26 | assert.ok(isRight(AgentInfo.Urls.decode(['']))) 27 | assert.ok(isRight(AgentInfo.Urls.decode(['', 'foo']))) 28 | assert.ok( 29 | isRight(AgentInfo.Urls.decode(Array(AgentInfo.MAX_URLS).fill('a'))), 30 | ) 31 | 32 | assert.ok(isLeft(AgentInfo.Urls.decode(''))) 33 | assert.ok(isLeft(AgentInfo.Urls.decode('foo'))) 34 | assert.ok( 35 | isLeft(AgentInfo.Urls.decode(Array(AgentInfo.MAX_URLS + 1).fill('a'))), 36 | ) 37 | }) 38 | 39 | it('should decode times', () => { 40 | let past = Date.now() - 10 41 | assert.deepEqual(AgentInfo.SignedAtMsSafe.decode(past), right(past)) 42 | 43 | let now = Date.now() 44 | assert.deepEqual(AgentInfo.SignedAtMsSafe.decode(now), right(now)) 45 | 46 | // Fractional times cannot be accepted. 47 | let fractionalMs = 1.1 48 | assert.ok(isLeft(AgentInfo.SignedAtMsSafe.decode(fractionalMs))) 49 | assert.ok(isLeft(AgentInfo.ExpiresAfterMsSafe.decode(fractionalMs))) 50 | 51 | // Negative times cannot be accepted. 52 | let negative = -10 53 | assert.ok(isLeft(AgentInfo.SignedAtMsSafe.decode(negative))) 54 | assert.ok(isLeft(AgentInfo.ExpiresAfterMsSafe.decode(negative))) 55 | 56 | // Expiry times cannot be too short. 57 | let shortExpiry = AgentInfo.MIN_EXPIRES - 1 58 | assert.ok(isLeft(AgentInfo.ExpiresAfterMsSafe.decode(shortExpiry))) 59 | 60 | // Expiry times cannot be too long. 
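    // (MAX_EXPIRES is one hour in milliseconds, per src/agent_info/info.ts.)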
61 | let longExpiry = AgentInfo.MAX_EXPIRES + 1 62 | assert.ok(isLeft(AgentInfo.ExpiresAfterMsSafe.decode(longExpiry))) 63 | 64 | // Expiry times must be just right. 65 | assert.ok( 66 | isRight(AgentInfo.ExpiresAfterMsSafe.decode(AgentInfo.MIN_EXPIRES)), 67 | ) 68 | assert.ok( 69 | isRight(AgentInfo.ExpiresAfterMsSafe.decode(AgentInfo.MAX_EXPIRES)), 70 | ) 71 | }) 72 | 73 | it('should decode packed data', () => { 74 | // We must decode valid agent info data. 75 | assert.ok( 76 | isRight(AgentInfo.AgentInfoSafe.decode(MP.encode(aliceAgentVapor))), 77 | ) 78 | 79 | // We must not decode anything with incorrect messagepack data. 80 | let aliceEncodedCorrupted = _.cloneDeep(Agents.aliceAgentVaporSignedRaw) 81 | aliceEncodedCorrupted.agent_info[0] = 0 82 | assert.ok( 83 | isLeft(AgentInfo.AgentInfoSafe.decode(MP.encode(aliceEncodedCorrupted))), 84 | ) 85 | 86 | // We must not decode anything with unexpected properties. 87 | let aliceMaliciousProperty = _.cloneDeep(Agents.aliceAgentVaporSignedRaw) 88 | aliceMaliciousProperty.bad = true 89 | assert.ok( 90 | isLeft(AgentInfo.AgentInfoSafe.decode(MP.encode(aliceMaliciousProperty))), 91 | ) 92 | }) 93 | }) 94 | -------------------------------------------------------------------------------- /rust/holochain_bootstrap_core/src/exec_scheduled.rs: -------------------------------------------------------------------------------- 1 | use crate::metrics::*; 2 | use crate::types::*; 3 | use crate::PROXY_PREFIX; 4 | 5 | use alloc::boxed::Box; 6 | 7 | const MAX_ENTRIES: usize = 160; // ~ 1 weeks worth at 1 per hour 8 | const HOUR: i64 = 60 * 60; 9 | 10 | /// Execute periodic scheduled logic 11 | pub async fn exec_scheduled( 12 | kv: &dyn AsKV, 13 | host: &dyn AsFromHost, 14 | force: bool, 15 | ) -> BCoreResult { 16 | match (move || async move { 17 | let ma_last_run = if let Ok(lr) = kv.get(MA_LAST_RUN).await { 18 | lr 19 | } else { 20 | alloc::vec::Vec::new().into_boxed_slice() 21 | }; 22 | 23 | let cur_bucket = format!("{}", (host.get_timestamp_millis()? / 1000 / HOUR) * HOUR,); 24 | 25 | if !force && &*ma_last_run == cur_bucket.as_bytes() { 26 | // we've already run an aggregation for this bucket, exit early 27 | return BCoreResult::Ok("exec_scheduled already run this bucket\n".into()); 28 | } 29 | 30 | kv.put(MA_LAST_RUN, cur_bucket.as_bytes(), ONE_WEEK_S) 31 | .await?; 32 | 33 | let mut agg: alloc::vec::Vec = 34 | if let Ok(agg) = kv.get(METRICS_AGG).await { 35 | alloc::string::String::from_utf8_lossy(&agg) 36 | .split(",\n") 37 | .map(|s| s.into()) 38 | .take(MAX_ENTRIES) 39 | .collect() 40 | } else { 41 | alloc::vec::Vec::new() 42 | }; 43 | 44 | let mut space_set = alloc::collections::BTreeSet::new(); 45 | let mut total_agent_count = 0; 46 | let mut total_proxy_count = 0; 47 | 48 | // progressive list because this list could be huge 49 | // and we don't have that much memory in wasm 50 | kv.list_progressive( 51 | None, 52 | Box::new(|keys| { 53 | for key in keys.drain(..) { 54 | let bkey = key.replace("%2F", "/").replace("%2f", "/"); 55 | let bkey = bkey.as_bytes(); 56 | if bkey == MA_LAST_RUN.as_bytes() 57 | || bkey == METRICS_AGG.as_bytes() 58 | || &bkey[..METRIC_PREFIX.as_bytes().len()] == METRIC_PREFIX.as_bytes() 59 | { 60 | // ignore 61 | } else if &bkey[..PROXY_PREFIX.as_bytes().len()] == PROXY_PREFIX.as_bytes() { 62 | total_proxy_count += 1; 63 | } else if bkey.len() >= 80 { 64 | let bkey = if bkey[0..4] == b"tx5:"[..] { 65 | &bkey[4..] 
66 | } else { 67 | bkey 68 | }; 69 | // alas, there's some wiggle room in the way 70 | // space / agents are currently encoded as keys 71 | // let's just take the first 30 bytes (40 in base64) 72 | // because we can reasonably assume that is unique 73 | let space = &bkey[..40]; 74 | let space = base64::decode(space) 75 | .map_err(|e| BCoreError::from(format!("{e:?} full_key: {key}")))?; 76 | space_set.insert(space); 77 | total_agent_count += 1; 78 | } 79 | } 80 | 81 | Ok(()) 82 | }), 83 | ) 84 | .await?; 85 | 86 | let total_space_count = space_set.len() as u64; 87 | 88 | agg.push(format!( 89 | " [{cur_bucket}, {total_agent_count}, {total_space_count}, {total_proxy_count}]", 90 | )); 91 | 92 | kv.put(METRICS_AGG, agg.join(",\n").as_bytes(), ONE_WEEK_S) 93 | .await?; 94 | 95 | BCoreResult::Ok("exec_scheduled complete\n".into()) 96 | })() 97 | .await 98 | { 99 | Ok(r) => Ok(r), 100 | Err(err) => Ok(format!("{err:?}")), 101 | } 102 | } 103 | -------------------------------------------------------------------------------- /rust/holochain_bootstrap_core/src/types.rs: -------------------------------------------------------------------------------- 1 | //! Bootstrap Core Types Module 2 | 3 | use alloc::boxed::Box; 4 | use alloc::string::{String, ToString}; 5 | use alloc::vec::Vec; 6 | use core::future::Future; 7 | 8 | /// The Bootstrap Core Error Type should be as transparent as possible 9 | /// so it is light-weight in WASM (and translates well into largely 10 | /// string-based javascript error types) but is still functional for 11 | /// the rust-based bootstrap service. 12 | pub enum BCoreError { 13 | /// Invalid Cryptographic Public Key 14 | EBadPubKey, 15 | 16 | /// Invalid Cryptographic Signature 17 | EBadSig, 18 | 19 | /// Unhandled Op Type 20 | EBadOp { 21 | /// The passed-in method that was unhandled 22 | method: String, 23 | 24 | /// The passed-in op that was unhandled 25 | op: String, 26 | }, 27 | 28 | /// Decode Error 29 | EDecode(String), 30 | 31 | /// Generic string-based error 32 | EOther(String), 33 | } 34 | 35 | impl From for BCoreError { 36 | fn from(s: String) -> Self { 37 | BCoreError::EOther(s) 38 | } 39 | } 40 | 41 | impl From<&String> for BCoreError { 42 | fn from(s: &String) -> Self { 43 | s.to_string().into() 44 | } 45 | } 46 | 47 | impl From<&str> for BCoreError { 48 | fn from(s: &str) -> Self { 49 | s.to_string().into() 50 | } 51 | } 52 | 53 | impl core::fmt::Debug for BCoreError { 54 | fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { 55 | use BCoreError::*; 56 | match self { 57 | EBadPubKey => f.write_str("EBadPubKey"), 58 | EBadSig => f.write_str("EBadSig"), 59 | EBadOp { method, op } => { 60 | f.write_str("EBadOp: ")?; 61 | f.write_str(method)?; 62 | f.write_str("/")?; 63 | f.write_str(op) 64 | } 65 | EDecode(err) => { 66 | f.write_str("EDecode: ")?; 67 | f.write_str(err) 68 | } 69 | EOther(oth) => { 70 | f.write_str("EOther: ")?; 71 | f.write_str(oth) 72 | } 73 | } 74 | } 75 | } 76 | 77 | impl core::fmt::Display for BCoreError { 78 | fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { 79 | core::fmt::Debug::fmt(self, f) 80 | } 81 | } 82 | 83 | /// build a BCoreError::EOther from `format!()`-style parameters 84 | #[macro_export] 85 | macro_rules! 
bcore_err { 86 | ($($arg: tt)*) => {{ 87 | $crate::types::BCoreError::EOther(::alloc::format!($($arg)*)) 88 | }}; 89 | } 90 | 91 | /// Bootstrap Core Result Type 92 | pub type BCoreResult = core::result::Result; 93 | 94 | /// Future type for trait declarations 95 | pub type BCoreFut<'a, T> = core::pin::Pin + 'a>>; 96 | 97 | /// Helper fn to generate a BCoreFut type 98 | pub fn bcore_fut<'a, R, F: Future + 'a>(f: F) -> BCoreFut<'a, R> { 99 | Box::pin(f) 100 | } 101 | 102 | /// HTTP Response Object 103 | #[derive(Debug)] 104 | pub struct HttpResponse { 105 | /// the status code 106 | pub status: u16, 107 | 108 | /// the list of headers to send 109 | pub headers: Vec<(String, String)>, 110 | 111 | /// the body content 112 | pub body: Vec, 113 | } 114 | 115 | /// Trait representing some functionality supplied by the host environment 116 | /// i.e. wasm 117 | pub trait AsFromHost: 'static { 118 | /// get milliseconds timestamp 119 | fn get_timestamp_millis(&self) -> BCoreResult; 120 | } 121 | 122 | /// Trait representing a KV implementation 123 | pub trait AsKV: 'static { 124 | /// put data into the KV 125 | fn put<'a>(&'a self, key: &str, value: &[u8], ttl_secs: f64) -> BCoreFut<'a, BCoreResult<()>>; 126 | 127 | /// get data from the KV 128 | fn get<'a>(&'a self, key: &str) -> BCoreFut<'a, BCoreResult>>; 129 | 130 | /// delete a key from the KV 131 | fn delete<'a>(&'a self, key: &str) -> BCoreFut<'a, BCoreResult<()>>; 132 | 133 | /// list keys from the KV progressively 134 | #[allow(clippy::type_complexity)] 135 | fn list_progressive<'a, 'b: 'a>( 136 | &'a self, 137 | prefix: Option<&str>, 138 | cb: Box) -> BCoreResult<()> + 'b>, 139 | ) -> BCoreFut<'a, BCoreResult<()>>; 140 | 141 | // -- provided -- // 142 | 143 | /// list keys from the KV 144 | fn list<'a, 'b: 'a>( 145 | &'a self, 146 | prefix: Option<&'b str>, 147 | ) -> BCoreFut<'a, BCoreResult>> { 148 | Box::pin(async move { 149 | let mut out = Vec::new(); 150 | self.list_progressive( 151 | prefix, 152 | Box::new(|keys| { 153 | out.append(keys); 154 | Ok(()) 155 | }), 156 | ) 157 | .await?; 158 | Ok(out) 159 | }) 160 | } 161 | } 162 | 163 | /// Individual Handler Logic 164 | pub trait AsRequestHandler: 'static { 165 | /// static method returns the method this handler handles, i.e. "POST". 166 | fn handles_method(&self) -> &'static str; 167 | 168 | /// static method returns the "X-Op" this handler handles, i.e. "now". 
169 | fn handles_op(&self) -> &'static str; 170 | 171 | /// the actual handler logic 172 | fn handle<'a>( 173 | &'a self, 174 | kv: &'a dyn AsKV, 175 | host: &'a dyn AsFromHost, 176 | input: &'a [u8], 177 | ) -> BCoreFut<'a, BCoreResult>; 178 | } 179 | -------------------------------------------------------------------------------- /rust/holochain_bootstrap_wasm/src/kv.rs: -------------------------------------------------------------------------------- 1 | use super::*; 2 | 3 | use alloc::boxed::Box; 4 | use alloc::string::String; 5 | use alloc::vec::Vec; 6 | 7 | /// Js-2-Rust KV ffi bindings 8 | pub struct KV(JsValue); 9 | 10 | impl KV { 11 | pub fn new(kv: JsValue) -> JsResult { 12 | Ok(KV(kv)) 13 | } 14 | 15 | // -- private -- // 16 | 17 | /// Internal helper for getting a specific function from the KV object 18 | fn get_func_prop(&self, name: &str) -> BCoreResult { 19 | let func: JsValue = 20 | js_sys::Reflect::get(&self.0, &name.into()).map_err(|e| bcore_err!("{:?}", e))?; 21 | if !func.is_function() { 22 | return Err(format!("{name} is not a function").into()); 23 | } 24 | Ok(func.into()) 25 | } 26 | } 27 | 28 | impl AsKV for KV { 29 | fn put<'a>(&'a self, key: &str, value: &[u8], ttl_secs: f64) -> BCoreFut<'a, BCoreResult<()>> { 30 | let key: JsValue = key.into(); 31 | let value: JsValue = js_sys::Uint8Array::from(value).into(); 32 | 33 | bcore_fut(async move { 34 | let func = self.get_func_prop("put")?; 35 | 36 | let expiration_ttl: JsValue = Some(ttl_secs).into(); 37 | 38 | let opts = js_sys::Object::new(); 39 | js_sys::Reflect::set(&opts, &"expirationTtl".into(), &expiration_ttl) 40 | .map_err(|e| bcore_err!("{:?}", e))?; 41 | 42 | let res = func 43 | .call3(&self.0, &key, &value, &opts) 44 | .map_err(|e| bcore_err!("{:?}", e))?; 45 | 46 | let res: js_sys::Promise = res.into(); 47 | wasm_bindgen_futures::JsFuture::from(res) 48 | .await 49 | .map_err(|e| bcore_err!("{:?}", e))?; 50 | 51 | Ok(()) 52 | }) 53 | } 54 | 55 | fn get<'a>(&'a self, key: &str) -> BCoreFut<'a, BCoreResult>> { 56 | let key: JsValue = key.into(); 57 | 58 | bcore_fut(async move { 59 | let func = self.get_func_prop("get")?; 60 | 61 | let type_: JsValue = "arrayBuffer".into(); 62 | 63 | let res = func 64 | .call2(&self.0, &key, &type_) 65 | .map_err(|e| bcore_err!("{:?}", e))?; 66 | 67 | let res: js_sys::Promise = res.into(); 68 | let res = wasm_bindgen_futures::JsFuture::from(res) 69 | .await 70 | .map_err(|e| bcore_err!("{:?}", e))?; 71 | 72 | if !res.is_instance_of::() { 73 | return Err("result must be an ArrayBuffer".into()); 74 | } 75 | 76 | let res = js_sys::Uint8Array::new(&res); 77 | 78 | Ok(res.to_vec().into_boxed_slice()) 79 | }) 80 | } 81 | 82 | fn delete<'a>(&'a self, _key: &str) -> BCoreFut<'a, BCoreResult<()>> { 83 | bcore_fut(async move { Err("unimplemented".into()) }) 84 | } 85 | 86 | fn list_progressive<'a, 'b: 'a>( 87 | &'a self, 88 | prefix: Option<&str>, 89 | mut cb: Box) -> BCoreResult<()> + 'b>, 90 | ) -> BCoreFut<'a, BCoreResult<()>> { 91 | let prefix: Option = prefix.map(|p| p.into()); 92 | bcore_fut(async move { 93 | let func = self.get_func_prop("list")?; 94 | let mut cursor: Option = None; 95 | let mut out = Vec::new(); 96 | 97 | loop { 98 | let opts = js_sys::Object::new(); 99 | if let Some(prefix) = &prefix { 100 | js_sys::Reflect::set(&opts, &"prefix".into(), prefix) 101 | .map_err(|e| bcore_err!("{:?}", e))?; 102 | } 103 | if let Some(cursor) = &cursor { 104 | js_sys::Reflect::set(&opts, &"cursor".into(), cursor) 105 | .map_err(|e| bcore_err!("{:?}", e))?; 106 | } 107 | 108 | let 
res = func 109 | .call1(&self.0, &opts) 110 | .map_err(|e| bcore_err!("{:?}", e))?; 111 | let res: js_sys::Promise = res.into(); 112 | let res = wasm_bindgen_futures::JsFuture::from(res) 113 | .await 114 | .map_err(|e| bcore_err!("{:?}", e))?; 115 | 116 | let js_keys = js_sys::Reflect::get(&res, &"keys".into()) 117 | .map_err(|e| bcore_err!("{:?}", e))?; 118 | if !js_keys.is_instance_of::() { 119 | return Err("keys must be an array".into()); 120 | } 121 | let js_keys: js_sys::Array = js_keys.into(); 122 | for key in js_keys.values() { 123 | let key = key.map_err(|e| bcore_err!("{:?}", e))?; 124 | let name = js_sys::Reflect::get(&key, &"name".into()) 125 | .map_err(|e| bcore_err!("{:?}", e))? 126 | .as_string() 127 | .ok_or_else(|| BCoreError::from("key.name must be a string"))?; 128 | out.push(name); 129 | } 130 | 131 | cb(&mut out)?; 132 | out.clear(); 133 | 134 | let list_complete = js_sys::Reflect::get(&res, &"list_complete".into()) 135 | .map_err(|e| bcore_err!("{:?}", e))? 136 | .as_bool() 137 | .ok_or_else(|| BCoreError::from("list_complete must be a boolean"))?; 138 | 139 | if list_complete { 140 | break; 141 | } 142 | 143 | let c = js_sys::Reflect::get(&res, &"cursor".into()) 144 | .map_err(|e| bcore_err!("{:?}", e))?; 145 | cursor = Some(c) 146 | } 147 | 148 | Ok(()) 149 | }) 150 | } 151 | } 152 | -------------------------------------------------------------------------------- /src/agent_info/info.ts: -------------------------------------------------------------------------------- 1 | import * as D from 'io-ts/Decoder' 2 | import * as Kitsune from '../kitsune/kitsune' 3 | import * as MP from '../msgpack/msgpack' 4 | import { Uint8ArrayDecoder } from '../io/io' 5 | import { pipe } from 'fp-ts/lib/pipeable' 6 | import * as E from 'fp-ts/lib/Either' 7 | import * as _ from 'lodash' 8 | 9 | // Size limit on URLs _as bytes_. 10 | // It is important that this counts the bytes and not string characters because 11 | // string handling is complex and subtle across different languages whereas the 12 | // utf8 byte length is always well defined. 13 | export const MAX_URL_SIZE = 2048 14 | 15 | // The maximum numbers of urls that a single agent can register. 16 | export const MAX_URLS = 256 17 | 18 | // The maximum number of milliseconds that agent info will be valid relative to 19 | // its signature time. 20 | // Equal to 1 hour. 21 | export const MAX_EXPIRES = 60 * 60 * 1000 22 | 23 | // The minimum number of milliseconds that agent info must be valid for relative 24 | // to its signature time. 25 | // Equal to 1 minute. 26 | export const MIN_EXPIRES = 60 * 1000 27 | 28 | // A single url an agent can be found at. 29 | // Each url has a maximum size in bytes and is a valid utf8 string. 30 | export const Url = pipe( 31 | D.string, 32 | D.refine( 33 | (input): input is string => 34 | new TextEncoder().encode(input).length <= MAX_URL_SIZE, 35 | `URL cannot be longer than ${MAX_URL_SIZE} bytes.`, 36 | ), 37 | ) 38 | export type Url = D.TypeOf 39 | 40 | // A list of urls an agent can be found at. 41 | // There is a limit to the numbers of urls a single agent can register. 42 | export const Urls = pipe( 43 | D.array(D.string), 44 | D.refine( 45 | (input): input is Array => input.length <= MAX_URLS, 46 | `Agents cannot have more than ${MAX_URLS} urls.`, 47 | ), 48 | ) 49 | export type Urls = D.TypeOf 50 | 51 | // Messagepack serialized representation of AgentInfo. 
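// (These are the raw bytes carried as AgentInfoSignedRaw.agent_info, i.e. the
// bytes the agent's signature is verified against.)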
52 | export const AgentInfoPacked = Uint8ArrayDecoder 53 | export type AgentInfoPacked = D.TypeOf 54 | 55 | // Time the agent signed the data, in the agent's own opinion. 56 | // Unix milliseconds. 57 | export const SignedAtMs = D.number 58 | export type SignedAtMs = D.TypeOf 59 | 60 | // Decoded SignedAtMs with various sanity checks applied. 61 | export const SignedAtMsSafe: D.Decoder = { 62 | decode: (a: unknown): E.Either => { 63 | return pipe( 64 | D.number.decode(a), 65 | E.chain((signedAtMs) => { 66 | // Milliseconds must be an integer. 67 | if (!Number.isInteger(signedAtMs)) { 68 | return D.failure(a, 'signed at ms is not an integer ' + signedAtMs) 69 | } 70 | 71 | // Time must be positive. 72 | if (signedAtMs <= 0) { 73 | return D.failure(a, 'signed at ms is negative ' + signedAtMs) 74 | } 75 | 76 | return D.success(signedAtMs) 77 | }), 78 | ) 79 | }, 80 | } 81 | 82 | // Time the agent wishes to be found at the published location. 83 | // NOT a guarantee that the agent will be found at this location as disconnects 84 | // and other network issues are unavoidable. If many agents cannot be found 85 | // within their expiry times this may indicate some kind of attack or other 86 | // issue with the bootstrap service. 87 | // Time in milliseconds relative to the signing time. 88 | export const ExpiresAfterMs = D.number 89 | export type ExpiresAfterMs = D.TypeOf 90 | 91 | // Decoded ExpiresAfterMs with various sanity checks applied. 92 | export const ExpiresAfterMsSafe: D.Decoder = { 93 | decode: (a: unknown): E.Either => { 94 | return pipe( 95 | D.number.decode(a), 96 | E.chain((expiresAfterMs) => { 97 | // Milliseconds must be an integer. 98 | if (!Number.isInteger(expiresAfterMs)) { 99 | return D.failure( 100 | a, 101 | 'expires after time is not an integer ' + expiresAfterMs, 102 | ) 103 | } 104 | 105 | // Expiry times cannot be too short. 106 | if (expiresAfterMs < MIN_EXPIRES) { 107 | return D.failure( 108 | a, 109 | 'expires after time ' + 110 | expiresAfterMs + 111 | ' is less than min expiry time ' + 112 | MIN_EXPIRES, 113 | ) 114 | } 115 | 116 | // Expiry times cannot be too long. 117 | if (expiresAfterMs > MAX_EXPIRES) { 118 | return D.failure( 119 | a, 120 | 'expires after time ' + 121 | expiresAfterMs + 122 | ' is longer than max expiry time ' + 123 | MAX_EXPIRES, 124 | ) 125 | } 126 | 127 | return D.success(expiresAfterMs) 128 | }), 129 | ) 130 | }, 131 | } 132 | 133 | export const AgentInfo = D.type({ 134 | // Each agent info is specific to one space. 135 | // Many active spaces implies many active agent infos, even if the network 136 | // connection used by the agent is identical for all spaces. 137 | space: Kitsune.Space, 138 | // The agent public key. 139 | agent: Kitsune.Agent, 140 | // List of urls the agent can be connected to at. 141 | urls: Urls, 142 | // Unix timestamp milliseconds the info was signed. 143 | signed_at_ms: SignedAtMsSafe, 144 | // Milliseconds after which this info expires relative to the signature time. 145 | expires_after_ms: ExpiresAfterMsSafe, 146 | // Information that is not used for bootstrapping. 
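  // Opaque to the service: it is only checked to be raw binary data and is
  // otherwise never inspected.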
147 | meta_info: MP.messagePackData, 148 | }) 149 | export type AgentInfo = D.TypeOf 150 | 151 | export const AgentInfoSafe: D.Decoder = { 152 | decode: (a: unknown) => { 153 | return pipe( 154 | Uint8ArrayDecoder.decode(a), 155 | E.chain((value) => MP.messagePackDecoder.decode(value)), 156 | E.fold( 157 | (errors) => D.failure(a, JSON.stringify(errors)), 158 | (rawValue) => 159 | pipe( 160 | AgentInfo.decode(rawValue), 161 | E.fold( 162 | (errors) => D.failure(a, JSON.stringify(errors)), 163 | (agentInfoValue) => { 164 | // Ensure that the decoded AgentInfo matches the generic object. 165 | // This flags the situation where additional properties were added to 166 | // the object that were dropped on the AgentInfo. We don't accept this 167 | // because honest nodes should always sign exactly valid data. 168 | if (_.isEqual(agentInfoValue, rawValue)) { 169 | return D.success(agentInfoValue) 170 | } else { 171 | return D.failure( 172 | a, 173 | JSON.stringify(agentInfoValue) + 174 | ' does not equal ' + 175 | JSON.stringify(rawValue), 176 | ) 177 | } 178 | }, 179 | ), 180 | ), 181 | ), 182 | ) 183 | }, 184 | } 185 | -------------------------------------------------------------------------------- /integration/integration.ts: -------------------------------------------------------------------------------- 1 | import fetch from 'node-fetch' 2 | import { assert } from 'chai' 3 | import * as Agents from '../test/fixture/agents' 4 | import { keypair } from '../test/fixture/crypto' 5 | import * as Crypto from '../src/crypto/crypto' 6 | import { vaporChatSpace, wikiSpace, emptySpace } from '../test/fixture/spaces' 7 | import * as MP from '../src/msgpack/msgpack' 8 | import * as Kitsune from '../src/kitsune/kitsune' 9 | import * as _ from 'lodash' 10 | 11 | describe('integration tests', () => { 12 | for (const query of ['', '?net=tx2', '?net=tx5']) { 13 | let url = 'http://127.0.0.1:8787' + query 14 | 15 | it('should GET correctly (' + query + ')', async function () { 16 | this.timeout(0) 17 | 18 | let ok = await fetch(url).then((res) => res.text()) 19 | 20 | assert.deepEqual('OK', ok) 21 | }) 22 | 23 | it('should handle POST errors (' + query + ')', async function () { 24 | // needs an extended timeout to post everything 25 | this.timeout(0) 26 | 27 | let errApi = async (op: string, body: unknown): Promise => { 28 | return await fetch(url, { 29 | method: 'post', 30 | body: MP.encode(body), 31 | headers: { 32 | 'Content-Type': 'application/octet', 33 | 'X-Op': op, 34 | }, 35 | }) 36 | // .then(_ => assert.ok(false)) 37 | .catch((err) => console.log('we WANT an error here', err)) 38 | } 39 | 40 | // any bad signature must not POST 41 | let badSignature = _.cloneDeep(Agents.aliceAgentVaporSignedRaw) 42 | badSignature.signature = new Uint8Array( 43 | Array(Kitsune.signatureLength), 44 | ).fill(1) 45 | 46 | let badSignatureErr = await errApi('put', badSignature) 47 | assert.deepEqual(badSignatureErr.status, 500) 48 | 49 | assert.ok( 50 | (await badSignatureErr.text()).includes( 51 | 'Signature does not verify for agent and agent_info data.', 52 | ), 53 | ) 54 | 55 | let badSpace = Uint8Array.from([1, 2, 3]) 56 | 57 | let badRandomQuery = { 58 | space: badSpace, 59 | limit: 5, 60 | } 61 | let badRandomQueryErr = await errApi('random', badRandomQuery) 62 | assert.deepEqual(badRandomQueryErr.status, 500) 63 | assert.ok( 64 | (await badRandomQueryErr.text()).includes('length must be exactly 36'), 65 | ) 66 | }) 67 | 68 | it('should POST/proxy_list correctly (' + query + ')', async function () { 69 | const 
raw = await fetch(url, { 70 | method: 'POST', 71 | body: new Uint8Array(0), 72 | headers: { 73 | 'Content-Type': 'application/octet-stream', 74 | 'X-Op': 'proxy_list', 75 | }, 76 | }) 77 | 78 | if (raw.status !== 200) { 79 | throw new Error(JSON.stringify(raw)) 80 | } 81 | 82 | const buffer = await raw.buffer() 83 | const res = MP.decode(buffer) 84 | 85 | assert.deepEqual([ 86 | 'https://test.holo.host/this/is/a/test?noodle=true', 87 | 'https://test2.holo.host/another/test/this/is?a=b#yada', 88 | ], res.sort()) 89 | }) 90 | 91 | it('should trigger_scheduled / metrics correctly (' + query + ')', async function () { 92 | this.timeout(0) 93 | 94 | // add some random agents to 3 different spaces 95 | for (let s = 0; s < 3; ++s) { 96 | let space = Uint8Array.from(Array(36).fill(100 - s)) 97 | for (let a = 0; a < 3; ++a) { 98 | const {publicKey, secretKey} = keypair() 99 | const info = { 100 | space, 101 | agent: Agents.publicKeyToKitsuneAgent(publicKey), 102 | urls: ['https://foo.com'], 103 | signed_at_ms: Date.now(), 104 | expires_after_ms: 100000, 105 | meta_info: new Uint8Array(0), 106 | } 107 | const infoEnc = MP.encode(info) 108 | const signed = MP.encode({ 109 | signature: Crypto.sign(infoEnc, secretKey), 110 | agent: info.agent, 111 | agent_info: infoEnc, 112 | }) 113 | await fetch(url, { 114 | method: 'POST', 115 | body: signed, 116 | headers: { 117 | 'X-Op': 'put', 118 | }, 119 | }) 120 | } 121 | } 122 | 123 | // trigger the scheduled aggregation 3 times 124 | for (let i = 0; i < 3; ++i) { 125 | await fetch(url, { 126 | method: 'POST', 127 | body: new Uint8Array(0), 128 | headers: { 129 | 'X-Op': 'trigger_scheduled', 130 | }, 131 | }) 132 | } 133 | 134 | // pull down the aggregated metrics 135 | const raw = await fetch(url, { 136 | method: 'GET', 137 | headers: { 138 | 'X-Op': 'metrics', 139 | }, 140 | }) 141 | 142 | if (raw.status !== 200) { 143 | throw new Error(JSON.stringify(raw)) 144 | } 145 | 146 | // decode as json 147 | const res = JSON.parse((new TextDecoder()).decode(await raw.buffer())) 148 | 149 | // print for debugging 150 | console.log(res) 151 | 152 | // make sure we only got 1 entry for the three triggers above 153 | assert.equal(1, res.data.length) 154 | 155 | // make sure we have at least the agents we added 156 | assert(res.data[0][1] >= 9) 157 | 158 | // make sure we have at least the spaces we added 159 | assert(res.data[0][2] >= 3) 160 | 161 | // make sure we recorded the two proxy urls in the proxy pool 162 | assert.equal(2, res.data[0][3]) 163 | }) 164 | 165 | it('should POST correctly (' + query + ')', async function () { 166 | // needs an extended timeout to post everything 167 | this.timeout(0) 168 | 169 | let doApi = async (op: string, body: unknown): Promise => { 170 | let buffer = await fetch(url, { 171 | method: 'post', 172 | body: MP.encode(body), 173 | headers: { 174 | 'Content-Type': 'application/octet', 175 | 'X-Op': op, 176 | }, 177 | }) 178 | .then((res) => { 179 | // For debugging errors. 
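            // (On a non-200 status the body is an error string rather than
            // msgpack, so the decode below would not yield a meaningful value.)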
180 | if (res.status !== 200) { 181 | console.log(res) 182 | } 183 | return res.buffer() 184 | }) 185 | .catch((err) => console.log(err)) 186 | 187 | return MP.decode(Uint8Array.from(buffer)) 188 | } 189 | 190 | // now 191 | let now = await doApi('now', null) 192 | if (typeof now === 'number') { 193 | assert.ok(Number.isInteger(now)) 194 | assert.ok(now > 1604318591241) 195 | } else { 196 | assert.ok(false, 'now not a number') 197 | } 198 | 199 | 200 | // put alice and bob 201 | for (let agent of [ 202 | Agents.aliceAgentVaporSignedRaw, 203 | Agents.aliceAgentWikiSignedRaw, 204 | Agents.bobAgentVaporSignedRaw, 205 | ]) { 206 | let res = await doApi('put', agent) 207 | assert.deepEqual(res, null) 208 | } 209 | 210 | // random list 211 | let randomOne = await doApi('random', { 212 | space: vaporChatSpace, 213 | limit: 1, 214 | }) 215 | // alice or bob is fine 216 | try { 217 | assert.deepEqual(randomOne, [MP.encode(Agents.bobAgentVaporSignedRaw)]) 218 | } catch (e) { 219 | assert.deepEqual(randomOne, [MP.encode(Agents.aliceAgentVaporSignedRaw)]) 220 | } 221 | 222 | let randomTwo = await doApi('random', { 223 | space: vaporChatSpace, 224 | limit: 2, 225 | }) 226 | // either order is fine but we need both 227 | try { 228 | assert.deepEqual(randomTwo, [ 229 | MP.encode(Agents.aliceAgentVaporSignedRaw), 230 | MP.encode(Agents.bobAgentVaporSignedRaw), 231 | ]) 232 | } catch (e) { 233 | assert.deepEqual(randomTwo, [ 234 | MP.encode(Agents.bobAgentVaporSignedRaw), 235 | MP.encode(Agents.aliceAgentVaporSignedRaw), 236 | ]) 237 | } 238 | 239 | let randomOversubscribed = await doApi('random', { 240 | space: wikiSpace, 241 | limit: 2, 242 | }) 243 | assert.deepEqual(randomOversubscribed, [ 244 | MP.encode(Agents.aliceAgentWikiSignedRaw), 245 | ]) 246 | let randomEmpty = await doApi('random', { 247 | space: emptySpace, 248 | limit: 2, 249 | }) 250 | assert.deepEqual(randomEmpty, []) 251 | }) 252 | } 253 | }) 254 | -------------------------------------------------------------------------------- /rust/holochain_bootstrap_core/src/agent_info.rs: -------------------------------------------------------------------------------- 1 | // sometimes it's less confusing to be explicit 2 | #![allow(clippy::needless_lifetimes)] 3 | //! 
Agent Info Structs 4 | 5 | use crate::types::*; 6 | use msgpackin_core::decode::*; 7 | 8 | use alloc::vec::Vec; 9 | 10 | /// Struct for decoding agent info 11 | pub struct AgentInfoRef<'a> { 12 | /// space this agent is part of 13 | pub space: &'a [u8], 14 | 15 | /// agent id / pubkey 16 | pub agent: &'a [u8], 17 | 18 | /// urls this agent is reachable at 19 | pub urls: Vec<&'a str>, 20 | 21 | /// timestamp this blob was signed 22 | pub signed_at_ms: u64, 23 | 24 | /// WARNING this is NOT an absolute timestamp, 25 | /// but an offset from the signed_at_ms field 26 | pub expires_after_ms: u64, 27 | 28 | /// additional opaque meta-info 29 | pub meta_info: &'a [u8], 30 | } 31 | 32 | impl core::fmt::Debug for AgentInfoRef<'_> { 33 | fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { 34 | let space = base64::encode(self.space); 35 | let agent = base64::encode(self.agent); 36 | let meta_info = base64::encode(self.meta_info); 37 | f.debug_struct("AgentInfoRef") 38 | .field("space", &space) 39 | .field("agent", &agent) 40 | .field("urls", &self.urls) 41 | .field("signed_at_ms", &self.signed_at_ms) 42 | .field("expires_after_ms", &self.expires_after_ms) 43 | .field("meta_info", &meta_info) 44 | .finish() 45 | } 46 | } 47 | 48 | fn get_map_len(iter: &mut TokenIter<'_, '_>) -> BCoreResult { 49 | Ok(match iter.next() { 50 | Some(Token::Len(LenType::Map, l)) => l, 51 | _ => return Err(BCoreError::EDecode("expected map".into())), 52 | }) 53 | } 54 | 55 | fn get_str<'dec, 'buf>(iter: &mut TokenIter<'dec, 'buf>) -> BCoreResult<&'buf str> { 56 | let _len = match iter.next() { 57 | Some(Token::Len(LenType::Str, l)) => l, 58 | _ => return Err(BCoreError::EDecode("expected str_len".into())), 59 | }; 60 | 61 | match iter.next() { 62 | Some(Token::Bin(s)) => { 63 | core::str::from_utf8(s).map_err(|_| BCoreError::EDecode("str utf8 error".into())) 64 | } 65 | _ => Err(BCoreError::EDecode("expected str".into())), 66 | } 67 | } 68 | 69 | fn get_bin<'dec, 'buf>(iter: &mut TokenIter<'dec, 'buf>) -> BCoreResult<&'buf [u8]> { 70 | let _len = match iter.next() { 71 | Some(Token::Len(LenType::Bin, l)) => l, 72 | _ => return Err(BCoreError::EDecode("expected bin_len".into())), 73 | }; 74 | 75 | match iter.next() { 76 | Some(Token::Bin(b)) => Ok(b), 77 | _ => Err(BCoreError::EDecode("expected bin".into())), 78 | } 79 | } 80 | 81 | fn get_str_arr<'dec, 'buf>(iter: &mut TokenIter<'dec, 'buf>) -> BCoreResult> { 82 | let len = match iter.next() { 83 | Some(Token::Len(LenType::Arr, l)) => l, 84 | _ => return Err(BCoreError::EDecode("expected array".into())), 85 | }; 86 | 87 | let mut out = Vec::with_capacity(len as usize); 88 | 89 | for _ in 0..len { 90 | out.push(get_str(iter)?); 91 | } 92 | 93 | Ok(out) 94 | } 95 | 96 | fn get_u64(iter: &mut TokenIter<'_, '_>) -> BCoreResult { 97 | match iter.next() { 98 | Some(Token::Num(u)) => Ok(u.to()), 99 | _ => Err(BCoreError::EDecode("expected unsigned int".into())), 100 | } 101 | } 102 | 103 | impl AgentInfoRef<'_> { 104 | /// parse an encoded message-pack agent-info blob into an AgentInfoRef 105 | pub fn decode(buf: &[u8]) -> BCoreResult> { 106 | let mut space = None; 107 | let mut agent = None; 108 | let mut urls = None; 109 | let mut signed_at_ms = None; 110 | let mut expires_after_ms = None; 111 | let mut meta_info = None; 112 | 113 | let mut dec = Decoder::new(); 114 | let mut iter = dec.parse(buf); 115 | 116 | let len = get_map_len(&mut iter)?; 117 | 118 | for _ in 0..len { 119 | let key = get_str(&mut iter)?; 120 | 121 | match key { 122 | "space" => space = 
Some(get_bin(&mut iter)?), 123 | "agent" => agent = Some(get_bin(&mut iter)?), 124 | "urls" => urls = Some(get_str_arr(&mut iter)?), 125 | "signed_at_ms" => signed_at_ms = Some(get_u64(&mut iter)?), 126 | "expires_after_ms" => expires_after_ms = Some(get_u64(&mut iter)?), 127 | "meta_info" => meta_info = Some(get_bin(&mut iter)?), 128 | oth => return Err(BCoreError::EDecode(format!("unexpected key: {oth}"))), 129 | } 130 | } 131 | 132 | Ok(AgentInfoRef { 133 | space: space.ok_or_else(|| BCoreError::EDecode("no space".into()))?, 134 | agent: agent.ok_or_else(|| BCoreError::EDecode("no agent".into()))?, 135 | urls: urls.ok_or_else(|| BCoreError::EDecode("no urls".into()))?, 136 | signed_at_ms: signed_at_ms 137 | .ok_or_else(|| BCoreError::EDecode("no signed_at_ms".into()))?, 138 | expires_after_ms: expires_after_ms 139 | .ok_or_else(|| BCoreError::EDecode("no expires_after_ms".into()))?, 140 | meta_info: meta_info.ok_or_else(|| BCoreError::EDecode("no meta_info".into()))?, 141 | }) 142 | } 143 | } 144 | 145 | /// Struct for decoding signed agent info 146 | pub struct AgentInfoSignedRef<'a> { 147 | /// agent id / pubkey 148 | pub agent: &'a [u8], 149 | 150 | /// ed25519 signature over agent_info by the above agent key 151 | pub signature: &'a [u8], 152 | 153 | /// msgpack encoded agent_info 154 | pub agent_info: &'a [u8], 155 | } 156 | 157 | impl core::fmt::Debug for AgentInfoSignedRef<'_> { 158 | fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { 159 | let agent = base64::encode(self.agent); 160 | let signature = base64::encode(self.signature); 161 | let agent_info = base64::encode(self.agent_info); 162 | f.debug_struct("AgentInfoSignedRef") 163 | .field("agent", &agent) 164 | .field("signature", &signature) 165 | .field("agent_info", &agent_info) 166 | .finish() 167 | } 168 | } 169 | 170 | impl AgentInfoSignedRef<'_> { 171 | /// parse an encoded message-pack signed-agent-info blob into an AgentInfoSignedRef 172 | pub fn decode(buf: &[u8]) -> BCoreResult<AgentInfoSignedRef<'_>> { 173 | let mut agent = None; 174 | let mut signature = None; 175 | let mut agent_info = None; 176 | 177 | let mut dec = Decoder::new(); 178 | let mut iter = dec.parse(buf); 179 | 180 | let len = get_map_len(&mut iter)?; 181 | 182 | for _ in 0..len { 183 | let key = get_str(&mut iter)?; 184 | 185 | match key { 186 | "agent" => agent = Some(get_bin(&mut iter)?), 187 | "signature" => signature = Some(get_bin(&mut iter)?), 188 | "agent_info" => agent_info = Some(get_bin(&mut iter)?), 189 | oth => return Err(BCoreError::EDecode(format!("unexpected key: {oth}"))), 190 | } 191 | } 192 | 193 | Ok(AgentInfoSignedRef { 194 | agent: agent.ok_or_else(|| BCoreError::EDecode("no agent".into()))?, 195 | signature: signature.ok_or_else(|| BCoreError::EDecode("no signature".into()))?, 196 | agent_info: agent_info.ok_or_else(|| BCoreError::EDecode("no agent_info".into()))?, 197 | }) 198 | } 199 | 200 | /// verify the signature, if valid, decode the agent info 201 | /// and perform some additional sanity checks 202 | pub fn verify_and_decode_agent_info(&self) -> BCoreResult<AgentInfoRef<'_>> { 203 | if self.agent.len() != 36 { 204 | return Err(BCoreError::EBadPubKey); 205 | } 206 | 207 | if self.signature.len() != 64 { 208 | return Err(BCoreError::EBadSig); 209 | } 210 | 211 | let pub_key = ed25519_dalek::PublicKey::from_bytes(&self.agent[0..32]) 212 | .map_err(|_| BCoreError::EBadPubKey)?; 213 | let signature = ed25519_dalek::Signature::from_bytes(&self.signature[0..64]) 214 | .map_err(|_| BCoreError::EBadSig)?; 215 | 216 | use ed25519_dalek::Verifier; 217 | pub_key 218 | 
.verify(self.agent_info, &signature) 219 | .map_err(|_| BCoreError::EBadSig)?; 220 | 221 | let info = AgentInfoRef::decode(self.agent_info)?; 222 | 223 | if info.agent != self.agent { 224 | return Err(BCoreError::EBadPubKey); 225 | } 226 | 227 | Ok(info) 228 | } 229 | } 230 | 231 | #[cfg(test)] 232 | mod tests { 233 | use super::*; 234 | 235 | const TEST_SIG_1: &str = "g6lzaWduYXR1cmXEQMNDUQ+j7tA6n+UdI1g3KUty245ihpr6DTt9I7jw8ZZL6kKHlQGwhAGRRAmN1lt8bDXdXotv2CcWf4+l8e6oAQmlYWdlbnTEJF8+ipuTYv6CG1q9FtafNUduCN5aEPyz0HP8Cj/0031zAAAAAKphZ2VudF9pbmZvxLuGpXNwYWNlxCQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAClYWdlbnTEJF8+ipuTYv6CG1q9FtafNUduCN5aEPyz0HP8Cj/0031zAAAAAKR1cmxzkrNodHRwczovL2V4YW1wbGUuY29tr2h0dHBzOi8vZm9vLmNvbaxzaWduZWRfYXRfbXPPAAABfqHDuu6wZXhwaXJlc19hZnRlcl9tc84AAYagqW1ldGFfaW5mb8QA"; 236 | 237 | #[test] 238 | fn deserialize_agent_info() { 239 | let sign = base64::decode(TEST_SIG_1).unwrap(); 240 | 241 | let sign = AgentInfoSignedRef::decode(&sign).unwrap(); 242 | let _info = sign.verify_and_decode_agent_info().unwrap(); 243 | } 244 | } 245 | -------------------------------------------------------------------------------- /LICENSE_APACHE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 
47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # bootstrap 2 | 3 | This is a [CloudFlare Worker](https://workers.cloudflare.com/) that allows 4 | holochain networks to bootstrap. 5 | 6 | ## Installing 7 | 8 | The worker code is all written in typescript using npm. 9 | 10 | Tested on CI against node major versions `12` and `14` on ubuntu. 11 | 12 | Standard `npm install` to install. 13 | 14 | Standard `npm test` to test. Unit tests will be run via ts-node/ts-mocha. Integration tests use the provided `run-integration-test.js` script which first launches a `miniflare` local cloudflare simulator, then executes the integration test suite making api calls against this simulator. 15 | 16 | ## Forking 17 | 18 | We expect and encourage developers to fork and use this code for deployment on 19 | their own CloudFlare account. 20 | 21 | Simply update the `wrangler.toml` with your CF account details and ensure that 22 | github secrets have `CF_API_TOKEN` set for production deployment. 23 | 24 | ## Why do holochain networks need a bootstrap service? 25 | 26 | tl;dr: to mitigate eclipse attacks. 27 | 28 | Holochain is commonly discussed in terms of 'the DHT' (Distributed Hash Table). 29 | 30 | Data is distributed across all the nodes on 'the network' and validated by a 31 | deterministic but random (based on cryptographic hash) set of nodes according to 32 | a set of rules (defined in wasm) implemented as callback functions. 33 | 34 | All of that happens in [the 'conductor'](https://github.com/holochain/holochain). 35 | 36 | Nodes send data to each other to maintain 'the DHT' without central servers. 
37 | 38 | Basic DHT behaviour includes (for example): 39 | 40 | - Redundant data storage and healing as nodes join and leave the network 41 | - Identifying and mitigating bad actors 42 | - Direct p2p realtime communications and RPC style wasm calls 43 | 44 | Being more specific, there are _two_ DHTs. 45 | 46 | One DHT tracks the signed (by the node) ephemeral network locations of nodes in 47 | parallel to the 'main DHT' that handles all the holochain data and validation. 48 | 49 | We can call this the 'agent DHT'; it is much simpler: 50 | 51 | - The rules for agent data validation are hardcoded into the networking layer 52 | - It only tracks agent IDs (pubkeys) and spaces (DHT/DNA hashes) 53 | - It requires agents to pass a challenge to participate 54 | 55 | Unlike the data DHT, where there may be many TBs of data in a large, active DHT, 56 | the agent info is relatively small and each agent can only broadcast a single 57 | agent info data point per space. 58 | 59 | It's expected that each agent can hold a significantly higher percentage of the 60 | agent DHT data than of a typical data DHT. Any agent lookup can often be completed 61 | in zero or very few hops. 62 | 63 | The agent's signature of their current network location is returned alongside 64 | their information and validated. Malicious actors on the network cannot tamper 65 | with another agent's location; the worst they can do is withhold another agent's 66 | location, but as long as at least one honest agent is returning the signed agent 67 | location, that agent is discoverable. 68 | 69 | At this point we still have two big, obvious problems: 70 | 71 | - How do we handle firewalls etc.? 72 | - How does a new node safely find an honest node in the first place? 73 | 74 | The solution to the first problem is handled via the holochain _proxy_ and is 75 | completely separate from the _bootstrap_ service. 76 | 77 | The _proxy_ allows nodes that are already aware of each other indirectly to open 78 | connections to each other directly, regardless of firewalls, etc. 79 | 80 | The _bootstrap_ service allows nodes to advertise their current network location 81 | _independent of the agent DHT_. 82 | 83 | For example, this repository implements a bootstrap service as: 84 | 85 | - A simple POST based API that accepts signed agent info 86 | - A CloudFlare backed key/value store 87 | - Agent information automatically expires (is deleted) 88 | - The service can be forked/copied by any hApp developer and deployed to their 89 | own CloudFlare account 90 | - 'Trusted' agent public keys can be set by the service owner to further 91 | mitigate eclipse attacks at the expense of needing to maintain high(ish) 92 | availability nodes (not implemented yet) 93 | 94 | This allows nodes that want to safely join a DHT space to prepopulate their 95 | agent locations with everyone advertising themselves periodically. 96 | 97 | ## Limitations of the bootstrap service 98 | 99 | There are some obvious limits of the bootstrap service as currently implemented. 100 | 101 | Some of these limitations can be mitigated relatively easily and others need 102 | more effort or domain specific solutions. 103 | 104 | ### Sybil ghost network 105 | 106 | It's pretty easy for someone to spam the kv store with apparently valid data 107 | that has been signed by a garbage keypair and doesn't lead anywhere. 108 | 109 | This would create a ghost network where so few listed agents are real that a new 110 | user cannot open any useful connections.
111 | 112 | It also puts pressure on the server, which in this case is CloudFlare so I'm 113 | sure they can handle it, but it may result in additional costs for the account 114 | owner. 115 | 116 | Mitigations: 117 | 118 | - Trust and delegated trust model (need to be a dev or approved by a dev) 119 | - Identity/auth based challenge (e.g. DPKI) 120 | - Anti-spam/throttling challenges (e.g. proof of work) 121 | - Proof of unique human (e.g. QR code systems like BrightID) 122 | 123 | Anything that meaningfully raises the bar for entry above 'can sign data' is 124 | useful mitigation here. 125 | 126 | At the time of writing we are simply expiring all key/value pairs after some 127 | time, which is a relatively weak challenge but at least sybils will fade 128 | quickly unless there is a dedicated machine somewhere actively generating them 129 | over a long period of time. 130 | 131 | Additionally, CloudFlare themselves implement anti-bot protections at the 132 | network layer that we passively benefit from simply by using their service. 133 | 134 | ### Eclipse attack 135 | 136 | Similar to the ghost network situation, a more sophisticated attack generates a 137 | large number of agents that do resolve to a real connection. 138 | 139 | The real but malicious connections 'fork' new users off onto a parallel set of 140 | DHTs. The sheer number of fake accounts defeats the bootstrap service as a means 141 | to avoid eclipse attacks because some percentage of new users will never find an 142 | honest signal among the malicious noise. 143 | 144 | For an arbitrarily sophisticated sybil attack we can't hope to automatically detect 145 | it at the bootstrap service level with an algorithm. 146 | 147 | At some point an element of trust will need to be applied to the bootstrapping 148 | process. 149 | 150 | Even monero, a world class privacy and trust minimised blockchain, relies on a 151 | website [Monero World](https://moneroworld.com/#nodes) to list out some trusted 152 | nodes that can bootstrap new users onto the monero network safely. This relies 153 | on users finding the list when they start their wallet, and downloading a safe 154 | wallet in the first place, and trusting the developers that write the monero 155 | code, and the machine the user runs the code on... etc. 156 | 157 | The best we can do is to allow for decentralisation so that no party can _force_ 158 | themselves to be 'trusted' and to implement an explicit trust model so that 159 | agents can be vetted by each other. 160 | 161 | This is similar to the [ERC-20 lists used by uniswap](https://tokenlists.org/): 162 | there is a default list maintained by uniswap and then several dozen community 163 | maintained lists. Users of uniswap then select which list they'd like to opt in 164 | to in order to be protected against phishing and other scams. 165 | 166 | This bootstrap service currently has no concept of trust in it but it will in 167 | the future. 168 | 169 | The `random` endpoint _does_ enforce that random agents are returned from the 170 | running service, so that a client cannot be tricked into selecting specific 171 | agents from the listings. This puts additional trust on CloudFlare (see below). 172 | 173 | With any trust model, there will be some set of public keys that agents would be 174 | strongly encouraged to prioritise when joining a network. 175 | 176 | These public keys would be set after some kind of elevated access challenge.
177 | For example: 178 | 179 | - Set directly in the CloudFlare interface by the account owner (developer) 180 | - Requiring a signature from another already-trusted agent (delegation) 181 | - Requiring a signature from some external system (identity) 182 | - Some other challenge (algorithmic, API key, etc.) 183 | 184 | The service owner then sets the trust model and populates and maintains the 185 | trusted public keys, and end-users opt in to a bootstrap service whose operator 186 | and trust model they decide to trust. 187 | 188 | ### DOS attack 189 | 190 | CloudFlare themselves are one of the world leaders in mitigating DOS attacks for 191 | their clients. 192 | 193 | The logic in the workers is limited to simple cryptographic checks and direct 194 | interactions with the CloudFlare kv store. 195 | 196 | It's unlikely that an attacker could exploit something in this repository that 197 | brings down the service for honest users. The worst they could do is trip the 198 | 10-50ms CPU circuit breaker on an individual request and see a 500 error for 199 | themselves. 200 | 201 | ### Trusting CloudFlare 202 | 203 | Of course, all this talk of explicit trust is ignoring the need to implicitly 204 | trust CloudFlare as the infrastructure provider of the kv service. 205 | 206 | Given that we're cryptographically signing absolutely everything, the damage 207 | that CloudFlare can do is limited to withholding data or failing to provide their 208 | service. They cannot tamper with or inject any additional data. 209 | 210 | CloudFlare returns the original agent info bytes alongside the agent pubkey and 211 | signature to all agents, so that every agent can independently verify the data. 212 | Agents do not need to trust that CloudFlare has not tampered with the data 213 | because they SHOULD do their own cryptographic verification of all data returned 214 | from any bootstrap service. This removes the temptation for an attacker to attempt 215 | to hijack a bootstrap service to invisibly serve up bad agent network 216 | locations. 217 | 218 | To mitigate the need to trust CloudFlare _in general_ we have a well defined and 219 | very simple POST API that most web developers could be confident in implementing 220 | correctly. This way they can build their own binaries and servers that are 221 | compatible with the holochain conductors and host these anywhere. 222 | 223 | The main concern is that the `random` op is opaque from the caller's point of 224 | view and this is where CloudFlare could collude with (or be hacked by) an 225 | attacker to "randomly" only return malicious nodes. 226 | 227 | The two main defenses against CloudFlare are (not implemented yet): 228 | 229 | - Developers easily forking the service and users easily configuring many 230 | bootstrap services in conductors (decentralised bootstrap). 231 | - Signed responses from bootstrap services as part of the API so that agents can 232 | audit and cross-reference responses against what is found in the DHT and/or 233 | other services, similar to how time audits work in the Roughtime protocol. 234 | 235 | ## API 236 | 237 | ### POST method 238 | 239 | All API requests use the HTTP POST method. 240 | 241 | This is for both gets and sets. 242 | 243 | This is so that we can use messagepack binary data as-is for all requests and 244 | responses with no 'extra steps' like handling base64 encoding/decoding and URL 245 | parsing just to work with binary data.
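As a rough illustration of that request shape (this is not code from this repository), a client call might look like the sketch below. The service URL is hypothetical, the public `@msgpack/msgpack` package stands in for this repo's own msgpack wrapper, and the `X-Op` / `Content-Type` headers are the ones described in the Headers section further down.

```typescript
import { encode, decode } from '@msgpack/msgpack'

// Hypothetical deployment URL, for illustration only.
const SERVICE_URL = 'https://bootstrap.example.com'

// POST a messagepack-encoded payload for the given op and decode the
// messagepack response. Every op goes through the same request shape.
async function postOp(op: string, payload: unknown): Promise<unknown> {
  const res = await fetch(SERVICE_URL, {
    method: 'POST',
    headers: {
      'Content-Type': 'application/octet',
      'X-Op': op,
    },
    body: encode(payload),
  })
  if (!res.ok) {
    throw new Error(`bootstrap request failed: ${res.status}`)
  }
  return decode(new Uint8Array(await res.arrayBuffer()))
}

// Example (assumes the `now` op accepts a messagepack `null` body):
// const nowMs = (await postOp('now', null)) as number
```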
246 | 247 | ### GET ping 248 | 249 | There is _one_ GET endpoint, used for debugging, testing and health checks. 250 | 251 | All GET requests to the bootstrap service will receive the response string `OK` 252 | encoded as UTF-8 text data and the `200` status code. 253 | 254 | The GET endpoint behaves differently to all other methods in that it is not 255 | serialized, is not binary data, has no op headers and cannot interact with the 256 | kv service at all. 257 | 258 | ### MessagePack serialization 259 | 260 | All requests and responses are serialized as binary MessagePack data in the 261 | body of the request/response. 262 | 263 | The op header (see below) defines which operation the messagepack payload in a 264 | request is dispatched to and what kind of response will be returned. 265 | 266 | This is the same serialization format that holochain itself uses for wire 267 | messages on the network and for compatibility with wasm for validation logic. 268 | 269 | ### Ed25519/NaCl cryptography 270 | 271 | All cryptographic logic is handled as per libsodium using the Ed25519 curve. 272 | 273 | This is the same cryptography as holochain itself, which means the signatures and 274 | validation used by the bootstrap service are the same as those used by the agent 275 | DHT by conductors. 276 | 277 | This implementation uses [tweetnacl](https://www.npmjs.com/package/tweetnacl) for validation 278 | but does not ever generate any signatures. 279 | 280 | ### Headers 281 | 282 | The `Content-Type` header for all POST requests must be `application/octet` to 283 | signify to the server that the body of the request is a binary payload. 284 | 285 | The action to be performed is set by the `X-Op` header in the POST request. 286 | 287 | The possible values are: 288 | 289 | - `put`: store signed agent info 290 | - `random`: retrieve up to N random agents 291 | - `now`: get the current server time as a unix milliseconds timestamp 292 | 293 | ## Data structures 294 | 295 | ### Agent info 296 | 297 | `AgentInfoSigned` is the main data structure. 298 | 299 | It is the same structure defined by `kitsune_p2p` in the Rust codebase for the 300 | conductor, but ported to typescript for validation here. 301 | 302 | It looks like this on the wire: 303 | 304 | ```typescript 305 | { 306 | signature: Uint8Array, 307 | agent: Uint8Array, 308 | agent_info: Uint8Array, 309 | } 310 | ``` 311 | 312 | Where the `agent_info` is messagepack serialized binary data that MUST be valid 313 | for the `agent` public key and `signature` bytes according to libsodium. 314 | 315 | If the `agent_info` is not valid then it MUST be discarded and any further logic 316 | abandoned because this is ALWAYS malicious or corrupt data. The inner 317 | `agent_info` MUST NOT be deserialized if it is invalid. 318 | 319 | When the `agent_info` is validated and unpacked it looks like this: 320 | 321 | ```typescript 322 | agent_info: { 323 | space: Uint8Array, 324 | agent: Uint8Array, 325 | urls: Array<string>, 326 | signed_at_ms: number, expires_after_ms: number, 327 | } 328 | ``` 329 | 330 | - `space` is the bytes of the hash of the DNA used to connect to the DHT 331 | - `agent` is the same as the signing key above and MUST match it 332 | - `urls` is an array of strings that are the URLs the agent can be found at 333 | - `signed_at_ms` is the unix millisecond timestamp of the signing - `expires_after_ms` is how long after `signed_at_ms` the info remains valid (an offset in milliseconds, not an absolute timestamp) 334 | 335 | The `AgentInfoSigned` packed data is saved and retrieved by the bootstrap 336 | service. 
We store exactly what is given to us by the agent alongside its 337 | cryptographic integrity and authenticity proof (signature). 338 | 339 | This allows all agents using the bootstrap service to redundantly verify the 340 | data for themselves, which is an important hedge against a compromised service. 341 | 342 | ### KV keys 343 | 344 | Valid `AgentInfoSigned` data is stored under the binary concatenation of 345 | `space` + `agent` in the CloudFlare kv store. 346 | 347 | For example, if there was a space `[1, 2, 3]` and agent `[4, 5, 6]` the kv key 348 | would be `[1, 2, 3, 4, 5, 6]`. 349 | 350 | Technically CloudFlare kv does not support prefix based lookups (which we need) 351 | for raw binary keys, so internally we base64 the space and agent separately 352 | before concatenating them. This is an _internal implementation detail only_ so 353 | any attempt to externally interact with keys as base64 data will fail because 354 | the input/output will be treated as raw binary bytes, not utf8 encoded data. 355 | 356 | This is more efficient on the wire and decouples the messagepack binary API 357 | design from the CloudFlare key prefix lookup implementation. 358 | 359 | For example, when performing a `get` op the POST body would contain the space 360 | and agent key raw binary bytes, not a base64 or utf8 representation of these. 361 | 362 | ## Ops 363 | 364 | ### Put 365 | 366 | Put a signed agent info into the kv store. 367 | 368 | `X-Op` header: `put` 369 | 370 | Request body: Messagepack serialized `AgentInfoSigned` data (see above). 371 | 372 | Successful response: Messagepack encoded `null`, i.e. `[ 192 ]` binary body. 373 | 374 | If the `AgentInfoSigned` data validates on the CloudFlare worker it will be 375 | saved under the kv key (see above) for the parsed `space` and `agent`. 376 | 377 | The value will expire (be deleted) at signing time plus the expiry offset 378 | (`signed_at_ms` + `expires_after_ms`), as per the signed agent information. 379 | 380 | The expectation is that agents repost their current location periodically to 381 | maintain liveness. 382 | 383 | ### Random 384 | 385 | Get _up to_ `limit` random `AgentInfoSigned` for a given `space`. 386 | 387 | `X-Op` header: `random` 388 | 389 | Request body: Messagepack serialized `{ space: Uint8Array, limit: number }`. 390 | The `limit` must be a positive integer. 391 | 392 | Successful response: Messagepack serialized array of `AgentInfoSigned` data. 393 | If there are at least `limit` agents in the `space` then 394 | there will always be `limit` random agents returned. 395 | If there are fewer than `limit` agents in the `space` then 396 | all available agents will be returned in random order. 397 | If there are no agents, the response is a messagepack empty array, 398 | i.e. `[221, 0, 0, 0, 0]`. 399 | 400 | This is the default and recommended way for an agent to fetch node information 401 | as it balances network efficiency against eclipse mitigation via randomness. 402 | 403 | Agents are encouraged to fetch as many random agents as they can comfortably 404 | handle to maximise the diversity of their view on the network before they 405 | attempt to join, which has benefits beyond eclipse protection.
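For example, a client fetching peers for a space might issue a `random` request like the sketch below (reusing the hypothetical `postOp` helper from the POST method section; the response entries are assumed here to be the stored `AgentInfoSigned` blobs as raw bytes).

```typescript
// Fetch up to `limit` random signed agent infos for a space.
async function fetchRandomPeers(
  spaceHash: Uint8Array,
  limit = 16,
): Promise<Uint8Array[]> {
  const agents = (await postOp('random', {
    space: spaceHash,
    limit,
  })) as Uint8Array[]

  // Every entry MUST be independently verified by the caller
  // (see the Validation section below) before it is trusted.
  return agents
}
```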
406 | 407 | ### Now 408 | 409 | Get the time 'now' from the service as a millisecond unix timestamp. 410 | 411 | The `signed_at_ms` SHOULD be in the past from the perspective of the bootstrap 412 | service but this is NOT enforced. The correctness of times remains between 413 | agents and so the bootstrap service is agnostic to far future signed and expiry 414 | times. That said, the bootstrap service will not hold data longer than an hour 415 | and peers are free to ignore bad times (in their opinion). 416 | 417 | If an agent wants some assurance that the signing time will be accepted by 418 | their peers it can first call `now` and then use the returned timestamp for 419 | signing. 420 | 421 | An agent can call `now` once upon booting a conductor and then calculate an 422 | offset relative to their agent local time, then use the offset for as long as it 423 | is safe to assume that the local clock has not shifted. 424 | 425 | A full clock sync algorithm like (S)NTP is NOT required; the signing time simply 426 | needs to be within a few seconds on both machines and in the past from the 427 | perspective of the service. A simple `min` comparison with the local time, or 428 | even direct deferral to the server time, is likely sufficient. 429 | 430 | ## Validation 431 | 432 | Validation rules are well defined for signed agent info and all other binary 433 | data is fixed size. 434 | 435 | ### SignedAgentInfo validation 436 | 437 | Validation is a 'chained' operation in that each step of the validation will be 438 | attempting to verify some aspect of the data. Any step that fails MUST abort the 439 | entire validation chain as a failure to validate the data. That is to say, any 440 | corrupt or bad data MUST immediately stop validation and return an error. The 441 | error SHOULD be descriptive to aid logging and debugging. A sketch of this chain is given after the numbered steps below. 442 | 443 | 0. The raw `SignedAgentInfo` on the wire will be a messagepacked object with 444 | keys `signature`, `agent`, and `agent_info` and binary array values. 445 | 1. Attempt to decode the messagepack data into the object. 446 | 2. Check that the `signature` is 64 bytes long, as per `Ed25519` signatures. 447 | 3. Check that the `agent` pubkey is 32 bytes long, as per `Ed25519` public keys. 448 | 4. Use `libsodium` to verify the `agent_info` bytes using the `signature` and 449 | `agent` pubkey, as per `Ed25519`. 450 | 5. IF the signature is valid, attempt to deserialize the `agent_info` bytes 451 | using messagepack to an `AgentInfo` object (see above). 452 | 6. Check the `space` is 32 bytes long, as per base HoloHash bytes. 453 | 7. Check the `agent` is 32 bytes long, as per `Ed25519` public keys. 454 | 8. Check the `agent` bytes are equal to the `agent` bytes used to verify the 455 | signature above. 456 | 9. Check the `urls` is an array of utf8 strings. 457 | 10. Check there are 256 or fewer `urls` in the array. 458 | 11. Check every `url` is 2048 or fewer utf8 _bytes_, e.g. utf8 multibyte 459 | characters are counted as several bytes towards the limit. 460 | 12. Check the `signed_at_ms` is an integer. 461 | 13. Check the `signed_at_ms` is a positive number. 462 | 14. Check the `expires_after_ms` is an integer. 463 | 15. Check the `expires_after_ms` is between `MIN_EXPIRES` and `MAX_EXPIRES`. 464 | These are currently 1 minute and 1 hour respectively, in milliseconds. 465 | 
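To make the chain concrete, here is a minimal TypeScript sketch of the steps above. It is illustrative only: it uses the public `tweetnacl` and `@msgpack/msgpack` packages rather than this repository's own wrappers, the constants follow the list above, and the error messages are invented. Note that the Rust core in this repository accepts 36 byte agent keys and verifies against the first 32 bytes; the sketch follows the 32 byte lengths documented above.

```typescript
import * as nacl from 'tweetnacl'
import { decode } from '@msgpack/msgpack'

const MIN_EXPIRES = 60 * 1000 // 1 minute, in milliseconds
const MAX_EXPIRES = 60 * 60 * 1000 // 1 hour, in milliseconds

// Throws on the first failing step, as required by the chained model.
export function validateSignedAgentInfo(raw: Uint8Array) {
  // 0-1. decode the outer object
  const signed = decode(raw) as {
    signature: Uint8Array
    agent: Uint8Array
    agent_info: Uint8Array
  }

  // 2-3. fixed byte lengths
  if (signed.signature.length !== 64) throw new Error('bad signature length')
  if (signed.agent.length !== 32) throw new Error('bad agent pubkey length')

  // 4. Ed25519 verification of the raw agent_info bytes
  const ok = nacl.sign.detached.verify(
    signed.agent_info,
    signed.signature,
    signed.agent,
  )
  if (!ok) throw new Error('signature does not verify')

  // 5. only now deserialize the inner agent_info
  const info = decode(signed.agent_info) as {
    space: Uint8Array
    agent: Uint8Array
    urls: string[]
    signed_at_ms: number
    expires_after_ms: number
  }

  // 6-8. space / agent lengths and signing key equality
  if (info.space.length !== 32) throw new Error('bad space length')
  if (info.agent.length !== 32) throw new Error('bad agent length')
  if (!info.agent.every((b, i) => b === signed.agent[i])) {
    throw new Error('inner agent does not match signing key')
  }

  // 9-11. urls: type, count, and per-url utf8 byte length
  if (!Array.isArray(info.urls)) throw new Error('urls is not an array')
  if (info.urls.length > 256) throw new Error('too many urls')
  const utf8 = new TextEncoder()
  for (const url of info.urls) {
    if (typeof url !== 'string') throw new Error('url is not a string')
    if (utf8.encode(url).length > 2048) throw new Error('url too long')
  }

  // 12-15. timestamps
  if (!Number.isInteger(info.signed_at_ms) || info.signed_at_ms <= 0) {
    throw new Error('bad signed_at_ms')
  }
  if (!Number.isInteger(info.expires_after_ms)) {
    throw new Error('bad expires_after_ms')
  }
  if (info.expires_after_ms < MIN_EXPIRES || info.expires_after_ms > MAX_EXPIRES) {
    throw new Error('expires_after_ms out of range')
  }

  return info
}
```
--------------------------------------------------------------------------------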