├── .dockerignore ├── .github ├── CODEOWNERS └── workflows │ ├── create-do-app.yml │ ├── delete-do-app.yml │ └── rust.yml ├── .gitignore ├── Cargo.toml ├── LICENSE ├── README.md ├── adi ├── Cargo.toml ├── README.md └── src │ ├── error.rs │ ├── gf.rs │ └── lib.rs ├── api ├── .gitignore ├── Cargo.toml ├── Dockerfile ├── README.md ├── example │ ├── seed │ ├── seed.pub │ ├── service-get.sh │ ├── services │ │ └── hello │ ├── session-get.sh │ ├── session-id.b64 │ ├── session-patch.json │ ├── session-patch.sh │ ├── session-put.json │ ├── session-put.sh │ ├── sessions │ │ └── r8N1EOII3DAfb1BCcbPfSu9uXF717C0xrJaf7kGi4Pk= │ ├── timestamp │ ├── timestamp-sig │ ├── vault │ ├── vault-bin │ ├── vault-get.sh │ ├── vault-put.sh │ ├── vault-sig │ ├── vault-sig-bin │ └── vaults │ │ └── r8N1EOII3DAfb1BCcbPfSu9uXF717C0xrJaf7kGi4Pk= ├── res │ └── regexes.yaml ├── src │ ├── assistant.rs │ ├── auth.rs │ ├── brandfetch.rs │ ├── lib.rs │ ├── magic_share.rs │ ├── mailbox.rs │ ├── main.rs │ ├── store.rs │ ├── store │ │ ├── file.rs │ │ └── s3.rs │ ├── twilio.rs │ └── verify_token.rs └── tests │ ├── requests.rs │ ├── service_list.rs │ └── services.json ├── cli ├── Cargo.toml ├── README.md └── src │ ├── lib.rs │ └── main.rs ├── djb ├── Cargo.toml ├── README.md └── src │ └── lib.rs ├── ffi ├── Cargo.toml ├── build.rs ├── cbindgen.toml ├── include │ └── libuno.h └── src │ └── lib.rs ├── lib ├── Cargo.toml ├── README.md └── src │ ├── error.rs │ └── lib.rs ├── rust-toolchain.toml ├── rustfmt.toml ├── s39 ├── Cargo.toml ├── README.md └── src │ ├── error.rs │ └── lib.rs ├── wsm ├── Cargo.toml ├── README.md ├── src │ └── lib.rs └── tests │ └── wasm.rs └── xcf ├── Cargo.toml ├── build.rs ├── cbindgen.toml ├── include └── uno.h ├── src └── lib.rs ├── sumo.sh ├── xcode12 ├── aarch64-apple-ios14.0-macabi.json ├── aarch64-apple-ios14.0-simulator.json ├── aarch64-apple-ios7.0.0.json ├── sumo.sh └── x86_64-apple-ios7.0.0-simulator.json └── xcode13 ├── aarch64-apple-ios7.0.0.json ├── sumo.sh └── 
x86_64-apple-ios7.0.0-sim.json /.dockerignore: -------------------------------------------------------------------------------- 1 | target 2 | -------------------------------------------------------------------------------- /.github/CODEOWNERS: -------------------------------------------------------------------------------- 1 | api/* @withuno/api 2 | -------------------------------------------------------------------------------- /.github/workflows/create-do-app.yml: -------------------------------------------------------------------------------- 1 | name: Create PR Environment 2 | 3 | on: 4 | pull_request: 5 | types: [opened, reopened] 6 | 7 | jobs: 8 | create-app: 9 | name: Configure DigitalOcean App 10 | runs-on: ubuntu-latest 11 | steps: 12 | - name: Extract Branch Name 13 | shell: bash 14 | run: | 15 | echo ${GITHUB_HEAD_REF#refs/heads/} \ 16 | | tee head.ref 17 | echo "::set-output name=branch::$(cat head.ref)" 18 | id: extract 19 | 20 | - name: Install doctl 21 | uses: digitalocean/action-doctl@v2 22 | with: 23 | token: ${{ secrets.DIGITAL_OCEAN_TOKEN }} 24 | 25 | - name: Write App Spec 26 | env: 27 | PR_NUM: ${{ github.event.number }} 28 | BRANCH_NAME: ${{ steps.extract.outputs.branch }} 29 | run: | 30 | cat <app.yaml 31 | name: identity-pr-${PR_NUM} 32 | region: nyc 33 | services: 34 | - name: api 35 | dockerfile_path: api/Dockerfile 36 | github: 37 | repo: ${{ github.repository }} 38 | branch: ${BRANCH_NAME} 39 | deploy_on_push: true 40 | health_check: 41 | http_path: /v2/health 42 | instance_size_slug: basic-xs 43 | envs: 44 | - key: SPACES_ACCESS_KEY_ID 45 | value: ${{ secrets.DIGITAL_OCEAN_SPACES_KEY }} 46 | scope: RUN_TIME 47 | type: SECRET 48 | - key: SPACES_SECRET_ACCESS_KEY 49 | value: ${{ secrets.DIGITAL_OCEAN_SPACES_SECRET }} 50 | scope: RUN_TIME 51 | type: SECRET 52 | - key: SPACES_HOSTNAME 53 | value: "https://nyc3.digitaloceanspaces.com" 54 | scope: RUN_TIME 55 | - key: SPACES_REGION 56 | value: "nyc3" 57 | scope: RUN_TIME 58 | - key: 
SPACES_BUCKET_PREFIX 59 | value: "u1o.dev" 60 | scope: RUN_TIME 61 | - key: TWILIO_API_ENDPOINT 62 | value: "https://twilio.com/" 63 | scope: RUN_TIME 64 | - key: TWILIO_SERVICE_SID 65 | value: ${{ secrets.TWILIO_SERVICE_SID }} 66 | scope: RUN_TIME 67 | type: SECRET 68 | - key: TWILIO_ACCOUNT_SID 69 | value: ${{ secrets.TWILIO_ACCOUNT_SID }} 70 | scope: RUN_TIME 71 | type: SECRET 72 | - key: TWILIO_AUTH_TOKEN 73 | value: ${{ secrets.TWILIO_AUTH_TOKEN }} 74 | scope: RUN_TIME 75 | type: SECRET 76 | - key: CUSTOMER_IO_API_ENDPOINT 77 | value: "https://api.customer.io/v1/send/email" 78 | scope: RUN_TIME 79 | - key: CUSTOMER_IO_API_KEY 80 | value: ${{ secrets.CUSTOMER_IO_API_KEY }} 81 | scope: RUN_TIME 82 | type: SECRET 83 | - key: CUSTOMER_IO_MESSAGE_ID 84 | value: "4" 85 | scope: RUN_TIME 86 | - key: VERIFY_EMAIL_DOMAIN 87 | value: "https://verify.u1o.dev/" 88 | scope: RUN_TIME 89 | - key: VERIFICATION_CODE_OVERRIDE_SMS 90 | value: "42424242" 91 | scope: RUN_TIME 92 | - key: PUBLIC_URL 93 | value: \${_self.PUBLIC_URL} 94 | scope: RUN_TIME 95 | - key: OPENAI_API_KEY 96 | value: ${{ secrets.OPENAI_API_KEY }} 97 | type: SECRET 98 | scope: RUN_TIME 99 | - key: ASSISTANT_ENDPOINT 100 | value: ${{ secrets.ASSISTANT_ENDPOINT }} 101 | type: SECRET 102 | scope: RUN_TIME 103 | - key: BRANDFETCH_API_KEY 104 | value: ${{ secrets.BRANDFETCH_API_KEY }} 105 | type: SECRET 106 | scope: RUN_TIME 107 | EOF 108 | 109 | - name: Validate App Spec 110 | run: | 111 | doctl apps spec validate app.yaml 112 | id: validate 113 | 114 | - name: Create New App 115 | run: | 116 | doctl apps create --spec app.yaml --no-header --format ID 2>errors \ 117 | | tee app.id 118 | echo "::set-output name=id::$(cat app.id)" 119 | cat errors 120 | id: app 121 | 122 | - name: Add PR Comment 123 | uses: mshick/add-pr-comment@v1 124 | with: 125 | repo-token: ${{ secrets.GITHUB_TOKEN }} 126 | message: "Started deployment https://cloud.digitalocean.com/apps/${{ steps.app.outputs.id }}/deployments" 127 | 
-------------------------------------------------------------------------------- /.github/workflows/delete-do-app.yml: -------------------------------------------------------------------------------- 1 | name: Delete PR Environemnt 2 | 3 | on: 4 | pull_request: 5 | types: [closed] 6 | 7 | jobs: 8 | delete-app: 9 | name: Remove DigitalOcean App 10 | runs-on: ubuntu-latest 11 | steps: 12 | - name: Install doctl 13 | uses: digitalocean/action-doctl@v2 14 | with: 15 | token: ${{ secrets.DIGITAL_OCEAN_TOKEN }} 16 | 17 | - name: Delete App 18 | env: 19 | PR_NUM: ${{ github.event.number }} 20 | run: | 21 | doctl apps list --no-header --format ID,Spec.Name \ 22 | | grep identity-pr-${PR_NUM} \ 23 | | cut -b1-36 \ 24 | | tee app.id 25 | doctl apps delete -f $(cat app.id) 26 | 27 | -------------------------------------------------------------------------------- /.github/workflows/rust.yml: -------------------------------------------------------------------------------- 1 | name: Rust 2 | 3 | on: 4 | push: 5 | branches: [ main ] 6 | pull_request: 7 | branches: [ main ] 8 | 9 | env: 10 | CARGO_TERM_COLOR: always 11 | 12 | jobs: 13 | build: 14 | 15 | runs-on: ubuntu-latest 16 | 17 | steps: 18 | - uses: actions/checkout@v2 19 | - name: Install rust 20 | run: rustup toolchain install nightly 21 | - name: Install rustfmt 22 | run: rustup +nightly component add rustfmt 23 | - name: Check 24 | run: cargo +nightly fmt --check --verbose 25 | - name: Tests 26 | run: cargo +nightly test --verbose 27 | - name: S3 Build 28 | run: cargo +nightly build --package api --features s3 --verbose 29 | 30 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Generated by Cargo 2 | # will have compiled files and executables 3 | /target/ 4 | 5 | # Remove Cargo.lock from gitignore if creating an executable, leave it for libraries 6 | # More information here 
https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html 7 | Cargo.lock 8 | 9 | # These are backup files generated by rustfmt 10 | **/*.rs.bk 11 | 12 | # Vim backup files 13 | *.swp 14 | 15 | # macOS why 16 | .DS_Store 17 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | # Top level virtual manifest that ties uno rust software together. 2 | # 3 | # Most of the time you would work out of this directory. The virtual manifest 4 | # serves to keep dependencies in sync across projects and allows team members 5 | # to build and test related rust components all at once. This is our mono- 6 | # tree and mono-build. 7 | 8 | # The following is a list of all the projects to include. 9 | [workspace] 10 | members = ["adi", "api", "cli", "djb", "ffi", "s39", "lib", "xcf", "wsm"] 11 | resolver = "2" 12 | 13 | # We make use of expensive slow hashes and the DX benefits greatly from 14 | # optimizations. 15 | [profile.dev] 16 | opt-level = 2 17 | 18 | [profile.unoptimized] 19 | inherits = "dev" 20 | opt-level = 0 21 | 22 | 23 | # While cargo supports specifying both a path and version dependency, doing so 24 | # in any one of the sibling projects here would impose a file-system structure 25 | # upon anybody working independently in an isolated project. Patching the deps 26 | # in the virtual manifest allows for the projects to be published without any 27 | # required structure (in other words without ugly `../` paths, which also 28 | # fail if the repo is checked out individually). 29 | 30 | # Each member should also be patched so that building happens from the local 31 | # filesystem. 
32 | [patch.crates-io] 33 | #adi = { path = "adi" } 34 | #api = { path = "api" } 35 | #cli = { path = "cli" } 36 | #djb = { path = "djb" } 37 | #ffi = { path = "ffi" } 38 | #s39 = { path = "s39" } 39 | #uno = { path = "lib" } 40 | #wsm = { path = "wsm" } 41 | #xcf = { path = "xcf" } 42 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | Uno Identity Manager 2 | === 3 | 4 | The [Uno][uno] identity platform is a modern password identity manager. 5 | We have full featured clients on [iOS/macOS][apple-apps] with an accompanying [browser extension][chrome-ext] for Safari and the Chrome family of browsers. 6 | This is our Rust reference implementation with a CLI and associated API server. 7 | You can read more about our project and design on [our blog][blog]. 8 | 9 | [uno]: https://www.uno.app 10 | [apple-apps]: https://testflight.apple.com/join/LyQml0cF 11 | [chrome-ext]: https://chrome.google.com/webstore/detail/uno-password-manager/aedmcpfnnmbdobnjpglgjkhdpljdcfmh 12 | [blog]: https://www.uno.app/blog 13 | 14 | Note: the CLI is not currently designed to be used as a fully functional Uno client in the way our mobile, desktop, and browser applications are. 15 | If you're intersted in a full-featured open source rust CLI password manager like we are, please, help us build it out (: 16 | 17 | # Overview 18 | 19 | There are two binaries: 20 | 21 | * [`uno`](cli) is the cli for interacting with libuno and with the API. 22 | * [`api`](api) is the server used for storage, messaging, email verification, phone directory, and ephemeral sessions. 23 | 24 | Supporting crates include: 25 | 26 | * [`adi`](adi) contains our reference implementation of [SSS][sss], guided by HashiCorp's [go implementation][hashi-sss]. 27 | * [`djb`](djb) contains Curve25519 crypto, both symmetric cand asymmetric (chacha20-poly1305 AEAD and ed25519 public key signing). 
28 | * [`ffi`](ffi) contains the C bindings for libuno. 29 | * [`lib`](lib) is libuno, which incorporates `s39` and `djb` as well as providing types for creating and working with uno identities (32 bytes of entropy plus some kdf). 30 | * [`s39`](s39) exposes SLIP-39 functionality using Uno library types. 31 | * [`wsm`](wsm) [wasm][wasm] [bindings][wbindgen] for libuno, used in our browser extensions. 32 | * [`xcf`](xcf) packages the `ffi` as an XCFramework use with [UnoSwift][] in our iOS and macOS apps. 33 | 34 | [sss]: https://en.wikipedia.org/wiki/Shamir's_Secret_Sharing 35 | [hashi-sss]: https://github.com/hashicorp/vault/tree/main/shamir 36 | [unoswift]: http://github.com/withuno/unoswift 37 | [wasm]: https://webassembly.org 38 | [wbindgen]: https://rustwasm.github.io/docs/wasm-bindgen/ 39 | 40 | # Usage 41 | 42 | Run the uno CLI like: 43 | ``` 44 | cargo run --bin uno 45 | ``` 46 | 47 | Or the API server: 48 | ``` 49 | cargo run --bin api 50 | ``` 51 | 52 | Test everything using: 53 | ``` 54 | cargo test 55 | ``` 56 | 57 | If you just want to run the tests in a single "package", use: 58 | ``` 59 | cargo test -p 60 | ``` 61 | 62 | For example, `cargo test -p lib` or `cargo test -p uno`. 63 | 64 | If something feels slow, remember by default you're running a debug configuration. 65 | Try the `--release` flag, the hash functions run noticably faster with optimizations. 66 | 67 | # Style 68 | 69 | Code should read like a book. 70 | The style goal in this repo is to structure code such that it grows vertically instead of horizontally. 71 | Prefer pulling expressions into local variables over deeply nested matching. 72 | Lines that are consistently the same length and shape are easier to reason about. 73 | To that end, we have an arbitrary column limit of 80 chars. 74 | If your lines are under the limit, you're probably doing it right. 75 | Everything else is handled by the Rust formatter. 
76 | ``` 77 | cargo +nightly fmt 78 | ``` 79 | We use format options only available in nightly, so be sure to run with `+nightly`. 80 | 81 | The README is sentence lines. 82 | Since we're writing prose that gets formatted by whatever is rendering it, we don't care about manually formatting the README. 83 | It's easier to move sentences around when they're on individual lines, so that's what we do. 84 | 85 | 86 | # Legal things 87 | 88 | Inspired by Signal's README (but not copied verbatim because we are EAR99): 89 | 90 | ## Cryptography Notice 91 | 92 | This distribution includes cryptographic software. 93 | The country in which you currently reside may have restrictions on the import, possession, use, and/or re-export to another country, of encryption software. 94 | BEFORE using any encryption software, please check your country's laws, regulations and policies concerning the import, possession, or use, and re-export of encryption software, to see if this is permitted. 95 | See for more information. 96 | 97 | In it's current form, this software exists to help users practically and securely manage their account credentials and login information. 98 | In the United States, under the Export Administration Regulations (“EAR”), encryption software limited to authentication applications is not controlled as an encryption item and can be classified under Export Commodity Control Number (ECCN): **EAR99**. 99 | The usage of encryption in this software is limited to the support of its primary function: password management and authentication. 100 | Thus, this software does not require specific U.S. government authorization to export in either object or source form. 101 | 102 | ## License 103 | 104 | Copyright 2021 WithUno, Inc. 
105 | 106 | Licensed under the AGPLv3: https://www.gnu.org/licenses/agpl-3.0.html 107 | -------------------------------------------------------------------------------- /adi/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "adi" 3 | version = "0.1.0" 4 | authors = ["dcow "] 5 | license = "AGPL-3.0-only" 6 | edition = "2021" 7 | 8 | [dependencies] 9 | generic-array = "0.14.4" 10 | rand = "0.8" 11 | subtle = "2.4" 12 | -------------------------------------------------------------------------------- /adi/README.md: -------------------------------------------------------------------------------- 1 | adi 2 | === 3 | 4 | Uno's shamir's secret sharing implementation, referencing HashiCorp's go version. 5 | -------------------------------------------------------------------------------- /adi/src/error.rs: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright (C) 2021 WithUno, Inc. 3 | // All rights reserved. 
//
// SPDX-License-Identifier: AGPL-3.0-only
//

use std::convert::Into;
use std::error;
use std::fmt;
use std::result::Result;

/// Errors produced by the `adi` secret-sharing routines.
#[derive(Debug)]
pub enum Error
{
    /// A caller supplied an argument that violates the split/combine
    /// preconditions; the payload is a static description of the violation.
    InvalidArgument(&'static str),
}

impl error::Error for Error
{
    // `InvalidArgument` carries only a message, so there is never an
    // underlying source error to report.
    fn source(&self) -> Option<&(dyn error::Error + 'static)>
    {
        match *self {
            Error::InvalidArgument(_) => None,
        }
    }
}

impl fmt::Display for Error
{
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result
    {
        match *self {
            Error::InvalidArgument(ref msg) => {
                write!(f, "invalid argument: {}", msg)
            },
        }
    }
}

// Lets call sites write `Error::InvalidArgument(msg).into()` to produce an
// `Err` of whatever `Result<T, Error>` the caller returns (see lib.rs).
// NOTE(review): this is `Into` rather than the conventional `From` because
// `impl<T> From<Error> for Result<T, Error>` places the uncovered type
// parameter `T` before the local type and is rejected by the orphan rules.
impl<T> Into<Result<T, Error>> for Error
{
    fn into(self) -> Result<T, Error> { Err(self) }
}
--------------------------------------------------------------------------------
/adi/src/gf.rs:
--------------------------------------------------------------------------------
//
// Copyright (C) 2021 WithUno, Inc.
// All rights reserved.
4 | // 5 | // SPDX-License-Identifier: AGPL-3.0-only 6 | // 7 | 8 | // Referenced implementation: 9 | // https://github.com/hashicorp/vault/blob/v1.6.2/shamir/shamir.go 10 | 11 | use rand::{thread_rng, Rng}; 12 | 13 | use subtle::ConditionallySelectable; 14 | use subtle::ConstantTimeEq; 15 | 16 | #[derive(Debug)] 17 | pub struct Polynomial 18 | { 19 | pub coefficients: Vec, 20 | } 21 | 22 | pub fn make_polynomial(intercept: u8, degree: usize) -> Polynomial 23 | { 24 | let mut p = Polynomial { 25 | //coefficients: Vec::with_capacity(degree + 1), 26 | coefficients: vec![0u8; degree + 1], 27 | }; 28 | 29 | p.coefficients[0] = intercept; 30 | 31 | thread_rng().fill(&mut p.coefficients[1..]); 32 | 33 | p 34 | } 35 | 36 | pub fn interpolate_polynomial(x_samples: &[u8], y_samples: &[u8], x: u8) -> u8 37 | { 38 | let limit = x_samples.len(); 39 | let mut result = 0u8; 40 | let mut basis; 41 | 42 | for i in 0..limit { 43 | basis = 1; 44 | for j in 0..limit { 45 | if i == j { 46 | continue; 47 | } 48 | 49 | let num = add(x, x_samples[j]); 50 | let denom = add(x_samples[i], x_samples[j]); 51 | let term = div(num, denom); 52 | basis = mult(basis, term); 53 | } 54 | 55 | let group = mult(y_samples[i], basis); 56 | result = add(result, group); 57 | } 58 | 59 | result 60 | } 61 | 62 | impl Polynomial 63 | { 64 | pub fn evaluate(&self, x: u8) -> u8 65 | { 66 | if x == 0 { 67 | return self.coefficients[0]; 68 | } 69 | 70 | let degree = self.coefficients.len() - 1; 71 | let mut out = self.coefficients[degree]; 72 | 73 | let mut i = degree - 1; 74 | loop { 75 | let coeff = self.coefficients[i]; 76 | out = add(mult(out, x), coeff); 77 | 78 | if i == 0 { 79 | break; 80 | } 81 | 82 | i = i - 1; 83 | } 84 | 85 | out 86 | } 87 | } 88 | 89 | pub fn add(a: u8, b: u8) -> u8 { a ^ b } 90 | 91 | pub fn div(a: u8, b: u8) -> u8 92 | { 93 | if b == 0 { 94 | panic!("divide by zero"); 95 | } 96 | 97 | let log_a: u8 = LOG_TABLE[a as usize]; 98 | let log_b: u8 = LOG_TABLE[b as usize]; 99 | 100 | let 
diff = (((log_a as i16) - (log_b as i16)) + 255) % 255; 101 | 102 | let ret = EXP_TABLE[diff as usize]; 103 | 104 | // ret = subtle.ConstantTimeSelect(subtle.ConstantTimeByteEq(a, 0), 0, ret) 105 | u8::conditional_select(&ret, &0, a.ct_eq(&0)) 106 | } 107 | 108 | // do i need to take/return a subtle::Choice here? 109 | // how do i avoid using `as usize` and `as i16` 110 | pub fn mult(a: u8, b: u8) -> u8 111 | { 112 | let log_a: u8 = LOG_TABLE[a as usize]; 113 | let log_b: u8 = LOG_TABLE[b as usize]; 114 | 115 | let sum = ((log_a as i16) + (log_b as i16)) % 255; 116 | 117 | let ret = EXP_TABLE[sum as usize]; 118 | 119 | //ret = subtle.ConstantTimeSelect(subtle.ConstantTimeByteEq(a, 0), 0, ret) 120 | //ret = subtle.ConstantTimeSelect(subtle.ConstantTimeByteEq(b, 0), 0, ret) 121 | let ret = u8::conditional_select(&ret, &0, a.ct_eq(&0)); 122 | u8::conditional_select(&ret, &0, b.ct_eq(&0)) 123 | } 124 | 125 | // https://github.com/hashicorp/vault/blob/master/shamir/tables.go 126 | pub const LOG_TABLE: [u8; 256] = [ 127 | 0x00, 0xff, 0xc8, 0x08, 0x91, 0x10, 0xd0, 0x36, 0x5a, 0x3e, 0xd8, 0x43, 128 | 0x99, 0x77, 0xfe, 0x18, 0x23, 0x20, 0x07, 0x70, 0xa1, 0x6c, 0x0c, 0x7f, 129 | 0x62, 0x8b, 0x40, 0x46, 0xc7, 0x4b, 0xe0, 0x0e, 0xeb, 0x16, 0xe8, 0xad, 130 | 0xcf, 0xcd, 0x39, 0x53, 0x6a, 0x27, 0x35, 0x93, 0xd4, 0x4e, 0x48, 0xc3, 131 | 0x2b, 0x79, 0x54, 0x28, 0x09, 0x78, 0x0f, 0x21, 0x90, 0x87, 0x14, 0x2a, 132 | 0xa9, 0x9c, 0xd6, 0x74, 0xb4, 0x7c, 0xde, 0xed, 0xb1, 0x86, 0x76, 0xa4, 133 | 0x98, 0xe2, 0x96, 0x8f, 0x02, 0x32, 0x1c, 0xc1, 0x33, 0xee, 0xef, 0x81, 134 | 0xfd, 0x30, 0x5c, 0x13, 0x9d, 0x29, 0x17, 0xc4, 0x11, 0x44, 0x8c, 0x80, 135 | 0xf3, 0x73, 0x42, 0x1e, 0x1d, 0xb5, 0xf0, 0x12, 0xd1, 0x5b, 0x41, 0xa2, 136 | 0xd7, 0x2c, 0xe9, 0xd5, 0x59, 0xcb, 0x50, 0xa8, 0xdc, 0xfc, 0xf2, 0x56, 137 | 0x72, 0xa6, 0x65, 0x2f, 0x9f, 0x9b, 0x3d, 0xba, 0x7d, 0xc2, 0x45, 0x82, 138 | 0xa7, 0x57, 0xb6, 0xa3, 0x7a, 0x75, 0x4f, 0xae, 0x3f, 0x37, 0x6d, 0x47, 139 | 0x61, 0xbe, 0xab, 0xd3, 0x5f, 
0xb0, 0x58, 0xaf, 0xca, 0x5e, 0xfa, 0x85, 140 | 0xe4, 0x4d, 0x8a, 0x05, 0xfb, 0x60, 0xb7, 0x7b, 0xb8, 0x26, 0x4a, 0x67, 141 | 0xc6, 0x1a, 0xf8, 0x69, 0x25, 0xb3, 0xdb, 0xbd, 0x66, 0xdd, 0xf1, 0xd2, 142 | 0xdf, 0x03, 0x8d, 0x34, 0xd9, 0x92, 0x0d, 0x63, 0x55, 0xaa, 0x49, 0xec, 143 | 0xbc, 0x95, 0x3c, 0x84, 0x0b, 0xf5, 0xe6, 0xe7, 0xe5, 0xac, 0x7e, 0x6e, 144 | 0xb9, 0xf9, 0xda, 0x8e, 0x9a, 0xc9, 0x24, 0xe1, 0x0a, 0x15, 0x6b, 0x3a, 145 | 0xa0, 0x51, 0xf4, 0xea, 0xb2, 0x97, 0x9e, 0x5d, 0x22, 0x88, 0x94, 0xce, 146 | 0x19, 0x01, 0x71, 0x4c, 0xa5, 0xe3, 0xc5, 0x31, 0xbb, 0xcc, 0x1f, 0x2d, 147 | 0x3b, 0x52, 0x6f, 0xf6, 0x2e, 0x89, 0xf7, 0xc0, 0x68, 0x1b, 0x64, 0x04, 148 | 0x06, 0xbf, 0x83, 0x38, 149 | ]; 150 | 151 | pub const EXP_TABLE: [u8; 256] = [ 152 | 0x01, 0xe5, 0x4c, 0xb5, 0xfb, 0x9f, 0xfc, 0x12, 0x03, 0x34, 0xd4, 0xc4, 153 | 0x16, 0xba, 0x1f, 0x36, 0x05, 0x5c, 0x67, 0x57, 0x3a, 0xd5, 0x21, 0x5a, 154 | 0x0f, 0xe4, 0xa9, 0xf9, 0x4e, 0x64, 0x63, 0xee, 0x11, 0x37, 0xe0, 0x10, 155 | 0xd2, 0xac, 0xa5, 0x29, 0x33, 0x59, 0x3b, 0x30, 0x6d, 0xef, 0xf4, 0x7b, 156 | 0x55, 0xeb, 0x4d, 0x50, 0xb7, 0x2a, 0x07, 0x8d, 0xff, 0x26, 0xd7, 0xf0, 157 | 0xc2, 0x7e, 0x09, 0x8c, 0x1a, 0x6a, 0x62, 0x0b, 0x5d, 0x82, 0x1b, 0x8f, 158 | 0x2e, 0xbe, 0xa6, 0x1d, 0xe7, 0x9d, 0x2d, 0x8a, 0x72, 0xd9, 0xf1, 0x27, 159 | 0x32, 0xbc, 0x77, 0x85, 0x96, 0x70, 0x08, 0x69, 0x56, 0xdf, 0x99, 0x94, 160 | 0xa1, 0x90, 0x18, 0xbb, 0xfa, 0x7a, 0xb0, 0xa7, 0xf8, 0xab, 0x28, 0xd6, 161 | 0x15, 0x8e, 0xcb, 0xf2, 0x13, 0xe6, 0x78, 0x61, 0x3f, 0x89, 0x46, 0x0d, 162 | 0x35, 0x31, 0x88, 0xa3, 0x41, 0x80, 0xca, 0x17, 0x5f, 0x53, 0x83, 0xfe, 163 | 0xc3, 0x9b, 0x45, 0x39, 0xe1, 0xf5, 0x9e, 0x19, 0x5e, 0xb6, 0xcf, 0x4b, 164 | 0x38, 0x04, 0xb9, 0x2b, 0xe2, 0xc1, 0x4a, 0xdd, 0x48, 0x0c, 0xd0, 0x7d, 165 | 0x3d, 0x58, 0xde, 0x7c, 0xd8, 0x14, 0x6b, 0x87, 0x47, 0xe8, 0x79, 0x84, 166 | 0x73, 0x3c, 0xbd, 0x92, 0xc9, 0x23, 0x8b, 0x97, 0x95, 0x44, 0xdc, 0xad, 167 | 0x40, 0x65, 0x86, 0xa2, 0xa4, 0xcc, 0x7f, 0xec, 0xc0, 0xaf, 0x91, 0xfd, 
168 | 0xf7, 0x4f, 0x81, 0x2f, 0x5b, 0xea, 0xa8, 0x1c, 0x02, 0xd1, 0x98, 0x71, 169 | 0xed, 0x25, 0xe3, 0x24, 0x06, 0x68, 0xb3, 0x93, 0x2c, 0x6f, 0x3e, 0x6c, 170 | 0x0a, 0xb8, 0xce, 0xae, 0x74, 0xb1, 0x42, 0xb4, 0x1e, 0xd3, 0x49, 0xe9, 171 | 0x9c, 0xc8, 0xc6, 0xc7, 0x22, 0x6e, 0xdb, 0x20, 0xbf, 0x43, 0x51, 0x52, 172 | 0x66, 0xb2, 0x76, 0x60, 0xda, 0xc5, 0xf3, 0xf6, 0xaa, 0xcd, 0x9a, 0xa0, 173 | 0x75, 0x54, 0x0e, 0x01, 174 | ]; 175 | 176 | 177 | #[cfg(test)] 178 | mod tests 179 | { 180 | use super::*; 181 | 182 | #[test] 183 | fn field_add() 184 | { 185 | assert_eq!(add(16, 16), 0); 186 | assert_eq!(add(3, 4), 7); 187 | } 188 | 189 | #[test] 190 | fn field_mult() 191 | { 192 | assert_eq!(mult(3, 7), 9); 193 | assert_eq!(mult(3, 0), 0); 194 | assert_eq!(mult(0, 3), 0); 195 | } 196 | 197 | #[test] 198 | #[should_panic] 199 | fn field_divide_by_zero() { div(7, 0); } 200 | 201 | #[test] 202 | fn field_divide() 203 | { 204 | assert_eq!(div(0, 7), 0); 205 | assert_eq!(div(3, 3), 1); 206 | assert_eq!(div(6, 3), 2); 207 | } 208 | 209 | #[test] 210 | fn polynomial_random() 211 | { 212 | let p = make_polynomial(42, 2); 213 | assert_eq!(p.coefficients[0], 42); 214 | } 215 | 216 | #[test] 217 | fn polynomial_eval() 218 | { 219 | let p = make_polynomial(42, 1); 220 | 221 | assert_eq!(p.evaluate(0), 42); 222 | assert_eq!(p.evaluate(1), add(42, mult(1, p.coefficients[1]))) 223 | } 224 | 225 | #[test] 226 | fn polynomial_interpolate_rand() 227 | { 228 | let v: Vec = (0..=255).collect(); 229 | 230 | for i in v { 231 | let p = make_polynomial(i, 2); 232 | 233 | let x_vals: Vec = vec![1, 2, 3]; 234 | let y_vals: Vec = 235 | vec![p.evaluate(1), p.evaluate(2), p.evaluate(3)]; 236 | 237 | let out = interpolate_polynomial(&x_vals, &y_vals, 0); 238 | assert_eq!(out, i); 239 | } 240 | } 241 | 242 | #[test] 243 | fn test_tables() 244 | { 245 | for i in 1..256 { 246 | let log_v: u8 = LOG_TABLE[i]; 247 | let exp_v: u8 = EXP_TABLE[log_v as usize]; 248 | 249 | assert_eq!(exp_v, i as u8); 250 | } 
251 | } 252 | } 253 | -------------------------------------------------------------------------------- /adi/src/lib.rs: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright (C) 2021 WithUno, Inc. 3 | // All rights reserved. 4 | // 5 | // SPDX-License-Identifier: AGPL-3.0-only 6 | // 7 | 8 | // Referenced implementation: 9 | // https://github.com/hashicorp/vault/blob/v1.6.2/shamir/shamir.go 10 | 11 | use std::collections::HashSet; 12 | 13 | use rand::seq::SliceRandom; 14 | use rand::thread_rng; 15 | 16 | mod error; 17 | pub use error::Error; 18 | 19 | mod gf; 20 | use gf::*; 21 | 22 | /// A fragment of data that can be combined with other shares to reconstitute 23 | /// the whole data. 24 | pub type Share = Vec; 25 | 26 | /// Using scheme (t, n), split `data` into `n` shares such that `t` can be re- 27 | /// combined into the original bytes. Multiple schemes can be passed in the 28 | /// array in which case groups of shares will be constructed such that the 29 | /// scheme is enforced for each group. For example: split with scheme `[(1,1), 30 | /// (2,3), (3,5)]` would result in 3 groups of shares with the first group 31 | /// being a single share, the second group requireing two of three shares, and 32 | /// the third group requiring three of five shares. 33 | /// 34 | /// The length of `data` must be at least 16 bytes (128 bits) and be a multiple 35 | /// of 16 bits. The maximum number of groups (number of tuples in the scheme 36 | /// array), cannot exceed 16. In a given group tuple (t, n), `t` must not 37 | /// exceed `n`. If `t` equals 1, then `n` must be 1. 38 | /// 39 | /// Group, and scheme information as well as the iteration exponent is encoded 40 | /// in each share so that shares can be recombined without additional context. 
41 | pub fn split<'a>( 42 | data: &[u8], 43 | scheme: &[(usize, usize)], 44 | ) -> Result, Error> 45 | { 46 | let secret = data.to_vec(); 47 | 48 | if scheme.len() != 1 { 49 | let msg = "more than one group is not supported right now"; 50 | return Error::InvalidArgument(msg).into(); 51 | } 52 | 53 | let (threshold, parts) = scheme[0]; 54 | 55 | if parts < threshold { 56 | let msg = "parts cannot be less than threshold"; 57 | return Error::InvalidArgument(msg).into(); 58 | } 59 | 60 | if parts > 255 { 61 | return Error::InvalidArgument("parts cannot exceed 255").into(); 62 | } 63 | 64 | if threshold < 2 { 65 | return Error::InvalidArgument("threshold must be at least 2").into(); 66 | } 67 | 68 | if threshold > 255 { 69 | return Error::InvalidArgument("threshold cannot exceed 255").into(); 70 | } 71 | 72 | if secret.len() == 0 { 73 | return Error::InvalidArgument("cannot split empty secret").into(); 74 | } 75 | 76 | let mut rng = thread_rng(); 77 | let mut xcoord = (0..255).collect::>(); 78 | xcoord.shuffle(&mut rng); 79 | 80 | let mut out = vec![vec![0u8; secret.len() + 1]; parts]; 81 | for i in 0..out.len() { 82 | out[i][secret.len()] = xcoord[i] + 1; 83 | } 84 | 85 | for (i, v) in secret.iter().enumerate() { 86 | let p = make_polynomial(*v, threshold - 1); 87 | 88 | for j in 0..parts { 89 | let x = xcoord[j] + 1; 90 | let y = p.evaluate(x); 91 | out[j][i] = y; 92 | } 93 | } 94 | 95 | Ok(out) 96 | } 97 | 98 | /// Combine shares from a previous split operation. An error is returned if the 99 | /// provided shares are not able to satisfy group threshold requirements, or if 100 | /// the digest does not match after recombination. 
101 | pub fn combine<'a>(parts: &[Share]) -> Result, Error> 102 | { 103 | if parts.len() < 2 { 104 | let msg = 105 | "less than two parts cannot be used to reconstruct the secret"; 106 | return Error::InvalidArgument(msg).into(); 107 | } 108 | 109 | let first_part_len = parts[0].len(); 110 | if first_part_len < 2 { 111 | return Error::InvalidArgument("parts must be at least 2 bytes").into(); 112 | } 113 | 114 | for i in 1..parts.len() { 115 | if parts[i].len() != first_part_len { 116 | let msg = "all parts must be the same length"; 117 | return Error::InvalidArgument(msg).into(); 118 | } 119 | } 120 | 121 | let mut secret = vec![0u8; first_part_len - 1]; 122 | let mut x_samples = vec![0u8; parts.len()]; 123 | let mut y_samples = vec![0u8; parts.len()]; 124 | 125 | let mut check = HashSet::::with_capacity(parts.len()); 126 | 127 | for (i, v) in parts.iter().enumerate() { 128 | let samp = v[first_part_len - 1]; 129 | if check.replace(samp).is_some() { 130 | return Error::InvalidArgument("duplicate part detected").into(); 131 | } 132 | 133 | x_samples[i] = samp; 134 | } 135 | 136 | for i in 0..secret.len() { 137 | for (j, part) in parts.iter().enumerate() { 138 | y_samples[j] = part[i]; 139 | } 140 | 141 | let v = interpolate_polynomial(&x_samples, &y_samples, 0); 142 | 143 | secret[i] = v; 144 | } 145 | 146 | Ok(secret) 147 | } 148 | 149 | #[cfg(test)] 150 | mod unit 151 | { 152 | use super::*; 153 | use rand::RngCore; 154 | 155 | #[test] 156 | pub fn sss_roundtrip_internal() 157 | { 158 | let mut data = [0u8; 32]; 159 | rand::thread_rng().fill_bytes(&mut data); 160 | 161 | let shares = split(&data, &[(2, 3)]).unwrap(); 162 | 163 | let r1 = combine(&shares[..2]).unwrap(); 164 | let r2 = combine(&shares[1..3]).unwrap(); 165 | 166 | assert_eq!(data, &r1[..]); 167 | assert_eq!(data, &r2[..]); 168 | } 169 | } 170 | 171 | // https://github.com/hashicorp/vault/blob/v1.6.2/shamir/shamir_test.go 172 | #[cfg(test)] 173 | mod tests 174 | { 175 | #[test] 176 | fn 
split_invalid() 177 | { 178 | assert!(crate::split("test".as_bytes(), &[(0, 0)]).is_err()); 179 | assert!(crate::split("test".as_bytes(), &[(3, 2)]).is_err()); 180 | assert!(crate::split("test".as_bytes(), &[(3, 1000)]).is_err()); 181 | assert!(crate::split("test".as_bytes(), &[(1000, 1001)]).is_err()); 182 | assert!(crate::split("test".as_bytes(), &[(1, 10)]).is_err()); 183 | assert!(crate::split("".as_bytes(), &[(2, 3)]).is_err()); 184 | } 185 | 186 | #[test] 187 | fn split_unsupported() 188 | { 189 | assert!(crate::split("test".as_bytes(), &[(2, 3), (3, 4)]).is_err()); 190 | } 191 | 192 | #[test] 193 | fn split() 194 | { 195 | let secret: &[u8] = "test".as_bytes(); 196 | 197 | let out = crate::split(secret, &[(3, 5)]).unwrap(); 198 | assert_eq!(out.len(), 5); 199 | 200 | for share in out { 201 | assert_eq!(share.len(), secret.len() + 1); 202 | } 203 | } 204 | 205 | #[test] 206 | fn combine_invalid() 207 | { 208 | assert!(crate::combine(&[vec![]]).is_err()); 209 | 210 | assert!( 211 | crate::combine(&vec![ 212 | "foo".as_bytes().to_vec(), 213 | "ba".as_bytes().to_vec(), 214 | ]) 215 | .is_err() 216 | ); 217 | 218 | assert!( 219 | crate::combine(&vec![ 220 | "f".as_bytes().to_vec(), 221 | "b".as_bytes().to_vec() 222 | ]) 223 | .is_err() 224 | ); 225 | 226 | assert!( 227 | crate::combine(&vec![ 228 | "foo".as_bytes().to_vec(), 229 | "foo".as_bytes().to_vec(), 230 | ]) 231 | .is_err() 232 | ); 233 | } 234 | 235 | #[test] 236 | fn combine() 237 | { 238 | let secret = "test".as_bytes(); 239 | 240 | let out = crate::split(&secret, &[(3, 5)]).unwrap(); 241 | 242 | for i in 0..5 { 243 | for j in 0..5 { 244 | if j == i { 245 | continue; 246 | } 247 | 248 | for k in 0..5 { 249 | if k == i || k == j { 250 | continue; 251 | } 252 | 253 | let mut parts: Vec> = Vec::new(); 254 | parts.push(out[k].clone()); 255 | parts.push(out[j].clone()); 256 | parts.push(out[i].clone()); 257 | 258 | let recomb = crate::combine(&parts).unwrap(); 259 | 260 | assert_eq!(recomb, secret); 261 | } 
262 | } 263 | } 264 | } 265 | 266 | #[test] 267 | fn precomputed() 268 | { 269 | // precomputed split from the HashiCorp version. 270 | // 271 | // package main 272 | // // get this into your path somehow 273 | // import "github.com/hashicorp/vault/shamir" 274 | // 275 | // import ( 276 | // "log" 277 | // ) 278 | // 279 | // func main() { 280 | // split, err := shamir.Split([]byte("secret"), 3, 2) 281 | // if err != nil { 282 | // log.Fatal(err) 283 | // } 284 | // 285 | // log.Println("split:") 286 | // log.Println(split) 287 | // 288 | // combine, err := shamir.Combine(split) 289 | // if err != nil { 290 | // log.Fatal(err) 291 | // } 292 | // 293 | // log.Println("combine (string):") 294 | // log.Println(combine, string(combine)) 295 | //} 296 | // 297 | // go run main.go 298 | // 2021/02/17 14:20:53 split: 299 | // 2021/02/17 14:20:53 [[210 102 247 138 85 80 126] [107 177 243 90 138 28 140] [198 216 47 182 64 12 180]] 300 | // 2021/02/17 14:20:53 combine (string): 301 | // 2021/02/17 14:20:53 [115 101 99 114 101 116] secret 302 | 303 | let parts = vec![ 304 | vec![210, 102, 247, 138, 85, 80, 126], 305 | vec![107, 177, 243, 90, 138, 28, 140], 306 | vec![198, 216, 47, 182, 64, 12, 180], 307 | ]; 308 | 309 | let recombine = crate::combine(&parts).unwrap(); 310 | assert_eq!(recombine, vec![115, 101, 99, 114, 101, 116]); 311 | } 312 | } 313 | -------------------------------------------------------------------------------- /api/.gitignore: -------------------------------------------------------------------------------- 1 | throwaway_local_dbs/ 2 | -------------------------------------------------------------------------------- /api/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "api" 3 | version = "0.1.0" 4 | authors = ["David Cowden "] 5 | license = "AGPL-3.0-only" 6 | edition = "2021" 7 | 8 | [features] 9 | s3 = [] 10 | twilio = [] 11 | openai = [] 12 | brandfetch = [] 13 | 14 | [dependencies] 
15 | anyhow = "1.0" 16 | argon2 = "0.2" 17 | async-std = { version = "1.12", features = ["attributes"] } 18 | async-trait = "0.1" 19 | base64 = "0.13" 20 | blake3 = "0.3" 21 | chrono = "0.4" 22 | futures = "0.3" 23 | http-types = "2.11" 24 | "indoc" = "2" 25 | json-patch = "0.2" 26 | password-hash = "0.2" 27 | rand = "0.8" 28 | reqwest = { version = "0.11", features = ["json", "blocking"] } 29 | rusty-s3 = "0.4" 30 | serde = { version = "1.0", features = ["derive"] } 31 | serde_derive = "1.0" 32 | serde_json = "1.0" 33 | strum = "0.24" 34 | strum_macros = "0.24" 35 | surf = "2.3" 36 | tempfile = "3.2" 37 | thiserror = "1.0" 38 | tide = "0.16" 39 | user-agent-parser = "0.3" 40 | uno = { path = "../lib" } 41 | vclock = "0.2" 42 | walkdir = "2" 43 | 44 | urlencoding = "1.3.3" 45 | serde-xml-rs = "0.4.1" 46 | 47 | [dev-dependencies] 48 | regex = "1.5" 49 | -------------------------------------------------------------------------------- /api/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM rust:bookworm as builder 2 | WORKDIR /app 3 | COPY . . 4 | RUN rustup show 5 | RUN rm rust-toolchain.toml 6 | RUN cargo test -p api --release 7 | RUN cargo install --path api --root . --features "s3 twilio openai brandfetch" 8 | 9 | FROM debian:bookworm-slim 10 | RUN apt update && apt install -y libcurl4 && rm -rf /var/lib/apt/lists/* 11 | WORKDIR /root 12 | EXPOSE 8080 13 | COPY --from=builder /app/bin/ /usr/local/bin/ 14 | COPY --from=builder /app/api/res/ /usr/local/lib/api/ 15 | CMD [ "api" ] 16 | -------------------------------------------------------------------------------- /api/README.md: -------------------------------------------------------------------------------- 1 | API 2 | === 3 | 4 | The uno API service. 5 | 6 | Formerly `ror`, after Th[ror]: a dwarf with a big vault. 7 | 8 | # Overview 9 | 10 | This is Uno's local, development, and production API server. 
11 | 12 | The point of sharing this is not so that users go run their own API server. 13 | It is so that concerned users can audit, verify, and critique our implementation and help us improve it when sensible. 14 | Of course, you're welcome to run your own server, but (other than the CLI) our apps have no way to specify a custom API endpoint. 15 | Supporting such use cases is not currently on our roadmap and, even if it was, we'd need a federated protocol so that independent nodes could route messages across an application layer "uno network". 16 | We're not building a P2P network (at least we don't think we are, yet?). 17 | The API server simply allows a given user's uno clients to store and synchronize encrypted vault data as well as coordinate recovery share distribution. 18 | 19 | # Usage 20 | 21 | From this directory, run the API server: 22 | ``` 23 | cargo run 24 | ``` 25 | ``` 26 | tide::log Logger started 27 | level Info 28 | tide::server Server listening on http://[::]:8080 29 | ``` 30 | 31 | Note, adding `--release` to turn on optimizations will speed things up significantly. 32 | However, you'll trade the nice colored logs for production friendly structured JSON data instead. 33 | 34 | ## Database 35 | 36 | By default, the server uses the filesystem as its backing store. 37 | Upon startup, the server will create some data directories for storing vaults, sessions, mailboxes, nonces, etc. 38 | If you'd prefer, you can point the server at anything that speaks S3. 
39 | 40 | For example, you can run minio (a local S3) using Docker like so: 41 | 42 | ``` 43 | docker run --name minio --rm -p 9000:9000 -p 9001:9001 minio/minio server /tmp/data --console-address :9001 44 | ``` 45 | 46 | In order to point the API server at your local minio instance, build with the `s3` feature: 47 | 48 | ``` 49 | cargo run --features s3 50 | ``` 51 | 52 | You'll also need to tell the server where to find an s3 endpoint using: 53 | 54 | ``` 55 | SPACES_ACCESS_KEY_ID=minioadmin 56 | SPACES_SECRET_ACCESS_KEY=minioadmin 57 | SPACES_HOSTNAME=http://localhost:9000 58 | SPACES_REGION= 59 | SPACES_BUCKET_PREFIX= 60 | ``` 61 | 62 | For example, after starting the minio server, to run the tests using release optimizations against a real s3 implementation: 63 | 64 | ``` 65 | SPACES_ACCESS_KEY_ID=minioadmin SPACES_SECRET_ACCESS_KEY=minioadmin SPACES_HOSTNAME=http://localhost:9000 SPACES_REGION= SPACES_BUCKET_PREFIX= cargo +nightly test --release --features s3 66 | ``` 67 | 68 | ## Directory 69 | 70 | The API server keeps a directory of phone numbers to pubkeys so that users can find other users for sharing credentials or key shards. 71 | The client typically uses the system address book to perform lookups. 72 | To appear in the directory, a user must claim their phone number(s). 73 | By default, the API server will allow any user to claim any phone number. 74 | This method of operation is rather insecure, so the API server also supports using Twilio Verify as a verification backend. 75 | 76 | To run with Twilio enabled, pass the `twilio` feature. 
77 | ``` 78 | cargo run --features "s3 twilio" 79 | ``` 80 | 81 | For the `twilio` feature to work, you'll need to pass valid Twilio Verify API service identifiers and credentials: 82 | ``` 83 | TWILIO_API_ENDPOINT=https://twilio.com/ 84 | TWILIO_SERVICE_SID=******************** 85 | TWILIO_ACCOUNT_SID=******************** 86 | TWILIO_AUTH_TOKEN=******************** 87 | VERIFICATION_CODE_OVERRIDE_SMS=42424242 88 | ``` 89 | 90 | The `VERIFICATION_CODE_OVERRIDE_SMS` allows a static code to be accepted for phone verifications. 91 | This is useful when using the live Twilio API in day-to-day development and regression testing. 92 | Don't do this in production. 93 | 94 | 95 | ## Email Verification 96 | 97 | When registering an account, users are prompted to verify an email address. 98 | We use email to index an account. 99 | This helps users not end up with multiple different vaults. 100 | Like with the directory, by default any user can claim any email. 101 | For real use cases, an email verification backend can be enabled. 102 | 103 | To require real email verification checks, pass the `customerio` feature: 104 | ``` 105 | cargo run --features "s3 twilio customerio" 106 | ``` 107 | 108 | For email verification using `customerio` to work, you'll need to pass valid credentials and a message template ID: 109 | ``` 110 | CUSTOMER_IO_API_ENDPOINT=https://api.customer.io/v1/send/email 111 | CUSTOMER_IO_API_KEY=******************** 112 | CUSTOMER_IO_MESSAGE_ID=4 113 | VERIFY_EMAIL_DOMAIN=https://verify.u1o.dev 114 | ``` 115 | 116 | For email verification to work, you must be running a verify site at `VERIFY_EMAIL_DOMAIN`. 117 | Instructions TBD. 
118 | -------------------------------------------------------------------------------- /api/example/seed: -------------------------------------------------------------------------------- 1 | Xx6RuJuJbSEwgH8JGeDe3V/H9wwJ+eooK1EZW8iaKL8= 2 | -------------------------------------------------------------------------------- /api/example/seed.pub: -------------------------------------------------------------------------------- 1 | r8N1EOII3DAfb1BCcbPfSu9uXF717C0xrJaf7kGi4Pk= 2 | -------------------------------------------------------------------------------- /api/example/service-get.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | curl -XGET http://localhost:8080/v1/services/$1 4 | -------------------------------------------------------------------------------- /api/example/services/hello: -------------------------------------------------------------------------------- 1 | hello body 2 | -------------------------------------------------------------------------------- /api/example/session-get.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | curl -XGET "http://localhost:8080/v1/ssss/$(cat session-id.b64)" 4 | -------------------------------------------------------------------------------- /api/example/session-id.b64: -------------------------------------------------------------------------------- 1 | wdFy1cFmCEExNPAthqEzE6bhAEBUpeTszNSwcK1KfTE= -------------------------------------------------------------------------------- /api/example/session-patch.json: -------------------------------------------------------------------------------- 1 | { 2 | "share": "b64(encrypted(share))", 3 | "confidant": { "pubkey": "b64(pubkey)" } 4 | } 5 | -------------------------------------------------------------------------------- /api/example/session-patch.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | curl -XPATCH 
"http://localhost:8080/v1/ssss/$(cat session-id.b64)" \ 4 | --data-binary @session-patch.json 5 | -------------------------------------------------------------------------------- /api/example/session-put.json: -------------------------------------------------------------------------------- 1 | { "user": { "cid": "b64(argon2d(client-phone))" } } 2 | -------------------------------------------------------------------------------- /api/example/session-put.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | curl -XPUT "http://localhost:8080/v1/ssss/$(cat session-id.b64)" \ 4 | --data-binary @session-put.json 5 | -------------------------------------------------------------------------------- /api/example/sessions/r8N1EOII3DAfb1BCcbPfSu9uXF717C0xrJaf7kGi4Pk=: -------------------------------------------------------------------------------- 1 | { "vault": "data" } -------------------------------------------------------------------------------- /api/example/timestamp: -------------------------------------------------------------------------------- 1 | timestamp 2 | -------------------------------------------------------------------------------- /api/example/timestamp-sig: -------------------------------------------------------------------------------- 1 | XfY0NlO8mrvUafwmflyXPcJeWvMraf0CJupD74fFb0h8wf2b2SGcK7/iwDOPQiOdfzpnHk66Ks4LlMtbCfdvDg== -------------------------------------------------------------------------------- /api/example/vault: -------------------------------------------------------------------------------- 1 | { "vault": "data" } -------------------------------------------------------------------------------- /api/example/vault-bin: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/withuno/identity/26a3fbdb275f0b094b7133d6eed301035c2c7528/api/example/vault-bin -------------------------------------------------------------------------------- 
/api/example/vault-get.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | curl -XGET "http://localhost:8080/v1/vaults/$(cat seed.pub)" \ 4 | -H "x-uno-timestamp: $(cat timestamp)" \ 5 | -H "x-uno-signature: $(cat timestamp-sig)" \ 6 | --trace-ascii - 7 | -------------------------------------------------------------------------------- /api/example/vault-put.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | curl -XPUT "http://localhost:8080/v1/vaults/$(cat seed.pub)" \ 4 | -H "x-uno-timestamp: $(cat timestamp)" \ 5 | -H "x-uno-signature: $(cat timestamp-sig)" \ 6 | -H "Content-Type: application/octet-stream" \ 7 | --data-binary @vault-bin \ 8 | --trace-ascii - 9 | 10 | -------------------------------------------------------------------------------- /api/example/vault-sig: -------------------------------------------------------------------------------- 1 | KKXQvBhvOoTX+WAd4qK2qJzmMUzHEpS+Mo5iFqOaYjtRXi8XVeRi4OZ+kDJjl1Iubar69dGtrH1TWUmfBrokBQ== 2 | -------------------------------------------------------------------------------- /api/example/vault-sig-bin: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/withuno/identity/26a3fbdb275f0b094b7133d6eed301035c2c7528/api/example/vault-sig-bin -------------------------------------------------------------------------------- /api/example/vaults/r8N1EOII3DAfb1BCcbPfSu9uXF717C0xrJaf7kGi4Pk=: -------------------------------------------------------------------------------- 1 | { "vault": "data" } -------------------------------------------------------------------------------- /api/src/assistant.rs: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright (C) 2023 WithUno, Inc. 3 | // All rights reserved. 
4 | // 5 | // SPDX-License-Identifier: AGPL-3.0-only 6 | // 7 | 8 | use http_types::Status; 9 | use http_types::StatusCode; 10 | use serde::{Deserialize, Serialize}; 11 | 12 | use strum_macros::AsRefStr; 13 | use strum_macros::EnumString; 14 | 15 | use tide::Response; 16 | use tide::Result; 17 | 18 | #[derive(Serialize, Deserialize, Debug)] 19 | pub struct AssistTopicLookup 20 | { 21 | // Topic enum one of: reset-password, enable-2fa 22 | pub topic: String, 23 | pub domain: String, 24 | } 25 | 26 | #[derive(Debug, PartialEq, AsRefStr, EnumString)] 27 | pub enum Topic 28 | { 29 | #[strum(serialize = "reset-password")] 30 | ResetPassword, 31 | #[strum(serialize = "enable-2fa")] 32 | Enable2FA, 33 | } 34 | 35 | #[derive(Serialize, Deserialize, Debug)] 36 | pub struct AssistTopicResponse 37 | { 38 | /// List of steps the user should take 39 | pub steps: Vec, 40 | /// URL linking the user to the first step 41 | pub action_url: String, 42 | } 43 | 44 | pub async fn passthrough(req_bytes: Vec) -> Result 45 | { 46 | // use the serverless function 47 | let lambda = std::env::var("ASSISTANT_ENDPOINT") 48 | .status(StatusCode::InternalServerError)?; 49 | 50 | let mut surf_response = surf::post(lambda) 51 | .header("content-type", "application/json") 52 | .body_bytes(req_bytes) 53 | .await?; 54 | 55 | let tide_response = Response::builder(surf_response.status()) 56 | .header("content-type", "application/json") 57 | .body(surf_response.body_bytes().await?) 58 | .build(); 59 | 60 | Ok(tide_response) 61 | } 62 | -------------------------------------------------------------------------------- /api/src/brandfetch.rs: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright (C) 2023 WithUno, Inc. 3 | // All rights reserved. 
4 | // 5 | // SPDX-License-Identifier: AGPL-3.0-only 6 | // 7 | 8 | use http_types::headers::ACCEPT; 9 | use http_types::headers::AUTHORIZATION; 10 | use http_types::headers::CACHE_CONTROL; 11 | use http_types::headers::CONTENT_TYPE; 12 | use http_types::Status; 13 | use http_types::StatusCode; 14 | use tide::Response; 15 | use tide::Result; 16 | 17 | use crate::Database; 18 | 19 | pub async fn get_info(domain: &str, db: &T) -> Result 20 | where 21 | T: Database, 22 | { 23 | // check cache 24 | let cache_key = format!("info/{}", &domain); 25 | let cache_control = "private, immutable, max-age=604800, \ 26 | stale-while-revalidate=86400, stale-if-error=86400"; 27 | // the brands cache db has a 30 day object expiration policy 28 | if db.exists(&cache_key).await? { 29 | let data = db.get(&cache_key).await?; 30 | let response = Response::builder(StatusCode::Ok) 31 | .header(CONTENT_TYPE, "application/json") 32 | .header(CACHE_CONTROL, cache_control) 33 | .body(data) 34 | .build(); 35 | 36 | return Ok(response); 37 | } 38 | 39 | let mut bf = req_info_authed(domain).await?; 40 | let bf_bytes = bf.body_bytes().await?; 41 | 42 | let mut builder = 43 | Response::builder(bf.status()).header(CONTENT_TYPE, "application/json"); 44 | 45 | // if successful, cache the brand data 46 | if let StatusCode::Ok = bf.status() { 47 | let _ = db.put(&cache_key, &bf_bytes).await?; 48 | builder = builder.header(CACHE_CONTROL, cache_control); 49 | } 50 | 51 | builder = builder.body(bf_bytes); 52 | 53 | Ok(builder.build()) 54 | } 55 | 56 | async fn req_info_authed(domain: &str) -> Result 57 | { 58 | let token = std::env::var("BRANDFETCH_API_KEY") 59 | .status(StatusCode::InternalServerError)?; 60 | 61 | let url = format!("https://api.brandfetch.io/v2/brands/{}", domain); 62 | let response = surf::get(url) 63 | .header(AUTHORIZATION, format!("Bearer: {}", token)) 64 | .header(ACCEPT, "application/json") 65 | .await?; 66 | 67 | Ok(response) 68 | } 69 | 
-------------------------------------------------------------------------------- /api/src/magic_share.rs: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright (C) 2021 WithUno, Inc. 3 | // All rights reserved. 4 | // 5 | // SPDX-License-Identifier: AGPL-3.0-only 6 | // 7 | 8 | pub use crate::store::Database; 9 | 10 | use std::result; 11 | 12 | use uno::MagicShare; 13 | 14 | use chrono::{Duration, Utc}; 15 | use serde_json::{Error as SerdeError, Value}; 16 | 17 | use thiserror::Error; 18 | 19 | const PREFIX_ONE_DAY: &'static str = "1d"; 20 | const PREFIX_ONE_WEEK: &'static str = "1w"; 21 | const PREFIX_ONE_MONTH: &'static str = "1m"; 22 | 23 | #[derive(Error, Debug)] 24 | pub enum MagicShareError 25 | { 26 | #[error("Serde error")] 27 | Serde 28 | { 29 | #[from] 30 | source: SerdeError, 31 | }, 32 | #[error("Duplicate")] 33 | Duplicate, 34 | #[error("Expired")] 35 | Expired, 36 | #[error("Not found")] 37 | NotFound, 38 | #[error("Unsupported schema version")] 39 | Schema, 40 | #[error("Unknown magic share error")] 41 | Unknown, 42 | } 43 | 44 | type Result = result::Result; 45 | 46 | fn v0_from_json(json: &[u8]) -> Result 47 | { 48 | match serde_json::from_slice::(json) { 49 | Ok(v) => Ok(v), 50 | Err(e) => Err(MagicShareError::Serde { source: e }), 51 | } 52 | } 53 | 54 | pub fn new_from_json(json: &[u8]) -> Result 55 | { 56 | let v: Value = match serde_json::from_slice(json) { 57 | Ok(s) => s, 58 | Err(e) => return Err(MagicShareError::Serde { source: e }), 59 | }; 60 | 61 | if let Some(s) = v["schema_version"].as_u64() { 62 | match s { 63 | 0 => return v0_from_json(json), 64 | _ => return Err(MagicShareError::Schema), 65 | }; 66 | } 67 | 68 | //XXX: this could be a separate error? 
69 | Err(MagicShareError::Schema) 70 | } 71 | 72 | pub async fn find_by_id(db: &impl Database, id: &str) -> Result 73 | { 74 | for x in &[PREFIX_ONE_DAY, PREFIX_ONE_WEEK, PREFIX_ONE_MONTH] { 75 | let key = format!("{}/{}", x, id); 76 | if let Ok(v) = get_share(db, &key).await { 77 | // XXX: handle the other error types here? 78 | return Ok(v); 79 | } 80 | } 81 | 82 | Err(MagicShareError::NotFound) 83 | } 84 | 85 | pub async fn get_share(db: &impl Database, location: &str) 86 | -> Result 87 | { 88 | if let Ok(bytes) = db.get(location).await { 89 | match serde_json::from_slice::(&bytes) { 90 | Ok(m) => { 91 | if Utc::now() > m.expires_at { 92 | return Err(MagicShareError::Expired); 93 | } 94 | 95 | return Ok(m); 96 | }, 97 | Err(e) => return Err(MagicShareError::Serde { source: e }), 98 | } 99 | } 100 | 101 | Err(MagicShareError::NotFound) 102 | } 103 | 104 | pub async fn store_share( 105 | db: &impl Database, 106 | share: &MagicShare, 107 | ) -> Result 108 | { 109 | if find_by_id(db, &share.id).await.is_ok() { 110 | return Err(MagicShareError::Duplicate); 111 | } 112 | 113 | let diff = share.expires_at - Utc::now(); 114 | let expiration_prefix = if diff < Duration::days(1) { 115 | PREFIX_ONE_DAY 116 | } else if diff < Duration::days(7) { 117 | PREFIX_ONE_WEEK 118 | } else { 119 | PREFIX_ONE_MONTH 120 | }; 121 | 122 | let key = format!("{}/{}", expiration_prefix, share.id); 123 | let bytes = match serde_json::to_vec(&share) { 124 | Ok(b) => b, 125 | Err(e) => return Err(MagicShareError::Serde { source: e }), 126 | }; 127 | 128 | match db.put(&key, &bytes).await { 129 | Ok(_) => Ok(key.to_string()), 130 | Err(_) => Err(MagicShareError::Unknown), 131 | } 132 | } 133 | 134 | #[cfg(test)] 135 | mod tests 136 | { 137 | use super::*; 138 | 139 | #[test] 140 | fn test_new_v0_from_json() 141 | { 142 | let bad_schema_version = ""; 143 | assert!(new_from_json(bad_schema_version.as_bytes()).is_err()); 144 | 145 | let unsupported_schema = "{\"schema_version\": 1000}"; 146 | 
assert!(new_from_json(unsupported_schema.as_bytes()).is_err()); 147 | 148 | let v = r#" 149 | { 150 | "schema_version": 0, 151 | "id": "1234", 152 | "expires_at": "2014-03-12T13:37:27+00:00", 153 | "encrypted_credential": "some encrypted thing" 154 | }"#; 155 | 156 | let m = new_from_json(v.as_bytes()).unwrap(); 157 | 158 | assert_eq!(m.schema_version, 0); 159 | //XXX: assert dates somehow... 160 | } 161 | 162 | #[cfg(not(feature = "s3"))] 163 | use crate::store::FileStore; 164 | 165 | #[cfg(not(feature = "s3"))] 166 | #[async_std::test] 167 | async fn test_share_roundtrip() 168 | { 169 | let dir = tempfile::TempDir::new().unwrap(); 170 | let db = FileStore::new(dir.path(), "test", "v0").await.unwrap(); 171 | 172 | let m1 = MagicShare { 173 | id: "1234".to_string(), 174 | expires_at: Utc::now() + Duration::days(30), 175 | schema_version: 0, 176 | encrypted_credential: "5678".to_string(), 177 | }; 178 | 179 | let location = store_share(&db, &m1).await.unwrap(); 180 | let r = get_share(&db, &location).await.unwrap(); 181 | assert_eq!(m1.id, r.id); 182 | } 183 | 184 | #[cfg(not(feature = "s3"))] 185 | #[async_std::test] 186 | async fn test_find_by_id() 187 | { 188 | let dir = tempfile::TempDir::new().unwrap(); 189 | let db = FileStore::new(dir.path(), "test", "v0").await.unwrap(); 190 | 191 | let m1 = MagicShare { 192 | id: "1234".to_string(), 193 | expires_at: Utc::now() + Duration::days(30), 194 | schema_version: 0, 195 | encrypted_credential: "5678".to_string(), 196 | }; 197 | 198 | store_share(&db, &m1).await.unwrap(); 199 | let r = find_by_id(&db, &m1.id).await.unwrap(); 200 | assert_eq!(r.id, m1.id); 201 | 202 | // find only non-expired items 203 | let m2 = MagicShare { 204 | id: "5678".to_string(), 205 | expires_at: Utc::now() - Duration::days(30), 206 | schema_version: 0, 207 | encrypted_credential: "5678".to_string(), 208 | }; 209 | 210 | store_share(&db, &m2).await.unwrap(); 211 | let r2 = find_by_id(&db, &m2.id).await; 212 | assert!(r2.is_err()); 213 | 214 
| // if an expired item exists with the same id as a 215 | // non-expired item, return the non-expired one. 216 | let m3 = MagicShare { 217 | id: "5678".to_string(), 218 | expires_at: Utc::now() + Duration::days(30), 219 | schema_version: 0, 220 | encrypted_credential: "5678".to_string(), 221 | }; 222 | 223 | store_share(&db, &m3).await.unwrap(); 224 | let r3 = find_by_id(&db, &m3.id).await.unwrap(); 225 | assert_eq!(r3.id, m3.id); 226 | } 227 | 228 | #[cfg(not(feature = "s3"))] 229 | #[async_std::test] 230 | async fn test_no_duplicate_shares() 231 | { 232 | let dir = tempfile::TempDir::new().unwrap(); 233 | let db = FileStore::new(dir.path(), "test", "v0").await.unwrap(); 234 | 235 | let mut m1 = MagicShare { 236 | id: "1234".to_string(), 237 | expires_at: Utc::now() + Duration::days(30), 238 | schema_version: 0, 239 | encrypted_credential: "5678".to_string(), 240 | }; 241 | 242 | assert!(store_share(&db, &m1).await.is_ok()); 243 | assert!(store_share(&db, &m1).await.is_err()); 244 | 245 | // different expiration class doesn't matter 246 | m1.expires_at = Utc::now() + Duration::hours(1); 247 | assert!(store_share(&db, &m1).await.is_err()); 248 | 249 | // expired objects can be overwritten 250 | let mut m2 = MagicShare { 251 | id: "5678".to_string(), 252 | expires_at: Utc::now() - Duration::days(30), 253 | schema_version: 0, 254 | encrypted_credential: "5678".to_string(), 255 | }; 256 | assert!(store_share(&db, &m2).await.is_ok()); 257 | 258 | m2.expires_at = Utc::now() + Duration::days(30); 259 | assert!(store_share(&db, &m2).await.is_ok()); 260 | } 261 | 262 | #[cfg(not(feature = "s3"))] 263 | #[async_std::test] 264 | async fn test_expiration_rounding() 265 | { 266 | let dir = tempfile::TempDir::new().unwrap(); 267 | let db = FileStore::new(dir.path(), "test", "v0").await.unwrap(); 268 | 269 | //XXX: this would be more correct if we had a test utility to freeze 270 | // time at different intervals... 
271 | let m1 = MagicShare { 272 | id: "1234".to_string(), 273 | expires_at: Utc::now() + Duration::hours(1), 274 | schema_version: 0, 275 | encrypted_credential: "5678".to_string(), 276 | }; 277 | 278 | let mut location = store_share(&db, &m1).await.unwrap(); 279 | assert_eq!(location, "1d/1234".to_string()); 280 | 281 | let m2 = MagicShare { 282 | id: "1234a".to_string(), 283 | expires_at: Utc::now() + Duration::days(2), 284 | schema_version: 0, 285 | encrypted_credential: "5678".to_string(), 286 | }; 287 | 288 | location = store_share(&db, &m2).await.unwrap(); 289 | assert_eq!(location, "1w/1234a".to_string()); 290 | 291 | let m3 = MagicShare { 292 | id: "1234b".to_string(), 293 | expires_at: Utc::now() + Duration::weeks(2), 294 | schema_version: 0, 295 | encrypted_credential: "5678".to_string(), 296 | }; 297 | 298 | location = store_share(&db, &m3).await.unwrap(); 299 | assert_eq!(location, "1m/1234b".to_string()); 300 | 301 | let m4 = MagicShare { 302 | id: "1234c".to_string(), 303 | expires_at: Utc::now() + Duration::weeks(8), 304 | schema_version: 0, 305 | encrypted_credential: "5678".to_string(), 306 | }; 307 | 308 | location = store_share(&db, &m4).await.unwrap(); 309 | assert_eq!(location, "1m/1234c".to_string()); 310 | } 311 | } 312 | -------------------------------------------------------------------------------- /api/src/mailbox.rs: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright (C) 2021 WithUno, Inc. 3 | // All rights reserved. 
4 | // 5 | // SPDX-License-Identifier: AGPL-3.0-only 6 | // 7 | 8 | pub use crate::store::Database; 9 | 10 | use anyhow::anyhow; 11 | use anyhow::Result; 12 | use futures::stream; 13 | use futures::stream::StreamExt; 14 | use serde::{Deserialize, Serialize}; 15 | use serde_json::value::Value; 16 | 17 | #[derive(Debug, Serialize, Deserialize, PartialEq)] 18 | pub struct Mailbox 19 | { 20 | pub messages: Vec, 21 | } 22 | 23 | #[derive(Debug, Deserialize, Serialize)] 24 | pub struct MessageToDelete 25 | { 26 | pub from: String, 27 | pub id: u64, 28 | } 29 | 30 | #[derive(Debug, Serialize, Deserialize, PartialEq, Clone)] 31 | pub struct MessageRequest 32 | { 33 | pub action: String, 34 | pub uuid: String, 35 | pub data: Value, 36 | } 37 | 38 | #[derive(Debug, Serialize, Deserialize, PartialEq)] 39 | pub struct MessageStored 40 | { 41 | pub action: String, 42 | pub uuid: String, 43 | pub id: u64, 44 | pub from: String, 45 | pub data: Value, 46 | } 47 | 48 | pub async fn delete_messages( 49 | store: &impl Database, 50 | owner: &str, 51 | messages: &Vec, 52 | ) -> Result 53 | { 54 | let results = stream::iter(messages) 55 | .filter_map(|m| async move { 56 | let dest = format!("{}/{}/{}", owner, m.from, m.id); 57 | // only count deletes that are Ok 58 | store.del(&dest).await.ok() 59 | }) 60 | .collect::>() 61 | .await; 62 | 63 | Ok(results.len()) 64 | } 65 | 66 | pub async fn get_messages(store: &impl Database, owner: &str) 67 | -> Result 68 | { 69 | // LOCK 70 | let m_ids = store.list(owner).await?; 71 | let messages = stream::iter(m_ids) 72 | .filter_map(|id| async move { 73 | store 74 | .get(&id) 75 | .await 76 | .and_then(|m| { 77 | serde_json::from_slice(&m).map_err(|e| anyhow!(e)) 78 | }) 79 | .ok() // convert to option 80 | }) 81 | .collect::>() 82 | .await; 83 | // UNLOCK 84 | 85 | Ok(Mailbox { messages }) 86 | } 87 | 88 | pub async fn post_message( 89 | store: &impl Database, 90 | recipient: &str, 91 | sender: &str, 92 | message: &MessageRequest, 93 | ) -> Result 
94 | { 95 | let prefix = format!("{}/{}", recipient, sender); 96 | 97 | // LOCK 98 | let existing = store.list(&prefix).await?; 99 | let ids: Vec = existing 100 | .iter() 101 | .map(|m| { 102 | let id = m.split("/").last().unwrap(); 103 | id.parse::().unwrap() 104 | }) 105 | .collect(); 106 | 107 | let next_id = match ids.iter().max() { 108 | Some(max) => max + 1, 109 | None => 1, 110 | }; 111 | 112 | let dest = format!("{}/{}", prefix, next_id); 113 | 114 | let m = MessageStored { 115 | id: next_id, 116 | from: sender.to_string(), 117 | uuid: message.uuid.clone(), 118 | action: message.action.clone(), 119 | data: message.data.clone(), 120 | }; 121 | 122 | let j = serde_json::to_vec(&m)?; 123 | 124 | store.put(&dest, &j).await?; 125 | 126 | let b2 = store.get(&dest).await?; 127 | let m2: MessageStored = serde_json::from_slice(&b2)?; 128 | // UNLOCK 129 | 130 | Ok(m2) 131 | } 132 | 133 | #[cfg(test)] 134 | mod tests 135 | { 136 | use super::*; 137 | 138 | use serde_json::json; 139 | 140 | #[cfg(feature = "s3")] 141 | use crate::store::S3Store; 142 | 143 | #[cfg(feature = "s3")] 144 | async fn new_store() -> Result 145 | { 146 | use rand::distributions::Alphanumeric; 147 | use rand::Rng; 148 | 149 | fn tmpname(rand_len: usize) -> String 150 | { 151 | let mut buf = String::with_capacity(rand_len); 152 | 153 | // Push each character in one-by-one. Unfortunately, this is the 154 | // only safe(ish) simple way to do this without allocating a 155 | // temporary String/Vec. 
156 | unsafe { 157 | rand::thread_rng() 158 | .sample_iter(&Alphanumeric) 159 | .take(rand_len) 160 | .for_each(|b| { 161 | buf.push_str(std::str::from_utf8_unchecked(&[b as u8])) 162 | }) 163 | } 164 | buf.to_lowercase() 165 | } 166 | 167 | let store = S3Store::new( 168 | "http://localhost:9000", 169 | "minio", 170 | "minioadmin", 171 | "minioadmin", 172 | &tmpname(32), 173 | "v0", 174 | ) 175 | .await?; 176 | 177 | let _ = store.create_bucket_if_not_exists().await?; 178 | 179 | Ok(store) 180 | } 181 | 182 | #[cfg(not(feature = "s3"))] 183 | use crate::store::FileStore; 184 | 185 | #[cfg(not(feature = "s3"))] 186 | async fn new_store() -> Result 187 | { 188 | let dir = tempfile::TempDir::new()?; 189 | 190 | Ok(FileStore::new(dir.path(), "test", "v0").await?) 191 | } 192 | 193 | #[async_std::test] 194 | async fn message_uuid() -> Result<()> 195 | { 196 | let store = new_store().await?; 197 | let owner1 = "owner1".to_string(); 198 | let sender1 = "sender1".to_string(); 199 | 200 | let any_message = MessageRequest { 201 | uuid: "1111-2222".to_string(), 202 | action: "packed".to_string(), 203 | data: json!("message data is opaque"), 204 | }; 205 | 206 | let _ = post_message(&store, &owner1, &sender1, &any_message).await?; 207 | 208 | let g1 = get_messages(&store, &owner1).await?; 209 | assert_eq!(g1.messages[0].uuid, "1111-2222"); 210 | 211 | Ok(()) 212 | } 213 | 214 | #[async_std::test] 215 | async fn mailbox_messages() -> Result<()> 216 | { 217 | let store = new_store().await?; 218 | let owner1 = "owner1".to_string(); 219 | let owner2 = "owner2".to_string(); 220 | 221 | let sender1 = "sender1".to_string(); 222 | let sender2 = "sender2".to_string(); 223 | 224 | let any_message = MessageRequest { 225 | uuid: "11111".to_string(), 226 | action: "packed".to_string(), 227 | data: json!({"info": "message data is opaque"}), 228 | }; 229 | 230 | let r1 = post_message(&store, &owner1, &sender1, &any_message).await?; 231 | assert_eq!(r1.id, 1); 232 | assert_eq!(r1.from, 
sender1.clone()); 233 | 234 | let r2 = post_message(&store, &owner1, &sender1, &any_message).await?; 235 | assert_eq!(r2.id, 2); 236 | assert_eq!(r2.from, sender1); 237 | 238 | let r3 = post_message(&store, &owner1, &sender2, &any_message).await?; 239 | assert_eq!(r3.id, 1); 240 | assert_eq!(r3.from, sender2); 241 | 242 | let r4 = post_message(&store, &owner2, &sender1, &any_message).await?; 243 | assert_eq!(r4.id, 1); 244 | assert_eq!(r4.from, sender1); 245 | 246 | let g1 = get_messages(&store, &owner1).await?; 247 | assert_eq!(g1.messages.len(), 3); 248 | 249 | let num_deleted = delete_messages(&store, &owner1, &vec![ 250 | MessageToDelete { from: sender1, id: r2.id }, 251 | MessageToDelete { from: sender2, id: r3.id }, 252 | ]) 253 | .await?; 254 | 255 | assert_eq!(num_deleted, 2); 256 | 257 | let g2 = get_messages(&store, &owner1).await?; 258 | assert_eq!(g2.messages.len(), 1); 259 | assert_eq!(g2.messages[0].id, 1); 260 | assert_eq!(g2.messages[0].from, "sender1".to_string()); 261 | 262 | Ok(()) 263 | } 264 | 265 | #[test] 266 | fn ios_deserialize() -> Result<()> 267 | { 268 | let s1 = r#"{"data":{"share":"IKlx5OuP22Xux5JSOeekYH+zLmhiemgHF25QV4yxK/Cq8VlYZa41qWElDD+Ue9tdzdm23j78MpfCTlLCew==","signature":"UEq/S7j5cXAuEo7K5LVEiMGdWbLwqQxxQNKVlXtgLbB8ecY4+u3YF3S\/uMhohZx5pmKJ6qWZccoj7+9dAqA/CQ=="},"uuid":"not_from_ios","action":"share-update","from":"DkxRk21yuqwA2Uf1P7At08OD8434fwEnAc9-Ckmve20"}"#; 269 | let s2 = r#"{"data":{"signature":"UEq/S7j5cXAuEo7K5LVEiMGdWbLwqQxxQNKVlXtgLbB8ecY4+u3YF3S\/uMhohZx5pmKJ6qWZccoj7+9dAqA/CQ=="},"uuid":"not_from_ios","action":"share-update","from":"DkxRk21yuqwA2Uf1P7At08OD8434fwEnAc9-Ckmve20"}"#; 270 | 271 | // just check that we don't panic 272 | let _: MessageRequest = serde_json::from_slice(s1.as_bytes())?; 273 | let _: MessageRequest = serde_json::from_slice(s2.as_bytes())?; 274 | assert!(true); 275 | 276 | Ok(()) 277 | } 278 | } 279 | -------------------------------------------------------------------------------- 
/api/src/main.rs: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright (C) 2021 WithUno, Inc. 3 | // All rights reserved. 4 | // 5 | // SPDX-License-Identifier: AGPL-3.0-only 6 | // 7 | 8 | use anyhow::Context; 9 | use anyhow::Result; 10 | 11 | #[cfg(not(feature = "s3"))] 12 | use api::store::FileStore; 13 | #[cfg(not(feature = "s3"))] 14 | async fn make_db(name: &'static str, version: &str) -> Result 15 | { 16 | // use the current directory 17 | // TODO: figure out a better dir like /var/db but one that doesn't require 18 | // root 19 | FileStore::new("./throwaway_local_dbs", name, version).await 20 | } 21 | 22 | #[cfg(feature = "s3")] 23 | use api::store::S3Store; 24 | #[cfg(feature = "s3")] 25 | async fn make_db(name: &str, version: &str) -> Result 26 | { 27 | let key_id = std::env::var("SPACES_ACCESS_KEY_ID") 28 | .context("Failed to lookup SPACES_ACCESS_KEY_ID")?; 29 | 30 | let secret = std::env::var("SPACES_SECRET_ACCESS_KEY") 31 | .context("Failed to lookup SPACES_SECRET_ACCESS_KEY")?; 32 | 33 | let host = std::env::var("SPACES_HOSTNAME") 34 | .context("Failed to lookup SPACES_HOSTNAME")?; 35 | 36 | let region = std::env::var("SPACES_REGION") 37 | .context("Failed to lookup SPACES_REGION")?; 38 | 39 | let bucket = std::env::var("SPACES_BUCKET_PREFIX") 40 | .context("Failed to lookup SPACES_BUCKET_PREFIX")?; 41 | 42 | let name = String::from(name) + "." 
+ &String::from(bucket); 43 | 44 | S3Store::new(&host, ®ion, &key_id, &secret, &name, version).await 45 | } 46 | 47 | #[async_std::main] 48 | async fn main() -> Result<()> 49 | { 50 | if cfg!(feature = "twilio") && cfg!(not(test)) { 51 | let twilio_endpoint = std::env::var("TWILIO_API_ENDPOINT") 52 | .context("Must specify TWILIO_API_ENDPOINT")?; 53 | let _account_sid = std::env::var("TWILIO_ACCOUNT_SID") 54 | .context("Must specify TWILIO_ACCOUNT_SID")?; 55 | let _service_sid = std::env::var("TWILIO_SERVICE_SID") 56 | .context("Must specify TWILIO_SERVICE_SID")?; 57 | let _auth_token = std::env::var("TWILIO_AUTH_TOKEN") 58 | .context("Must specify TWILIO_AUTH_TOKEN")?; 59 | 60 | let _ = surf::Url::parse(&twilio_endpoint) 61 | .context("twilio API endpoint must be a url")?; 62 | } 63 | 64 | if cfg!(feature = "openai") && cfg!(not(test)) { 65 | let _ = std::env::var("OPENAI_API_KEY") 66 | .context("Must specify OPENAI_API_KEY")?; 67 | } 68 | if cfg!(feature = "openai") && cfg!(not(test)) { 69 | let _: surf::Url = std::env::var("ASSISTANT_ENDPOINT") 70 | .context("Must specify ASSISTANT_ENDPOINT")? 
71 | .parse() 72 | .context("ASSISTANT_ENDPOINT must be a valid URL")?; 73 | } 74 | 75 | if cfg!(feature = "brandfetch") && cfg!(not(test)) { 76 | let _ = std::env::var("BRANDFETCH_API_KEY") 77 | .context("Must specify BRANDFETCH_API_KEY")?; 78 | } 79 | 80 | let tok2 = make_db("tokens", "v2").await?; 81 | let vau2 = make_db("vaults", "v2").await?; 82 | let srv2 = make_db("services", "").await?; // not (yet) versioned 83 | let ses2 = make_db("sessions", "v2").await?; 84 | let mbx2 = make_db("mailboxes", "v2").await?; 85 | let shr2 = make_db("shares", "v2").await?; 86 | let vdb2 = make_db("verify", "v2").await?; 87 | let dir2 = make_db("directory", "v2").await?; 88 | let ast2 = make_db("assistant", "v2").await?; // db is empty right now 89 | let brn2 = make_db("brands", "v2").await?; // brand cache 90 | 91 | 92 | let api_v2 = api::build_routes( 93 | tok2, vau2, srv2, ses2, mbx2, shr2, vdb2, dir2, ast2, brn2, 94 | )?; 95 | 96 | let mut srv = tide::new(); 97 | 98 | srv.at("/v2").nest(api_v2); 99 | 100 | tide::log::start(); 101 | 102 | let port = std::env::var("PORT").unwrap_or("8080".to_string()); 103 | srv.listen(format!("[::]:{}", port)).await?; 104 | Ok(()) 105 | } 106 | -------------------------------------------------------------------------------- /api/src/store.rs: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright (C) 2021 WithUno, Inc. 3 | // All rights reserved. 4 | // 5 | // SPDX-License-Identifier: AGPL-3.0-only 6 | // 7 | 8 | use anyhow::Result; 9 | 10 | use async_trait::async_trait; 11 | 12 | use std::fmt::Debug; 13 | 14 | pub mod s3; 15 | pub use s3::S3Store; 16 | 17 | pub mod file; 18 | pub use file::FileStore; 19 | 20 | use async_std::path::Path; 21 | 22 | #[async_trait] 23 | pub trait Database: Send + Sync + Clone + Debug 24 | { 25 | // Standard Operations 26 | // 27 | // Use these for all standard logic. 28 | // 29 | async fn exists

(&self, object: P) -> Result 30 | where 31 | P: AsRef + Send; 32 | 33 | async fn get

(&self, object: P) -> Result> 34 | where 35 | P: AsRef + Send; 36 | 37 | async fn put

(&self, object: P, data: &[u8]) -> Result<()> 38 | where 39 | P: AsRef + Send; 40 | 41 | async fn del

(&self, object: P) -> Result<()> 42 | where 43 | P: AsRef + Send; 44 | 45 | async fn list

(&self, prefix: P) -> Result> 46 | where 47 | P: AsRef + Send; 48 | 49 | // *Versioned* Operations 50 | // 51 | // The db is already versioned. The only time these are necessary is when 52 | // performing an on-demand migration of data between major api versions. 53 | // 54 | async fn exists_version(&self, version: Q, object: P) -> Result 55 | where 56 | P: AsRef + Send, 57 | Q: AsRef + Send; 58 | 59 | async fn get_version(&self, version: Q, object: P) -> Result> 60 | where 61 | P: AsRef + Send, 62 | Q: AsRef + Send; 63 | 64 | async fn put_version( 65 | &self, 66 | version: Q, 67 | object: P, 68 | data: &[u8], 69 | ) -> Result<()> 70 | where 71 | P: AsRef + Send, 72 | Q: AsRef + Send; 73 | 74 | async fn del_version(&self, version: Q, object: P) -> Result<()> 75 | where 76 | P: AsRef + Send, 77 | Q: AsRef + Send; 78 | 79 | async fn list_version( 80 | &self, 81 | version: Q, 82 | prefix: P, 83 | ) -> Result> 84 | where 85 | P: AsRef + Send, 86 | Q: AsRef + Send; 87 | } 88 | -------------------------------------------------------------------------------- /api/src/store/file.rs: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright (C) 2021 WithUno, Inc. 3 | // All rights reserved. 
//
// SPDX-License-Identifier: AGPL-3.0-only
//

use anyhow::bail;
use anyhow::Result;
use async_std::fs;
use async_std::path::Path;
use async_std::path::PathBuf;
use async_trait::async_trait;

use std::convert::AsRef;
use std::fmt::Debug;

use crate::store::Database;

// File-system backed object store. Objects live at
// `<root>/<name>/<version>/<key>`.
#[derive(Clone, Debug)]
pub struct FileStore
{
    db: PathBuf,      // <root>/<name>
    version: PathBuf, // default version segment for the unversioned ops
}

impl FileStore
{
    // Create the `<root>/<name>/<version>` directory hierarchy (if missing)
    // and return the store handle.
    pub async fn new<P, Q, R>(root: P, name: Q, version: R) -> Result<Self>
    where
        P: AsRef<Path>,
        Q: AsRef<Path>,
        R: AsRef<Path>,
    {
        let db = root.as_ref().join(name);
        let version = version.as_ref().to_owned();

        // Build the path once and reuse it for the struct fields (the
        // previous version re-joined the same components twice).
        fs::create_dir_all(db.join(&version)).await?;

        Ok(Self { db, version })
    }
}

#[async_trait]
impl Database for FileStore
{
    async fn exists<P>(&self, file: P) -> Result<bool>
    where
        P: AsRef<Path> + Send,
    {
        Ok(self.exists_version(&self.version, file).await?)
    }

    async fn exists_version<P, Q>(&self, version: Q, file: P) -> Result<bool>
    where
        P: AsRef<Path> + Send,
        Q: AsRef<Path> + Send,
    {
        // todo: introspect the failure cause and be more specific.
        // NOTE(review): this reads the entire file just to test existence;
        // fs::metadata would avoid that — confirm before changing behavior.
        Ok(self.get_version(version, file).await.is_ok())
    }

    async fn list<P>(&self, prefix: P) -> Result<Vec<String>>
    where
        P: AsRef<Path> + Send,
    {
        Ok(self.list_version(&self.version, prefix).await?)
    }

    async fn list_version<P, Q>(
        &self,
        version: Q,
        prefix: P,
    ) -> Result<Vec<String>>
    where
        P: AsRef<Path> + Send,
        Q: AsRef<Path> + Send,
    {
        let root = self.db.join(version);
        let dir = root.join(prefix);
        use walkdir::WalkDir;
        // Recursively enumerate regular files, returning keys relative to
        // the version root so they can be fed back into get/put/del.
        Ok(WalkDir::new(&dir)
            .into_iter()
            .filter_map(|e| e.ok())
            .filter(|e| e.file_type().is_file())
            .map(|e| {
                e.path()
                    .strip_prefix(&root)
                    .unwrap()
                    .to_string_lossy()
                    .to_string()
            })
            .collect())
    }

    async fn get<P>(&self, file: P) -> Result<Vec<u8>>
    where
        P: AsRef<Path> + Send,
    {
        Ok(self.get_version(&self.version, file).await?)
    }

    async fn get_version<P, Q>(&self, version: Q, file: P) -> Result<Vec<u8>>
    where
        P: AsRef<Path> + Send,
        Q: AsRef<Path> + Send,
    {
        let path = self.db.join(version).join(file);
        Ok(fs::read(path).await?)
    }

    async fn put<P>(&self, file: P, content: &[u8]) -> Result<()>
    where
        P: AsRef<Path> + Send,
    {
        Ok(self.put_version(&self.version, file, content).await?)
    }

    async fn put_version<P, Q>(
        &self,
        version: Q,
        file: P,
        content: &[u8],
    ) -> Result<()>
    where
        P: AsRef<Path> + Send,
        Q: AsRef<Path> + Send,
    {
        let path = self.db.join(version).join(file);
        // Create intermediate directories on demand so nested keys work.
        if let Some(p) = path.parent() {
            fs::create_dir_all(p).await?;
        }

        Ok(fs::write(path, content).await?)
    }

    async fn del<P>(&self, file: P) -> Result<()>
    where
        P: AsRef<Path> + Send,
    {
        Ok(self.del_version(&self.version, file).await?)
    }

    async fn del_version<P, Q>(&self, version: Q, file: P) -> Result<()>
    where
        P: AsRef<Path> + Send,
        Q: AsRef<Path> + Send,
    {
        let path = self.db.join(version).join(file);
        use async_std::io::ErrorKind;
        match fs::remove_file(path).await {
            Ok(_) => Ok(()),
            // Trying to delete a file that doesn't exist is okay.
            Err(e) if e.kind() == ErrorKind::NotFound => Ok(()),
            Err(e) => bail!(e),
        }
    }
}

#[cfg(test)]
#[cfg(not(feature = "s3"))]
mod tests
{
    use super::*;
    use tempfile::TempDir;

    #[async_std::test]
    async fn store() -> Result<()>
    {
        let dir = TempDir::new()?;
        let f = FileStore::new(dir.path(), "testdata", "v0").await?;

        let err = f.get("anyfile").await;
        assert!(err.is_err());

        let yes = f.put("anyfile", b"some content").await;
        assert!(yes.is_ok());

        let yes = f.get("anyfile").await;
        assert!(yes.is_ok());

        let yes = f.del("anyfile").await;
        assert!(yes.is_ok());

        let err = f.get("anyfile").await;
        assert!(err.is_err());

        let result = f.put("some/sub/directory", b"subcontent").await;
        assert!(result.is_ok());

        let result = f.get("some/sub/directory").await;
        assert!(result.is_ok());

        let err = f.get("some/sub/missing").await;
        assert!(err.is_err());

        let err = f.get("some/sub").await;
        assert!(err.is_err());

        let r1 = f.put("multi/key1/file1", b"AA").await;
        let r2 = f.put("multi/key1/file2", b"AA").await;
        let r3 = f.put("multi/key2/file1", b"BB").await;
        let r4 = f.put("multiother/file1", b"CC").await;

        assert!(r1.is_ok());
        assert!(r2.is_ok());
assert!(r3.is_ok()); 216 | assert!(r4.is_ok()); 217 | 218 | let result = f.list("multi/").await; 219 | assert!(result.is_ok()); 220 | 221 | assert_eq!( 222 | result?.sort(), 223 | // does not need to be order dependent eventually 224 | vec!("multi/key2/file1", "multi/key1/file2", "multi/key1/file1",) 225 | .sort() 226 | ); 227 | 228 | Ok(()) 229 | } 230 | } 231 | -------------------------------------------------------------------------------- /api/src/store/s3.rs: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright (C) 2021 WithUno, Inc. 3 | // All rights reserved. 4 | // 5 | // SPDX-License-Identifier: AGPL-3.0-only 6 | // 7 | 8 | use anyhow::{anyhow, bail, ensure, Context, Result}; 9 | 10 | use async_std::path::Path; 11 | use async_trait::async_trait; 12 | use std::fmt; 13 | 14 | use serde::Deserialize; 15 | use serde_xml_rs::from_reader; 16 | 17 | use rusty_s3::{Bucket, Credentials, S3Action, UrlStyle}; 18 | 19 | use surf::http::Method; 20 | use surf::Url; 21 | use surf::{Request, Response, StatusCode}; 22 | 23 | use std::fmt::Debug; 24 | use std::time::Duration; 25 | 26 | use crate::store::Database; 27 | 28 | use urlencoding::decode; 29 | 30 | #[derive(Debug, Clone)] 31 | pub struct DeserializationError; 32 | 33 | #[derive(Debug, Clone)] 34 | pub struct SerializationError; 35 | 36 | impl fmt::Display for DeserializationError 37 | { 38 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result 39 | { 40 | write!(f, "invalid data for deserialization") 41 | } 42 | } 43 | 44 | #[derive(Debug, Deserialize, PartialEq)] 45 | pub struct Contents 46 | { 47 | #[serde(rename = "Key")] 48 | pub key: String, 49 | } 50 | 51 | #[derive(Debug, Deserialize, PartialEq)] 52 | pub struct ListBucketResult 53 | { 54 | #[serde(rename = "Name")] 55 | name: String, 56 | #[serde(rename = "Prefix")] 57 | prefix: String, 58 | #[serde(rename = "KeyCount")] 59 | key_count: i32, 60 | #[serde(rename = "IsTruncated")] 61 | is_truncated: bool, 62 
| 63 | #[serde(rename = "Contents", default)] 64 | pub contents: Vec, 65 | } 66 | 67 | impl ListBucketResult 68 | { 69 | pub fn from_xml( 70 | xml: &[u8], 71 | ) -> Result 72 | { 73 | from_reader(xml).or(Err(DeserializationError)) 74 | } 75 | } 76 | 77 | /// Store to S3 and also the file system 78 | #[derive(Clone, Debug)] 79 | pub struct S3Store 80 | { 81 | creds: Credentials, 82 | bucket: Bucket, 83 | version: String, 84 | } 85 | 86 | impl S3Store 87 | { 88 | pub async fn empty_bucket(&self) -> Result<()> 89 | { 90 | for l in self.list("").await?.iter() { 91 | self.del(l).await?; 92 | } 93 | 94 | Ok(()) 95 | } 96 | 97 | // TODO: in prod all the buckets are already created because we need to 98 | // turn object versioning on. can this be done here? 99 | pub async fn create_bucket_if_not_exists(&self) -> Result<()> 100 | { 101 | let action = self.bucket.create_bucket(&self.creds); 102 | let ttl = Duration::from_secs(60 * 60); 103 | let bro = Request::builder(Method::Put, action.sign(ttl)).build(); 104 | let res = let_it_rip(bro).await?; 105 | let status = res.status(); 106 | 107 | // bucket already exists 108 | if status == StatusCode::Conflict { 109 | return Ok(()); 110 | } 111 | 112 | ensure!(status.is_success(), "s3 PUT unexpected result ({})", status); 113 | 114 | Ok(()) 115 | } 116 | 117 | pub async fn new( 118 | host: &str, 119 | region: &str, 120 | key_id: &str, 121 | secret: &str, 122 | name: &str, 123 | version: &str, 124 | ) -> Result 125 | { 126 | let phost = host.parse()?; 127 | 128 | let bucket = Bucket::new( 129 | phost, 130 | UrlStyle::Path, 131 | name.to_string(), 132 | region.to_string(), 133 | )?; 134 | 135 | Ok(S3Store { 136 | creds: Credentials::new(key_id.to_string(), secret.to_string()), 137 | bucket: bucket, 138 | version: version.to_owned(), 139 | }) 140 | } 141 | } 142 | 143 | fn full_obj_from_path(version: Q, object: P) -> Result 144 | where 145 | P: AsRef, 146 | Q: AsRef, 147 | { 148 | let path = Path::new("/").join(version).join(object); 
149 | 150 | // convert windows style paths to a universal `/` scheme 151 | let path_url = 152 | Url::from_file_path(path).map_err(|_| anyhow!("bad path for url"))?; 153 | 154 | // remove the leading "file:///" 155 | let fbase = Url::parse("file:///")?; 156 | 157 | match fbase.make_relative(&path_url) { 158 | Some(u) => Ok(u.as_str().to_owned()), 159 | None => bail!("invalid version or object name"), 160 | } 161 | } 162 | 163 | fn full_prefix_from_path(version: Q, prefix: P) -> Result 164 | where 165 | P: AsRef, 166 | Q: AsRef, 167 | { 168 | // Note to the future: be very *very* careful. In s3 a trailing `/` is 169 | // relevant but for paths it is not. Nor do we know if the incoming prefix 170 | // is a simple prefix or should be treated as a directory. We *could* 171 | // probably assume a dir for our use case and use Url::from_directory_path. 172 | let path = Path::new("/").join(version).join(prefix); 173 | let pstr = path.to_str().ok_or(anyhow!("prefix not valid utf-8"))?; 174 | 175 | // Url::make_relative unconditionally strips trailing `/`. So we must check 176 | // if the path ends with the path separator and add a `/` back at the end. 177 | let is_directory = pstr.ends_with(std::path::MAIN_SEPARATOR); 178 | 179 | let purl = match is_directory { 180 | true => Url::from_directory_path(path), 181 | false => Url::from_file_path(path), 182 | } 183 | .map_err(|_| anyhow!("cannot convert path to Url"))?; 184 | 185 | // remove the leading "file:///" 186 | let base = Url::parse("file:///")?; 187 | 188 | let url_style_prefix = base 189 | .make_relative(&purl) 190 | .context("cannot make prefix relative to the root")?; 191 | 192 | if is_directory { 193 | Ok(format!("{}/", url_style_prefix.as_str())) 194 | } else { 195 | Ok(url_style_prefix.as_str().to_owned()) 196 | } 197 | } 198 | 199 | #[async_trait] 200 | impl Database for S3Store 201 | { 202 | async fn exists

(&self, object: P) -> Result 203 | where 204 | P: AsRef + Send, 205 | { 206 | Ok(self.exists_version(&self.version, object).await?) 207 | } 208 | 209 | async fn exists_version(&self, version: Q, object: P) -> Result 210 | where 211 | P: AsRef + Send, 212 | Q: AsRef + Send, 213 | { 214 | let vobject = full_obj_from_path(version, object)?; 215 | let action = self.bucket.get_object(Some(&self.creds), &vobject); 216 | let ttl = Duration::from_secs(60 * 60); 217 | let bro = Request::builder(Method::Get, action.sign(ttl)).build(); 218 | let res = let_it_rip(bro).await?; 219 | match res.status() { 220 | http_types::StatusCode::Ok => Ok(true), 221 | http_types::StatusCode::NotFound => Ok(false), 222 | _ => anyhow::bail!("unexpected result from s3 api"), 223 | } 224 | } 225 | 226 | async fn get

(&self, object: P) -> Result> 227 | where 228 | P: AsRef + Send, 229 | { 230 | Ok(self.get_version(&self.version, object).await?) 231 | } 232 | 233 | async fn get_version(&self, version: Q, object: P) -> Result> 234 | where 235 | P: AsRef + Send, 236 | Q: AsRef + Send, 237 | { 238 | let vobject = full_obj_from_path(version, object)?; 239 | let action = self.bucket.get_object(Some(&self.creds), &vobject); 240 | let ttl = Duration::from_secs(60 * 60); 241 | let bro = Request::builder(Method::Get, action.sign(ttl)).build(); 242 | 243 | let mut res = let_it_rip(bro).await?; 244 | let status = res.status(); 245 | ensure!(status.is_success(), "s3 GET unexpected result ({})", status); 246 | Ok(res.body_bytes().await.map_err(|e| anyhow!(e))?) 247 | } 248 | 249 | async fn put

(&self, object: P, content: &[u8]) -> Result<()> 250 | where 251 | P: AsRef + Send, 252 | { 253 | Ok(self.put_version(&self.version, object, content).await?) 254 | } 255 | 256 | async fn put_version( 257 | &self, 258 | version: Q, 259 | object: P, 260 | content: &[u8], 261 | ) -> Result<()> 262 | where 263 | P: AsRef + Send, 264 | Q: AsRef + Send, 265 | { 266 | let vobject = full_obj_from_path(version, object)?; 267 | let action = self.bucket.put_object(Some(&self.creds), &vobject); 268 | let ttl = Duration::from_secs(60 * 60); 269 | let bro = Request::builder(Method::Put, action.sign(ttl)) 270 | .body(content) 271 | .build(); 272 | let res = let_it_rip(bro).await?; 273 | let status = res.status(); 274 | ensure!(status.is_success(), "s3 PUT unexpected result ({})", status); 275 | Ok(()) 276 | } 277 | 278 | async fn del

(&self, object: P) -> Result<()> 279 | where 280 | P: AsRef + Send, 281 | { 282 | Ok(self.del_version(&self.version, object).await?) 283 | } 284 | 285 | async fn del_version(&self, version: Q, object: P) -> Result<()> 286 | where 287 | P: AsRef + Send, 288 | Q: AsRef + Send, 289 | { 290 | let vobject = full_obj_from_path(version, object)?; 291 | let action = self.bucket.delete_object(Some(&self.creds), &vobject); 292 | let ttl = Duration::from_secs(60 * 60); 293 | let bro = Request::builder(Method::Delete, action.sign(ttl)).build(); 294 | let res = let_it_rip(bro).await?; 295 | let status = res.status(); 296 | ensure!( 297 | status.is_success(), 298 | "s3 DELETE unexpected result ({})", 299 | status 300 | ); 301 | Ok(()) 302 | } 303 | 304 | async fn list

(&self, prefix: P) -> Result> 305 | where 306 | P: AsRef + Send, 307 | { 308 | Ok(self.list_version(&self.version, prefix).await?) 309 | } 310 | 311 | async fn list_version( 312 | &self, 313 | version: Q, 314 | prefix: P, 315 | ) -> Result> 316 | where 317 | P: AsRef + Send, 318 | Q: AsRef + Send, 319 | { 320 | let mut action = self.bucket.list_objects_v2(Some(&self.creds)); 321 | let query = action.query_mut(); 322 | let vprefix = full_prefix_from_path(version, prefix)?; 323 | query.insert("prefix", &vprefix); 324 | 325 | let ttl = Duration::from_secs(60 * 60); 326 | let bro = Request::builder(Method::Get, action.sign(ttl)).build(); 327 | 328 | let mut res = let_it_rip(bro).await?; 329 | let status = res.status(); 330 | ensure!(status.is_success(), "s3 GET unexpected result ({})", status); 331 | 332 | let body = res.body_bytes().await.map_err(|e| anyhow!(e))?; 333 | 334 | let result = 335 | ListBucketResult::from_xml(&body[..]).map_err(|e| anyhow!(e))?; 336 | 337 | let decoded = result 338 | .contents 339 | .iter() 340 | .filter_map(|c| decode(&c.key).ok()) 341 | .collect::>(); 342 | 343 | let stripped = decoded 344 | .iter() 345 | .filter_map(|k| k.strip_prefix(&self.version)) 346 | .filter_map(|k| k.strip_prefix("/")) 347 | .collect::>(); 348 | 349 | Ok(stripped.into_iter().map(|s| s.to_owned()).collect()) 350 | } 351 | } 352 | 353 | async fn let_it_rip(req: Request) -> Result 354 | { 355 | let client = surf::client(); 356 | let res = client.send(req).await.map_err(|e| anyhow!(e))?; 357 | Ok(res) 358 | } 359 | 360 | #[cfg(test)] 361 | #[cfg(feature = "s3")] 362 | mod tests 363 | { 364 | use super::*; 365 | 366 | #[async_std::test] 367 | async fn s3_store() -> Result<()> 368 | { 369 | let s = S3Store::new( 370 | "http://localhost:9000", 371 | "minio", 372 | "minioadmin", 373 | "minioadmin", 374 | "somebucket", 375 | "v0", 376 | ) 377 | .await?; 378 | 379 | let _ = s.create_bucket_if_not_exists().await?; 380 | 381 | let err = s.get("anyfile").await; 382 | 
assert!(err.is_err()); 383 | 384 | let yes = s.put("anyfile", b"some content").await; 385 | assert!(yes.is_ok()); 386 | 387 | let yes = s.get("anyfile").await; 388 | assert!(yes.is_ok()); 389 | 390 | let yes = s.del("anyfile").await; 391 | assert!(yes.is_ok()); 392 | 393 | let err = s.get("anyfile").await; 394 | assert!(err.is_err()); 395 | 396 | let result = s.put("some/sub/directory", b"subcontent").await; 397 | assert!(result.is_ok()); 398 | 399 | let result = s.get("some/sub/directory").await; 400 | assert!(result.is_ok()); 401 | 402 | let err = s.get("some/sub/missing").await; 403 | assert!(err.is_err()); 404 | 405 | let err = s.get("some/sub").await; 406 | assert!(err.is_err()); 407 | 408 | let r1 = s.put("multi/key1/file1", b"AA").await; 409 | let r2 = s.put("multi/key1/file2", b"AA").await; 410 | let r3 = s.put("multi/key2/file1", b"BB").await; 411 | let r4 = s.put("multiother/file1", b"CC").await; 412 | 413 | assert!(r1.is_ok()); 414 | assert!(r2.is_ok()); 415 | assert!(r3.is_ok()); 416 | assert!(r4.is_ok()); 417 | 418 | let result = s.list("multi/").await; 419 | assert!(result.is_ok()); 420 | 421 | assert_eq!( 422 | result.unwrap(), 423 | // does not need to be order dependent eventually 424 | vec!("multi/key1/file1", "multi/key1/file2", "multi/key2/file1",) 425 | ); 426 | 427 | Ok(()) 428 | } 429 | 430 | #[test] 431 | fn list_bucket_response() -> Result<()> 432 | { 433 | let r = r#" 434 | 435 | 
somebucketmulti44500falsemulti/key1/file12021-06-24T14:14:00.068Z"3b98e2dffc6cb06a89dcb0d5c60a0206"202d6176db174dc93cb1b899f7c6078f08654445fe8cf1b6ce98d8855f66bdbf4minioSTANDARDmulti/key1/file22021-06-24T14:14:00.074Z"3b98e2dffc6cb06a89dcb0d5c60a0206"202d6176db174dc93cb1b899f7c6078f08654445fe8cf1b6ce98d8855f66bdbf4minioSTANDARDmulti/key2/file12021-06-24T14:14:00.080Z"9d3d9048db16a7eee539e93e3618cbe7"202d6176db174dc93cb1b899f7c6078f08654445fe8cf1b6ce98d8855f66bdbf4minioSTANDARDmultiother/file12021-06-24T14:14:00.086Z"aa53ca0b650dfd85c4f59fa156f7a2cc"202d6176db174dc93cb1b899f7c6078f08654445fe8cf1b6ce98d8855f66bdbf4minioSTANDARDurl 436 | "#; 437 | 438 | let response = 439 | ListBucketResult::from_xml(r.as_bytes()).map_err(|e| anyhow!(e))?; 440 | 441 | assert_eq!(response, ListBucketResult { 442 | name: "somebucket".to_string(), 443 | prefix: "multi".to_string(), 444 | key_count: 4, 445 | is_truncated: false, 446 | contents: vec!( 447 | Contents { key: "multi/key1/file1".to_string() }, 448 | Contents { key: "multi/key1/file2".to_string() }, 449 | Contents { key: "multi/key2/file1".to_string() }, 450 | Contents { key: "multiother/file1".to_string() } 451 | ), 452 | }); 453 | 454 | Ok(()) 455 | } 456 | } 457 | -------------------------------------------------------------------------------- /api/src/twilio.rs: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright (C) 2022 WithUno, Inc. 3 | // All rights reserved. 
4 | // 5 | // SPDX-License-Identifier: AGPL-3.0-only 6 | // 7 | 8 | // 9 | // Twilio boilerplate 10 | // 11 | 12 | use anyhow::anyhow; 13 | use anyhow::bail; 14 | 15 | use http_types::StatusCode; 16 | use surf::Body; 17 | 18 | use anyhow::Result; 19 | 20 | use http_types::convert::{Deserialize, Serialize}; 21 | 22 | use serde_json::Value; 23 | 24 | use std::env; 25 | 26 | use std::convert::AsRef; 27 | use strum_macros::AsRefStr; 28 | 29 | use tide::log; 30 | 31 | 32 | #[derive(AsRefStr, Debug)] 33 | enum Products 34 | { 35 | #[strum(serialize = "lookups")] 36 | Lookups, 37 | #[strum(serialize = "verify")] 38 | Verify, 39 | } 40 | 41 | 42 | pub async fn validate_phone(phone: &str, country: &str) -> Result 43 | { 44 | #[derive(Debug, Serialize, Deserialize)] 45 | #[allow(non_snake_case)] 46 | struct Query<'a> 47 | { 48 | CountryCode: &'a str, 49 | } 50 | 51 | let query = Query { CountryCode: country }; 52 | 53 | let path = format!("PhoneNumbers/{}", phone); 54 | let url = build_url(Products::Lookups, &path)?; 55 | 56 | let mut res = surf::get(url.as_str()) 57 | .header("Authorization", basic_auth()?) 58 | .query(&query) 59 | .map_err(|e| anyhow!(e))? 
60 | .await 61 | .map_err(|e| anyhow!(e))?; 62 | 63 | let status = res.status(); 64 | if status != StatusCode::Ok { 65 | log::error!( 66 | "unexpected twilio lookup response: {}, phone: {}", 67 | status, 68 | phone 69 | ); 70 | bail!("unexpected twilio response {}", status); 71 | } 72 | 73 | let json: Value = res.body_json().await.map_err(|e| anyhow!(e))?; 74 | 75 | let validated_phone = match json["phone_number"].as_str() { 76 | Some(p) => p, 77 | None => bail!("missing property `phone_number`"), 78 | }; 79 | 80 | Ok(String::from(validated_phone)) 81 | } 82 | 83 | pub async fn verify_phone(phone: &str) -> Result 84 | { 85 | #[derive(Debug, Serialize, Deserialize)] 86 | #[allow(non_snake_case)] 87 | struct Form<'a> 88 | { 89 | To: &'a str, 90 | Channel: &'a str, 91 | } 92 | 93 | let form = Form { To: phone, Channel: "sms" }; 94 | 95 | let url = build_url(Products::Verify, "Verifications")?; 96 | 97 | let mut res = surf::post(url.as_str()) 98 | .header("Authorization", basic_auth()?) 99 | .body(Body::from_form(&form).map_err(|e| anyhow!(e))?) 100 | .await 101 | .map_err(|e| anyhow!(e))?; 102 | 103 | let status = res.status(); 104 | if status != StatusCode::Created { 105 | log::error!("unexpected twilio verify start response: {}", status); 106 | bail!("unexpected twilio response {}", status); 107 | } 108 | 109 | let json: Value = res.body_json().await.map_err(|e| anyhow!(e))?; 110 | 111 | let status = match json["sid"].as_str() { 112 | Some(p) => p, 113 | None => bail!("missing property `sid`"), 114 | }; 115 | 116 | Ok(String::from(status)) 117 | } 118 | 119 | pub async fn verify_check_status(sid: &str) -> Result 120 | { 121 | let path = format!("Verifications/{}", sid); 122 | let url = build_url(Products::Verify, &path)?; 123 | 124 | let mut res = surf::get(url.as_str()) 125 | .header("Authorization", basic_auth()?) 
126 | .await 127 | .map_err(|e| anyhow!(e))?; 128 | 129 | let status = res.status(); 130 | 131 | if status == StatusCode::NotFound { 132 | return Ok("canceled".into()); 133 | } 134 | 135 | if status != StatusCode::Ok { 136 | log::error!("unexpected twilio verify check response: {}", status); 137 | bail!("unexpected twilio response {}", status); 138 | } 139 | 140 | let json: Value = res.body_json().await.map_err(|e| anyhow!(e))?; 141 | 142 | let status = match json["status"].as_str() { 143 | Some(p) => p, 144 | None => bail!("missing property `status`"), 145 | }; 146 | 147 | Ok(String::from(status)) 148 | } 149 | 150 | pub async fn verify_status_update(sid: &str, status: &str) -> Result<()> 151 | { 152 | #[derive(Debug, Serialize, Deserialize)] 153 | #[allow(non_snake_case)] 154 | struct Form<'a> 155 | { 156 | Status: &'a str, 157 | } 158 | 159 | let form = Form { Status: status }; 160 | 161 | let path = format!("Verifications/{}", sid); 162 | let url = build_url(Products::Verify, &path)?; 163 | 164 | let mut res = surf::post(url.as_str()) 165 | .header("Authorization", basic_auth()?) 166 | .body(Body::from_form(&form).map_err(|e| anyhow!(e))?) 
167 | .await 168 | .map_err(|e| anyhow!(e))?; 169 | 170 | let status = res.status(); 171 | 172 | if status != StatusCode::Ok { 173 | log::error!("unexpected twilio verify update response: {}", status); 174 | bail!("unexpected twilio response {}", status); 175 | } 176 | 177 | let json: Value = res.body_json().await.map_err(|e| anyhow!(e))?; 178 | 179 | let status = match json["status"].as_str() { 180 | Some(p) => p, 181 | None => bail!("missing property `status`"), 182 | }; 183 | 184 | if form.Status == status { 185 | Ok(()) 186 | } else { 187 | bail!("update mismatch"); 188 | } 189 | } 190 | 191 | pub async fn verify_code_submit(phone: &str, code: &str) -> Result 192 | { 193 | #[derive(Debug, Serialize, Deserialize)] 194 | #[allow(non_snake_case)] 195 | struct Form<'a> 196 | { 197 | To: &'a str, 198 | Code: &'a str, 199 | } 200 | 201 | let form = Form { To: phone, Code: code }; 202 | 203 | let url = build_url(Products::Verify, "VerificationCheck")?; 204 | 205 | let mut res = surf::post(url.as_str()) 206 | .header("Authorization", basic_auth()?) 207 | .body(Body::from_form(&form).map_err(|e| anyhow!(e))?) 208 | .await 209 | .map_err(|e| anyhow!(e))?; 210 | 211 | let status = res.status(); 212 | if status != 200 { 213 | log::error!("unexpected twilio verify confirm response: {}", status); 214 | bail!("unexpected twilio response {}", status); 215 | } 216 | 217 | let json: Value = res.body_json().await.map_err(|e| anyhow!(e))?; 218 | 219 | let status = match json["status"].as_str() { 220 | Some(p) => p, 221 | None => bail!("missing property `status`"), 222 | }; 223 | 224 | Ok(String::from(status)) 225 | } 226 | 227 | fn build_url(product: Products, function: &str) -> Result 228 | { 229 | // e.g. 
twilio.com/v2 230 | let twilio_endpoint = env::var("TWILIO_API_ENDPOINT")?; 231 | 232 | let mut url = surf::Url::parse(&twilio_endpoint)?; 233 | 234 | let service_sid = env::var("TWILIO_SERVICE_SID")?; 235 | 236 | let host = url.host().ok_or_else(|| anyhow!("missing twilio host"))?; 237 | let host = format!("{}.{}", product.as_ref(), host); 238 | 239 | url.set_host(Some(&host))?; 240 | url.set_scheme("https").map_err(|_| anyhow!("bad scheme"))?; 241 | 242 | { 243 | let mut segments = url 244 | .path_segments_mut() 245 | .map_err(|_| anyhow!("bad url path segments"))?; 246 | 247 | match product { 248 | Products::Lookups => segments.pop_if_empty().extend(&["v2"]), 249 | Products::Verify => segments.pop_if_empty().extend(&[ 250 | "v2", 251 | "Services", 252 | &service_sid, 253 | ]), 254 | }; 255 | 256 | segments.extend(function.split("/")); 257 | } 258 | 259 | Ok(url) 260 | } 261 | 262 | fn basic_auth() -> Result 263 | { 264 | let account_sid = env::var("TWILIO_ACCOUNT_SID")?; 265 | let auth_token = env::var("TWILIO_AUTH_TOKEN")?; 266 | 267 | let credential = format!("{}:{}", account_sid, auth_token); 268 | 269 | Ok(format!("Basic {}", base64::encode(credential))) 270 | } 271 | -------------------------------------------------------------------------------- /api/src/verify_token.rs: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright (C) 2022 WithUno, Inc. 3 | // All rights reserved. 
4 | // 5 | // SPDX-License-Identifier: AGPL-3.0-only 6 | // 7 | 8 | pub use crate::store::Database; 9 | 10 | use std::result; 11 | 12 | use uno::{Mu, UnverifiedToken, VerifiedToken}; 13 | 14 | use chrono::{DateTime, Utc}; 15 | use serde_json::Error as SerdeError; 16 | 17 | use thiserror::Error; 18 | 19 | use serde::{Deserialize, Serialize}; 20 | use serde_json::json; 21 | 22 | #[derive(Error, Debug)] 23 | pub enum VerifyTokenError 24 | { 25 | #[error("Serde error")] 26 | Serde 27 | { 28 | #[from] 29 | source: SerdeError, 30 | }, 31 | #[error("Bad secret")] 32 | Secret, 33 | #[error("Expired")] 34 | Expired, 35 | #[error("Already verified")] 36 | Done, 37 | #[error("Not found")] 38 | NotFound, 39 | #[error("Unsupported schema version")] 40 | Schema, 41 | #[error("Unknown verify token error")] 42 | Unknown, 43 | } 44 | 45 | type Result = result::Result; 46 | 47 | #[derive(PartialEq, Debug)] 48 | pub enum VerificationStatus 49 | { 50 | Verified(String), 51 | Pending(String, PreviousStatus), 52 | Unverified, 53 | } 54 | 55 | #[derive(PartialEq, Debug)] 56 | pub enum PreviousStatus 57 | { 58 | Verified(String), 59 | Unverified, 60 | } 61 | 62 | pub async fn get(db: &impl Database, id: &str) -> Result 63 | { 64 | let pending_key = format!("pending/{}", id); 65 | let entries_key = format!("entries/{}", id); 66 | 67 | let pending_exists = 68 | db.exists(&pending_key).await.map_err(|_| VerifyTokenError::Unknown)?; 69 | 70 | let entry_exists = 71 | db.exists(&entries_key).await.map_err(|_| VerifyTokenError::Unknown)?; 72 | 73 | if pending_exists { 74 | // TODO: && not expired? 
75 | 76 | let pending_bytes = db 77 | .get(&pending_key) 78 | .await 79 | .map_err(|_| VerifyTokenError::Unknown)?; 80 | let pending_entry: UnverifiedToken = 81 | serde_json::from_slice(&pending_bytes)?; 82 | 83 | if entry_exists { 84 | let entry_bytes = db 85 | .get(&entries_key) 86 | .await 87 | .map_err(|_| VerifyTokenError::Unknown)?; 88 | let verified_entry: VerifiedToken = 89 | serde_json::from_slice(&entry_bytes)?; 90 | 91 | return Ok(VerificationStatus::Pending( 92 | pending_entry.email, 93 | PreviousStatus::Verified(verified_entry.email), 94 | )); 95 | } else { 96 | return Ok(VerificationStatus::Pending( 97 | pending_entry.email, 98 | PreviousStatus::Unverified, 99 | )); 100 | } 101 | } else { 102 | if entry_exists { 103 | let entry_bytes = db 104 | .get(&entries_key) 105 | .await 106 | .map_err(|_| VerifyTokenError::Unknown)?; 107 | let verified_entry: VerifiedToken = 108 | serde_json::from_slice(&entry_bytes)?; 109 | 110 | return Ok(VerificationStatus::Verified(verified_entry.email)); 111 | } 112 | } 113 | 114 | return Ok(VerificationStatus::Unverified); 115 | } 116 | 117 | #[derive(Serialize, Deserialize, Debug)] 118 | pub struct LookupItem 119 | { 120 | pub id: String, 121 | } 122 | 123 | pub async fn get_by_email( 124 | db: &impl Database, 125 | email: &str, 126 | include_pending: Option, 127 | ) -> Result 128 | { 129 | let key = format!("lookup/{}", email); 130 | 131 | if db.exists(&key).await.map_err(|_| VerifyTokenError::Unknown)? { 132 | Ok(true) 133 | } else { 134 | if let Some(true) = include_pending { 135 | let key = format!("pending/email-cache/{}", email); 136 | if db.exists(&key).await.map_err(|_| VerifyTokenError::Unknown)? 
{ 137 | return Ok(true); 138 | } 139 | } 140 | Ok(false) 141 | } 142 | } 143 | 144 | 145 | pub async fn create( 146 | db: &impl Database, 147 | id: &str, 148 | analytics_id: &str, 149 | email: &str, 150 | expires_at: DateTime, 151 | ) -> Result 152 | { 153 | let key = format!("pending/{}", id); 154 | 155 | let secret = Mu::new(); 156 | let encoded_secret = base64::encode(secret.0); 157 | 158 | let token = 159 | UnverifiedToken::new(email, analytics_id, encoded_secret, expires_at); 160 | 161 | let bytes = serde_json::to_vec(&token) 162 | .map_err(|e| VerifyTokenError::Serde { source: e })?; 163 | 164 | let _ = 165 | db.put(&key, &bytes).await.map_err(|_| VerifyTokenError::Unknown)?; 166 | 167 | let cache_key = format!("pending/email-cache/{}", email); 168 | let cbytes = serde_json::to_vec(&json!(true)) 169 | .map_err(|e| VerifyTokenError::Serde { source: e })?; 170 | let _ = db 171 | .put(&cache_key, &cbytes) 172 | .await 173 | .map_err(|_| VerifyTokenError::Unknown)?; 174 | 175 | Ok(token) 176 | } 177 | 178 | pub async fn verify( 179 | db: &impl Database, 180 | id: &str, 181 | secret: &str, 182 | ) -> Result 183 | { 184 | let pending_key = format!("pending/{}", id); 185 | let entries_key = format!("entries/{}", id); 186 | 187 | // get pending entry 188 | // match secrets 189 | // delete old entry 190 | // delete pending entry 191 | // commit new entry 192 | 193 | let pending_exists = 194 | db.exists(&pending_key).await.map_err(|_| VerifyTokenError::Unknown)?; 195 | if !pending_exists { 196 | return Err(VerifyTokenError::NotFound); 197 | } 198 | 199 | let pending_bytes = 200 | db.get(&pending_key).await.map_err(|_| VerifyTokenError::Unknown)?; 201 | 202 | let pending_token: UnverifiedToken = 203 | serde_json::from_slice(&pending_bytes)?; 204 | 205 | // check 206 | 207 | if Utc::now() > pending_token.expires_at { 208 | return Err(VerifyTokenError::Expired); 209 | } 210 | 211 | if secret != pending_token.secret { 212 | return Err(VerifyTokenError::Secret); 213 | } 214 
| 215 | // request is allowed 216 | 217 | let _ = 218 | db.del(&pending_key).await.map_err(|_| VerifyTokenError::Unknown)?; 219 | 220 | let pending_cache_key = 221 | format!("pending/email-cache/{}", pending_token.email); 222 | let _ = db 223 | .del(&pending_cache_key) 224 | .await 225 | .map_err(|_| VerifyTokenError::Unknown)?; 226 | 227 | let lookup_key = format!("lookup/{}", pending_token.email); 228 | 229 | let old_exists = 230 | db.exists(&lookup_key).await.map_err(|_| VerifyTokenError::Unknown)?; 231 | if old_exists { 232 | let old_bytes = 233 | db.get(&lookup_key).await.map_err(|_| VerifyTokenError::Unknown)?; 234 | let old_item: LookupItem = serde_json::from_slice(&old_bytes)?; 235 | let old_entry_key = format!("entries/{}", old_item.id); 236 | db.del(&old_entry_key).await.map_err(|_| VerifyTokenError::Unknown)?; 237 | 238 | db.del(&lookup_key).await.map_err(|_| VerifyTokenError::Unknown)?; 239 | } 240 | 241 | let verified_token = 242 | VerifiedToken::new(pending_token.email, pending_token.analytics_id); 243 | 244 | let verified_token_bytes = serde_json::to_vec(&verified_token)?; 245 | 246 | let _ = db 247 | .put(&entries_key, &verified_token_bytes) 248 | .await 249 | .map_err(|_| VerifyTokenError::Unknown)?; 250 | 251 | let item = LookupItem { id: id.into() }; 252 | let item_bytes = serde_json::to_vec(&item)?; 253 | 254 | let _ = db 255 | .put(&lookup_key, &item_bytes) 256 | .await 257 | .map_err(|_| VerifyTokenError::Unknown)?; 258 | 259 | return Ok(verified_token); 260 | } 261 | 262 | #[cfg(test)] 263 | mod tests 264 | { 265 | use super::*; 266 | use chrono::Duration; 267 | 268 | #[cfg(not(feature = "s3"))] 269 | use crate::store::FileStore; 270 | 271 | #[cfg(not(feature = "s3"))] 272 | #[async_std::test] 273 | async fn test_by_email() 274 | { 275 | let dir = tempfile::TempDir::new().unwrap(); 276 | let db = FileStore::new(dir.path(), "test", "v0").await.unwrap(); 277 | 278 | let id = "some id"; 279 | let encoded_id = base64::encode_config(id, 
base64::URL_SAFE_NO_PAD); 280 | 281 | assert_eq!( 282 | get_by_email(&db, "email", Some(false)).await.unwrap(), 283 | false 284 | ); 285 | 286 | assert_eq!( 287 | get_by_email(&db, "email", Some(true)).await.unwrap(), 288 | false 289 | ); 290 | 291 | let token = create( 292 | &db, 293 | &encoded_id, 294 | "analytics_id", 295 | "email", 296 | Utc::now() + Duration::days(30), 297 | ) 298 | .await 299 | .unwrap(); 300 | 301 | assert_eq!( 302 | get_by_email(&db, "email", Some(false)).await.unwrap(), 303 | false 304 | ); 305 | 306 | assert_eq!(get_by_email(&db, "email", Some(true)).await.unwrap(), true); 307 | 308 | 309 | verify(&db, &encoded_id, &token.secret).await.unwrap(); 310 | 311 | assert_eq!( 312 | get_by_email(&db, "email", Some(false)).await.unwrap(), 313 | true, 314 | ); 315 | } 316 | 317 | #[cfg(not(feature = "s3"))] 318 | #[async_std::test] 319 | async fn test_statuses() 320 | { 321 | let dir = tempfile::TempDir::new().unwrap(); 322 | let db = FileStore::new(dir.path(), "test", "v0").await.unwrap(); 323 | 324 | let id = "some id"; 325 | let encoded_id = base64::encode_config(id, base64::URL_SAFE_NO_PAD); 326 | 327 | // no token 328 | assert_eq!( 329 | get(&db, &encoded_id).await.unwrap(), 330 | VerificationStatus::Unverified 331 | ); 332 | 333 | let token = create( 334 | &db, 335 | &encoded_id, 336 | "analytics_id", 337 | "email", 338 | Utc::now() + Duration::days(30), 339 | ) 340 | .await 341 | .unwrap(); 342 | 343 | // pending token, no previous 344 | assert_eq!( 345 | get(&db, &encoded_id).await.unwrap(), 346 | VerificationStatus::Pending( 347 | "email".to_string(), 348 | PreviousStatus::Unverified 349 | ) 350 | ); 351 | 352 | // verified token 353 | verify(&db, &encoded_id, &token.secret).await.unwrap(); 354 | assert_eq!( 355 | get(&db, &encoded_id).await.unwrap(), 356 | VerificationStatus::Verified("email".to_string()) 357 | ); 358 | 359 | // re-verify, same email 360 | create( 361 | &db, 362 | &encoded_id, 363 | "analytics_id", 364 | "email", 365 | 
Utc::now() + Duration::days(30), 366 | ) 367 | .await 368 | .unwrap(); 369 | assert_eq!( 370 | get(&db, &encoded_id).await.unwrap(), 371 | VerificationStatus::Pending( 372 | "email".to_string(), 373 | PreviousStatus::Verified("email".to_string()) 374 | ) 375 | ); 376 | 377 | // re-verify, different email 378 | create( 379 | &db, 380 | &encoded_id, 381 | "analytics_id", 382 | "email2", 383 | Utc::now() + Duration::days(30), 384 | ) 385 | .await 386 | .unwrap(); 387 | assert_eq!( 388 | get(&db, &encoded_id).await.unwrap(), 389 | VerificationStatus::Pending( 390 | "email2".to_string(), 391 | PreviousStatus::Verified("email".to_string()) 392 | ) 393 | ); 394 | } 395 | 396 | #[cfg(not(feature = "s3"))] 397 | #[async_std::test] 398 | async fn test_token_roundtrip() 399 | { 400 | let dir = tempfile::TempDir::new().unwrap(); 401 | let db = FileStore::new(dir.path(), "test", "v0").await.unwrap(); 402 | 403 | let id = "some id"; 404 | let encoded_id = base64::encode_config(id, base64::URL_SAFE_NO_PAD); 405 | 406 | let email = "user@example.com"; 407 | 408 | let result = verify(&db, &encoded_id, "secret").await; 409 | 410 | assert_eq!( 411 | VerifyTokenError::NotFound.to_string(), 412 | result.err().unwrap().to_string() 413 | ); 414 | 415 | let mut u = create( 416 | &db, 417 | &encoded_id, 418 | "analytics_id", 419 | email, 420 | Utc::now() - Duration::days(1), 421 | ) 422 | .await 423 | .unwrap(); 424 | 425 | let result = verify(&db, &encoded_id, &u.secret).await; 426 | 427 | assert_eq!( 428 | VerifyTokenError::Expired.to_string(), 429 | result.err().unwrap().to_string() 430 | ); 431 | 432 | u = create( 433 | &db, 434 | &encoded_id, 435 | "analytics_id", 436 | email, 437 | Utc::now() + Duration::days(1), 438 | ) 439 | .await 440 | .unwrap(); 441 | 442 | let result = verify(&db, &encoded_id, "some other secret").await; 443 | 444 | assert_eq!( 445 | VerifyTokenError::Secret.to_string(), 446 | result.err().unwrap().to_string() 447 | ); 448 | 449 | let result = verify(&db, 
&encoded_id, &u.secret).await.unwrap(); 450 | assert_eq!(result.email, "user@example.com"); 451 | } 452 | } 453 | -------------------------------------------------------------------------------- /api/tests/service_list.rs: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright (C) 2021 WithUno, Inc. 3 | // All rights reserved. 4 | // 5 | // SPDX-License-Identifier: AGPL-3.0-only 6 | // 7 | 8 | #[cfg(test)] 9 | use serde_json::{Result, Value}; 10 | use std::fs; 11 | 12 | #[test] 13 | fn service_list_is_valid() 14 | { 15 | // try to deserialize the service list file and hope for the best! 16 | let contents = fs::read_to_string("tests/services.json").unwrap(); 17 | 18 | let v: Result = serde_json::from_str(&contents); 19 | 20 | assert!(v.is_ok()); 21 | } 22 | -------------------------------------------------------------------------------- /api/tests/services.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "vault_name": "amazon", 4 | "display_name": "Amazon", 5 | "matching_hosts": ["www.amazon.com"], 6 | "login_urls": ["/ap/signin"], 7 | "brand_color": "#FF9900" 8 | }, 9 | { 10 | "vault_name": "box", 11 | "display_name": "Box", 12 | "matching_hosts": ["account.box.com", "box.com"], 13 | "login_urls": ["/login"], 14 | "image_url_string": "https://www.google.com/s2/favicons?sz=20&domain=box.com", 15 | "image_needs_inset": true, 16 | "brand_color": "#2760CD" 17 | }, 18 | { 19 | "vault_name": "chase", 20 | "display_name": "Chase", 21 | "matching_hosts": ["www.chase.com"], 22 | "login_urls": ["/login"], 23 | "image_url_string": "https://www.google.com/s2/favicons?sz=20&domain=chase.com", 24 | "image_needs_inset": true, 25 | "brand_color": "#117ACA", 26 | "uses_premium_sms": true 27 | }, 28 | { 29 | "vault_name": "discord", 30 | "display_name": "Discord", 31 | "matching_hosts": ["discord.com"], 32 | "login_urls": ["/login"], 33 | "brand_color": "#5865F2" 34 | }, 35 | { 36 
| "vault_name": "dropbox", 37 | "display_name": "Dropbox", 38 | "matching_hosts": ["www.dropbox.com"], 39 | "login_urls": ["/login"], 40 | "brand_color": "#0061FF", 41 | "needs_visible_web_view": true 42 | }, 43 | { 44 | "vault_name": "facebook", 45 | "display_name": "Facebook", 46 | "matching_hosts": ["mbasic.facebook.com", "www.facebook.com"], 47 | "login_urls": ["/login"], 48 | "brand_color": "#1877F2" 49 | }, 50 | { 51 | "vault_name": "github", 52 | "display_name": "GitHub", 53 | "matching_hosts": ["github.com"], 54 | "login_urls": ["/login"], 55 | "text_color": "black", 56 | "restricted_urls": ["https://github.com/settings/two_factor_authentication/setup/recovery_download"] 57 | }, 58 | { 59 | "vault_name": "google", 60 | "display_name": "Google", 61 | "matching_hosts": ["accounts.google.com"], 62 | "text_color": "black", 63 | "login_query_params": "?hl=en", 64 | "needs_visible_web_view": true 65 | }, 66 | { 67 | "vault_name": "reddit", 68 | "display_name": "Reddit", 69 | "matching_hosts": ["www.reddit.com"], 70 | "login_urls": ["/login"], 71 | "brand_color": "#FF4500" 72 | }, 73 | { 74 | "vault_name": "robinhood", 75 | "display_name": "Robinhood", 76 | "matching_hosts": ["robinhood.com"], 77 | "login_urls": ["/login"], 78 | "image_needs_inset": true, 79 | "brand_color": "#5BC53B" 80 | }, 81 | { 82 | "vault_name": "shopify", 83 | "display_name": "Shopify", 84 | "matching_hosts": ["accounts.shopify.com"], 85 | "login_urls": ["/lookup"], 86 | "image_needs_inset": true, 87 | "brand_color": "#96BF48" 88 | }, 89 | { 90 | "vault_name": "twitter", 91 | "display_name": "Twitter", 92 | "matching_hosts": ["mobile.twitter.com", "twitter.com"], 93 | "login_urls": ["/login", "/i/flow/login"], 94 | "change_password_url": "/.well-known/change-password", 95 | "enroll_mfa_url": "/settings/account/login_verification/enrollment", 96 | "brand_color": "#1DA1F2" 97 | } 98 | ] 99 | -------------------------------------------------------------------------------- /cli/Cargo.toml: 
-------------------------------------------------------------------------------- 1 | [package] 2 | name = "cli" 3 | version = "0.1.0" 4 | authors = ["David Cowden "] 5 | license = "AGPL-3.0-only" 6 | edition = "2021" 7 | 8 | [[bin]] 9 | name = "uno" 10 | path = "src/main.rs" 11 | 12 | [dependencies] 13 | api = { path = "../api" } 14 | uno = { path = "../lib" } 15 | djb = { path = "../djb" } 16 | 17 | ansi_term = "0.12" 18 | anyhow = "1.0" 19 | argon2 = "0.2" 20 | async-std = "1.9" 21 | base64 = "0.13" 22 | blake3 = "0.3" 23 | clap = { version = "3", features = ["derive", "wrap_help"] } 24 | chrono = "0.4" 25 | dirs-next = "2.0" 26 | http-types = "2.10" 27 | rand = "0.8" 28 | serde = { version = "1.0", features = ["derive"] } 29 | serde_json = "1.0" 30 | regex = "1.5" 31 | ron = "0.7" 32 | surf = "2.3" 33 | uuid = { version = "0.8", features = ["serde", "v4"] } 34 | vclock = "0.2" 35 | -------------------------------------------------------------------------------- /cli/README.md: -------------------------------------------------------------------------------- 1 | uno cli 2 | === 3 | 4 | A command-line interface for working with uno data and services. 5 | 6 | At a high level, the CLI allows you to generate entropy for an Uno ID and perform useful operations with it. 7 | For example, you can use the CLI to sign and encrypt messages, split and recombine your ID, and post ephemeral sessions. 8 | You can also add your info to the Uno directory, and verify an email. 9 | 10 | The CLI stores working information like your ID and config options in the `~/.uno` directory. 11 | 12 | 13 | # Overview 14 | 15 | The `uno` CLI program supports performing basic crypto operations with an uno identity (such as deriving keys and performing key split and recombination logic) as well as interfacing with the API server. The CLI is not a fully functional Uno client at the moment. Consult the [issue tracker][issues] for details. 
16 | 17 | [issues]: https://github.com/withuno/identity/issues?q=is%3Aissue+is%3Aopen+label%3Acli-client 18 | 19 | # Examples 20 | 21 | The cli is pretty self-explanatory. 22 | Run `uno [help]` (or use `cargo run` in this directory) to get going: 23 | 24 | ``` 25 | $ uno help 26 | cli 0.1 27 | David C. 28 | 29 | USAGE: 30 | uno 31 | 32 | OPTIONS: 33 | -h, --help Print help information 34 | -V, --version Print version information 35 | 36 | SUBCOMMANDS: 37 | decrypt AEAD open The decrypt operation works with both 32 byte 38 | identity seeds and the 8 byte Mu. The actual symmetric key is 39 | derived appropriate in each case 40 | encrypt AEAD seal. The encrypt operation works with both 32 byte 41 | identity seeds and the 8 byte Mu. The actual symmetric key is 42 | derived appropriate in each case 43 | help Print this message or the help of the given subcommand(s) 44 | mu Generate an uno shamir's secert sharing session entropy seed 45 | pubkey Print the public key corresponding to the signing keypair 46 | s39 SLIP-0039 Options 47 | seed Generate an uno identity 48 | session Print the session id derived from Mu entropy 49 | sign Sign a message using an Uno ID 50 | ssss Shamir's secret sharing session operations 51 | vault Operate on a vault 52 | verify Verify a signature on a message 53 | ``` 54 | 55 | To dive in, add a subcommand: 56 | 57 | ``` 58 | $ uno help seed 59 | uno-seed 60 | Generate an uno identity. An identity seed is 32 bytes of entropy. The base64 61 | encoding of the entropy is written to standard out. 62 | 63 | USAGE: 64 | uno seed 65 | 66 | OPTIONS: 67 | -h, --help Print help information 68 | ``` 69 | 70 | If the CLI does not provide enough information from there on out, please file an issue. 
71 | 72 | -------------------------------------------------------------------------------- /djb/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "djb" 3 | version = "0.1.0" 4 | authors = ["David Cowden "] 5 | license = "AGPL-3.0-only" 6 | edition = "2021" 7 | 8 | [dependencies] 9 | chacha20poly1305 = { version = "0.7.1", features = ["std"] } 10 | ed25519-dalek = "2.0" 11 | rand = "0.8.3" 12 | strum = "0.20" 13 | strum_macros = "0.20" 14 | 15 | [dev-dependencies] 16 | base64 = "0.13" 17 | -------------------------------------------------------------------------------- /djb/README.md: -------------------------------------------------------------------------------- 1 | djb 2 | === 3 | 4 | An uno wrapper around EdDSA and ChaCha20Poly1305 crypto operations. 5 | -------------------------------------------------------------------------------- /djb/src/lib.rs: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright (C) 2021 WithUno, Inc. 3 | // All rights reserved. 
4 | // 5 | // SPDX-License-Identifier: AGPL-3.0-only 6 | // 7 | 8 | pub use ed25519_dalek::Signature; 9 | pub use ed25519_dalek::Signer; 10 | pub use ed25519_dalek::Verifier; 11 | pub type PublicKey = ed25519_dalek::VerifyingKey; 12 | pub type PrivateKey = ed25519_dalek::SecretKey; 13 | pub type KeyPair = ed25519_dalek::SigningKey; 14 | 15 | pub type SymmetricKey = chacha20poly1305::Key; 16 | pub type Error = aead::Error; 17 | 18 | pub const PRIVATE_KEY_LENGTH: usize = ed25519_dalek::SECRET_KEY_LENGTH; 19 | pub const PUBLIC_KEY_LENGTH: usize = ed25519_dalek::PUBLIC_KEY_LENGTH; 20 | pub const KEYPAIR_LENGTH: usize = ed25519_dalek::KEYPAIR_LENGTH; 21 | pub const SIGNATURE_LENGTH: usize = ed25519_dalek::SIGNATURE_LENGTH; 22 | 23 | use chacha20poly1305::aead; 24 | use chacha20poly1305::aead::{Aead, NewAead, Payload}; 25 | use chacha20poly1305::{ChaCha20Poly1305, Nonce}; 26 | 27 | #[cfg(not(test))] 28 | use rand::RngCore; 29 | 30 | #[cfg(not(test))] 31 | use rand; 32 | 33 | #[cfg(test)] 34 | use test_rand as rand; 35 | 36 | #[cfg(test)] 37 | mod test_rand 38 | { 39 | pub struct R {} 40 | impl R 41 | { 42 | pub fn fill_bytes(&mut self, dest: &mut [u8]) 43 | { 44 | for i in dest.iter_mut() { 45 | *i = 0; 46 | } 47 | } 48 | } 49 | pub fn thread_rng() -> R { R {} } 50 | } 51 | 52 | /// Encrypt data using key and return an opaque blob. The nonce is the first 12 53 | /// bytes of the blob. 54 | pub fn encrypt( 55 | key: SymmetricKey, 56 | data: &[u8], 57 | aad: &[u8], 58 | ) -> Result, aead::Error> 59 | { 60 | let mut nonce = Nonce::default(); 61 | rand::thread_rng().fill_bytes(&mut nonce); 62 | let cipher = ChaCha20Poly1305::new(&key); 63 | let payload = Payload { msg: data, aad: aad }; 64 | let ciphertext = cipher.encrypt(&nonce, payload)?; 65 | let blob = [&nonce.as_slice(), &ciphertext[..]].concat().to_vec(); 66 | 67 | Ok(blob) 68 | } 69 | 70 | /// Decrypt data using key and return the original message. The nonce is the 71 | /// first 12 bytes of data. 
72 | pub fn decrypt( 73 | key: SymmetricKey, 74 | data: &[u8], 75 | aad: &[u8], 76 | ) -> Result, aead::Error> 77 | { 78 | let nonce = Nonce::from_slice(&data[0..12]); 79 | let cipher = ChaCha20Poly1305::new(&key); 80 | let payload = Payload { msg: &data[nonce.len()..], aad: aad }; 81 | 82 | cipher.decrypt(&nonce, payload) 83 | } 84 | 85 | #[cfg(test)] 86 | mod unit 87 | { 88 | use super::*; 89 | 90 | #[test] 91 | fn aead_encrypt() -> Result<(), Box> 92 | { 93 | let key = b"dust has only just begun to form"; 94 | let msg = b"spin me around again"; 95 | let aad = b"hide and seek"; 96 | let sym = SymmetricKey::from_slice(key); 97 | let actual = encrypt(*sym, msg, aad)?; 98 | let expected64 = 99 | "AAAAAAAAAAAAAAAASVL67erDFBxUzRM4trcn565Rqwq7SN7IXH+XfKDX3qMmVCJr"; 100 | let expected = base64::decode(expected64)?; 101 | assert_eq!(expected, &*actual); 102 | 103 | Ok(()) 104 | } 105 | 106 | #[test] 107 | fn aead_decrypt() -> Result<(), Box> 108 | { 109 | let key = b"dust has only just begun to form"; 110 | let blob64 = 111 | "66e/2LzVClrO8V/EhfoDwHUt0J35UB53CvqNgXCysoHy5Sd4yvwe+OufBEsHaHSA"; 112 | let blob = base64::decode(blob64)?; 113 | let aad = b"hide and seek"; 114 | let sym = SymmetricKey::from_slice(key); 115 | let actual = decrypt(*sym, &blob, aad)?; 116 | let expected = b"spin me around again"; 117 | assert_eq!(expected, &*actual); 118 | 119 | Ok(()) 120 | } 121 | } 122 | -------------------------------------------------------------------------------- /ffi/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "ffi" 3 | version = "0.1.0" 4 | authors = ["David Cowden "] 5 | license = "AGPL-3.0-only" 6 | edition = "2021" 7 | 8 | [dependencies] 9 | uno = { path = "../lib" } 10 | 11 | [build-dependencies] 12 | cbindgen = "0.20" 13 | -------------------------------------------------------------------------------- /ffi/build.rs: 
-------------------------------------------------------------------------------- 1 | // 2 | // Copyright (C) 2021 WithUno, Inc. 3 | // All rights reserved. 4 | // 5 | // SPDX-License-Identifier: AGPL-3.0-only 6 | // 7 | 8 | use std::error::Error; 9 | use std::result::Result; 10 | 11 | // 12 | // Generate the C FFI using the cbindgen crate. 13 | // 14 | fn main() -> Result<(), Box> 15 | { 16 | let crate_dir = std::env::var("CARGO_MANIFEST_DIR")?; 17 | let res = cbindgen::generate(crate_dir)?; 18 | res.write_to_file("include/libuno.h"); 19 | Ok(()) 20 | } 21 | -------------------------------------------------------------------------------- /ffi/cbindgen.toml: -------------------------------------------------------------------------------- 1 | # This is a template cbindgen.toml file with all of the default values. 2 | # Some values are commented out because their absence is the real default. 3 | # 4 | # See https://github.com/eqrion/cbindgen/blob/master/docs.md#cbindgentoml 5 | # for detailed documentation of every option here. 6 | 7 | language = "C" 8 | 9 | 10 | ############## Options for Wrapping the Contents of the Header ################# 11 | 12 | header = ''' 13 | // 14 | // Copyright (C) 2021 WithUno, Inc. 15 | // All rights reserved. 16 | // 17 | // SPDX-License-Identifier: AGPL-3.0-only 18 | // 19 | ''' 20 | # trailer = "/* Text to put at the end of the generated file */" 21 | include_guard = "uno_ffi_h" 22 | pragma_once = true 23 | autogen_warning = ''' 24 | // 25 | // ⚠️ Warning! 26 | // 27 | // This file is auto-generated by cbindgen. Modifications must be made to the 28 | // source Rust extern "C" interface specified in the ~uno/identity/ffi crate. 29 | // 30 | // Do not manually modify this file. 
31 | // 32 | ''' 33 | include_version = false 34 | # namespace = "my_namespace" 35 | namespaces = [] 36 | using_namespaces = [] 37 | sys_includes = [] 38 | includes = [] 39 | no_includes = false 40 | after_includes = "" 41 | 42 | 43 | ############################ Code Style Options ################################ 44 | 45 | braces = "NextLine" 46 | line_length = 80 47 | tab_width = 2 48 | documentation = true 49 | documentation_style = "auto" 50 | line_endings = "LF" # also "CR", "CRLF", "Native" 51 | 52 | 53 | ############################# Codegen Options ################################## 54 | 55 | style = "type" 56 | sort_by = "None" 57 | usize_is_size_t = true 58 | 59 | 60 | [defines] 61 | # "target_os = freebsd" = "DEFINE_FREEBSD" 62 | # "feature = serde" = "DEFINE_SERDE" 63 | 64 | 65 | [export] 66 | include = [] 67 | exclude = [] 68 | # prefix = "CAPI_" 69 | item_types = [] 70 | renaming_overrides_prefixing = false 71 | 72 | 73 | [export.rename] 74 | 75 | 76 | [export.body] 77 | 78 | 79 | [export.mangle] 80 | 81 | 82 | [fn] 83 | rename_args = "None" 84 | # must_use = "MUST_USE_FUNC" 85 | # no_return = "NO_RETURN" 86 | # prefix = "START_FUNC" 87 | # postfix = "END_FUNC" 88 | args = "auto" 89 | sort_by = "None" 90 | 91 | 92 | [struct] 93 | rename_fields = "None" 94 | # must_use = "MUST_USE_STRUCT" 95 | derive_constructor = false 96 | derive_eq = false 97 | derive_neq = false 98 | derive_lt = false 99 | derive_lte = false 100 | derive_gt = false 101 | derive_gte = false 102 | 103 | 104 | [enum] 105 | rename_variants = "None" 106 | # must_use = "MUST_USE_ENUM" 107 | add_sentinel = false 108 | prefix_with_name = false 109 | derive_helper_methods = false 110 | derive_const_casts = false 111 | derive_mut_casts = false 112 | # cast_assert_name = "ASSERT" 113 | derive_tagged_enum_destructor = false 114 | derive_tagged_enum_copy_constructor = false 115 | enum_class = true 116 | private_default_tagged_enum_constructor = false 117 | 118 | 119 | [const] 120 | 
allow_static_const = true 121 | allow_constexpr = false 122 | sort_by = "None" 123 | 124 | 125 | [ptr] 126 | # An optional string to decorate all pointers that are 127 | # required to be non null. Nullability is inferred from the Rust type: `&T`, 128 | # `&mut T` and `NonNull` all require a valid pointer value. 129 | # 130 | # clang uses _Nonnull and __nonnull, _Nullable and __nullable: 131 | # 132 | # https://clang.llvm.org/docs/AttributeReference.html#nullability-attributes 133 | # https://clang.llvm.org/docs/analyzer/developer-docs/nullability.html 134 | # 135 | non_null_attribute = "_Nonnull" 136 | 137 | 138 | [macro_expansion] 139 | bitflags = false 140 | 141 | 142 | ############## Options for How Your Rust library Should Be Parsed ############## 143 | 144 | [parse] 145 | parse_deps = true 146 | include = ["uno", "s39", "sssmc39"] 147 | exclude = [] 148 | clean = false 149 | extra_bindings = [] 150 | 151 | 152 | [parse.expand] 153 | crates = [] 154 | all_features = false 155 | default_features = true 156 | features = [] 157 | 158 | -------------------------------------------------------------------------------- /ffi/include/libuno.h: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright (C) 2021 WithUno, Inc. 3 | // All rights reserved. 4 | // 5 | // SPDX-License-Identifier: AGPL-3.0-only 6 | // 7 | 8 | 9 | #ifndef uno_ffi_h 10 | #define uno_ffi_h 11 | 12 | #pragma once 13 | 14 | // 15 | // ⚠️ Warning! 16 | // 17 | // This file is auto-generated by cbindgen. Modifications must be made to the 18 | // source Rust extern "C" interface specified in the ~uno/identity/ffi crate. 19 | // 20 | // Do not manually modify this file. 
21 | // 22 | 23 | 24 | #include 25 | #include 26 | #include 27 | #include 28 | #include 29 | 30 | 31 | #define UNO_ERR_SUCCESS 0 32 | 33 | #define UNO_ERR_ILLEGAL_ARG 1 34 | 35 | #define UNO_ERR_SPLIT 2 36 | 37 | #define UNO_ERR_COMBINE 3 38 | 39 | #define UNO_ERR_SHARE_ID 4 40 | 41 | #define UNO_ERR_SHARE_MISS 5 42 | 43 | #define UNO_ERR_CHECKSUM 6 44 | 45 | #define UNO_ERR_MNEMONIC 7 46 | 47 | /** 48 | * An uno identity newtype. 49 | */ 50 | typedef struct Id Id; 51 | 52 | /** 53 | * 54 | * Opaque array containing share metadata. Get a member share by index using 55 | * `uno_get_member_share_by_index`. 56 | * 57 | */ 58 | typedef struct UnoMemberSharesVec UnoMemberSharesVec; 59 | 60 | /** 61 | * 62 | * A SplitResult is the output of successfully running `uno_s39_split` on an 63 | * UnoId. The structure represents an opaque array of UnoGroupSplit structs. 64 | * 65 | */ 66 | typedef struct UnoSplitResult UnoSplitResult; 67 | 68 | /** 69 | * 70 | * 32 bytes of seed entropy. See uno::Id. 71 | * 72 | */ 73 | typedef Id UnoId; 74 | 75 | /** 76 | * 77 | * UnoByteSlice can be treated like an array of uint8_t bytes on the C side. 78 | * You may not modify the bytes and the struct must be freed once it is no 79 | * longer needed. 80 | * 81 | */ 82 | typedef struct 83 | { 84 | const uint8_t *ptr; 85 | size_t len; 86 | size_t _cap; 87 | } UnoByteSlice; 88 | 89 | /** 90 | * 91 | * A GroupSpec is a tuple of (threshold, total) shares in a given s39 group 92 | * split. For instance, if you want a group to be split into 3 pieces, two 93 | * of which are required to reconstitute the group secret, you'd pass (2, 3). 94 | * 95 | */ 96 | typedef struct 97 | { 98 | uint8_t threshold; 99 | uint8_t total; 100 | } UnoGroupSpec; 101 | 102 | /** 103 | * 104 | * A GroupSplit contains metadata related to one of the groups of shares 105 | * requested during the split call. The actual shares are contained in the 106 | * opaque UnoMemberSharesVec struct.
107 | * 108 | */ 109 | typedef struct 110 | { 111 | uint16_t group_id; 112 | uint8_t iteration_exponent; 113 | uint8_t group_index; 114 | uint8_t group_threshold; 115 | uint8_t group_count; 116 | /** 117 | * The number of shares from this group required to reconstitue the group 118 | * secret. 119 | */ 120 | uint8_t member_threshold; 121 | /** 122 | * Total number of member_shares 123 | */ 124 | size_t share_count; 125 | /** 126 | * Opaque reference to the constituent member shares. Acquire one of the 127 | * shares with `uno_get_member_share_by_index`. 128 | */ 129 | const UnoMemberSharesVec *member_shares; 130 | } UnoGroupSplit; 131 | 132 | /** 133 | * 134 | * Share mnemonic string. Obtained by index from an UnoGroupSplit type using 135 | * `uno_get_s39_share_by_index`. The mnemonic share data is a c string 136 | * reference and can be handled in a read-only (const) fashion using the 137 | * standard c string api. An UnoShare must be freed using `uno_free_s39_share` 138 | * when you are done using it. 139 | * 140 | */ 141 | typedef struct 142 | { 143 | const char *mnemonic; 144 | } UnoShare; 145 | 146 | /** 147 | * 148 | * Share metadata struct. Metadata about a share can be obtained by calling 149 | * `uno_get_share_metadata` with an UnoS39Share. 150 | * 151 | */ 152 | typedef struct 153 | { 154 | /** 155 | * Random 15 bit value which is the same for all shares and is used to 156 | * verify that the shares belong together; it is also used as salt in the 157 | * encryption of the master secret. (15 bits) 158 | */ 159 | uint16_t identifier; 160 | /** 161 | * Indicates the total number of iterations to be used in PBKDF2. The 162 | * number of iterations is calculated as 10000x2^e. (5 bits) 163 | */ 164 | uint8_t iteration_exponent; 165 | /** 166 | * The x value of the group share (4 bits) 167 | */ 168 | uint8_t group_index; 169 | /** 170 | * indicates how many group shares are needed to reconstruct the master 171 | * secret. 
The actual value is endoded as Gt = GT - 1, so a value of 0 172 | * indicates that a single group share is needed (GT = 1), a value of 1 173 | * indicates that two group shares are needed (GT = 2) etc. (4 bits) 174 | */ 175 | uint8_t group_threshold; 176 | /** 177 | * indicates the total number of groups. The actual value is encoded as 178 | * g = G - 1 (4 bits) 179 | */ 180 | uint8_t group_count; 181 | /** 182 | * Member index, or x value of the member share in the given group (4 bits) 183 | */ 184 | uint8_t member_index; 185 | /** 186 | * indicates how many member shares are needed to reconstruct the group 187 | * share. The actual value is encoded as t = T − 1. (4 bits) 188 | */ 189 | uint8_t member_threshold; 190 | /** 191 | * corresponds to a list of the SSS part's fk(x) values 1 ≤ k ≤ n. Each 192 | * fk(x) value is encoded as a string of eight bits in big-endian order. 193 | * The concatenation of these bit strings is the share value. This value is 194 | * left-padded with "0" bits so that the length of the padded share value 195 | * in bits becomes the nearest multiple of 10. (padding + 8n bits) 196 | */ 197 | UnoByteSlice share_value; 198 | /** 199 | * an RS1024 checksum of the data part of the share 200 | * (that is id || e || GI || Gt || g || I || t || ps). The customization 201 | * string (cs) of RS1024 is "shamir". (30 bits) 202 | */ 203 | uint32_t checksum; 204 | } UnoShareMetadata; 205 | 206 | /** 207 | * 208 | * Get a description for the provided error code. The lifetime of the returned 209 | * string does not need to be managed by the caller. 210 | * 211 | */ 212 | const char *uno_get_msg_from_err(int err); 213 | 214 | /** 215 | * 216 | * Create an uno id struct from a 32 byte seed data array. The caller is 217 | * responsible calling `uno_free_id` on the returned struct once finished. 
218 | * 219 | */ 220 | int uno_get_id_from_bytes(const uint8_t *bytes, size_t len, const UnoId **out); 221 | 222 | /** 223 | * 224 | * Copy the raw 32 bytes backing an uno Id into caller-owned memory. 225 | * 226 | */ 227 | int uno_copy_id_bytes(const UnoId *uno_id, uint8_t *bytes, size_t len); 228 | 229 | /** 230 | * 231 | * Free a previously allocated UnoId from `uno_get_id_from_bytes`. 232 | * 233 | */ 234 | void uno_free_id(UnoId *id); 235 | 236 | /** 237 | * 238 | * Get the raw bytes backing an uno Id. 239 | * 240 | */ 241 | int uno_get_bytes_from_id(const UnoId *uno_id, UnoByteSlice *out); 242 | 243 | /** 244 | * 245 | * Free the backing array on an UnoByteSlice from a function that returns an 246 | * allocated UnoByteSlice, e.g. `uno_get_id_bytes`. 247 | * 248 | */ 249 | void uno_free_byte_slice(UnoByteSlice byte_slice); 250 | 251 | /** 252 | * 253 | * See s39::split. 254 | * 255 | * Rather than an array of tuples, the caller provides an array of GroupSpec 256 | * structs. The group_threshold is fixed at 1 so this parameter is currently 257 | * unused. 258 | * 259 | * Upon success, the SplitResult represents an array of UnoGroupSplits of 260 | * length group_total. 261 | * 262 | */ 263 | int uno_s39_split(const UnoId *uno_id, 264 | size_t _group_threshold, 265 | const UnoGroupSpec *group_specs, 266 | size_t group_total, 267 | const UnoSplitResult **out); 268 | 269 | /** 270 | * 271 | * Free a previously allocated UnoSplitResult from `uno_s39_split`. 272 | * 273 | */ 274 | void uno_free_split_result(UnoSplitResult *split_result); 275 | 276 | /** 277 | * 278 | * Get an UnoGroupSplit by index from an opaque UnoSplitResult. 279 | * 280 | */ 281 | int uno_get_group_from_split_result(const UnoSplitResult *split_result, 282 | size_t index, 283 | UnoGroupSplit *out); 284 | 285 | /** 286 | * 287 | * Free a previously allocated GroupSplit returned by 288 | * `uno_get_group_from_split_result`. 
289 | * 290 | */ 291 | void uno_free_group_split(UnoGroupSplit group_split); 292 | 293 | /** 294 | * 295 | * Returns the actual member share by index. 296 | * 297 | */ 298 | int uno_get_s39_share_by_index(UnoGroupSplit group_split, 299 | uint8_t index, 300 | UnoShare *out); 301 | 302 | /** 303 | * 304 | * Convert a mnemonic string of 33 space separated words to an internal share 305 | * representation. 306 | * 307 | */ 308 | int uno_get_s39_share_from_mnemonic(const char *ptr, UnoShare *out); 309 | 310 | /** 311 | * 312 | * Free a previously allocated share returned by `uno_get_s39_share_by_index` 313 | * or `uno_get_s39_share_from_mnemonic`. 314 | * 315 | */ 316 | void uno_free_s39_share(UnoShare share); 317 | 318 | /** 319 | * 320 | * Get the share metadata from an UnoShare. 321 | * 322 | */ 323 | int uno_get_s39_share_metadata(UnoShare share, UnoShareMetadata *out); 324 | 325 | /** 326 | * 327 | * Free a previously allocated ShareMetadata returned by 328 | * `uno_get_s39_share_metadata`. 329 | * 330 | */ 331 | void uno_free_s39_share_metadata(UnoShareMetadata metadata); 332 | 333 | /** 334 | * 335 | * See s39::combine. 336 | * 337 | * Provided an array of c-stirng s39 shamir's shares, recombine and recover 338 | * the original UnoId. The returned UnoId must be freed using `uno_free_id`. 
339 | * 340 | */ 341 | int uno_s39_combine(const char *const *share_nmemonics, 342 | size_t total_shares, 343 | const UnoId **out); 344 | 345 | #endif /* uno_ffi_h */ 346 | -------------------------------------------------------------------------------- /lib/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "uno" 3 | version = "0.1.0" 4 | authors = ["David Cowden "] 5 | license = "AGPL-3.0-only" 6 | edition = "2021" 7 | 8 | [dependencies] 9 | adi = { path = "../adi" } 10 | djb = { path = "../djb" } 11 | s39 = { path = "../s39" } 12 | 13 | argon2 = "0.1" 14 | base64 = "0.13" 15 | blake3 = "0.3" 16 | chrono = { version = "0.4", features = ["serde"] } 17 | rand = "0.8" 18 | serde = { version = "1.0", features = ["derive"] } 19 | strum = "0.21" 20 | strum_macros = "0.21" 21 | -------------------------------------------------------------------------------- /lib/README.md: -------------------------------------------------------------------------------- 1 | libuno 2 | === 3 | 4 | A library for working with uno identities. 5 | 6 | For now, consult [src/lib.rs][]. 7 | 8 | -------------------------------------------------------------------------------- /lib/src/error.rs: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright (C) 2021 WithUno, Inc. 3 | // All rights reserved. 
4 | // 5 | // SPDX-License-Identifier: AGPL-3.0-only 6 | // 7 | 8 | use std::array; 9 | use std::error; 10 | use std::fmt; 11 | use std::string::FromUtf8Error; 12 | use std::string::String; 13 | 14 | #[derive(Debug)] 15 | pub enum Error 16 | { 17 | /// Error at the uno library level 18 | Uno(String), 19 | /// Underlying crypto error from djb 20 | Curve25519(djb::Error), 21 | /// Shamir error from adi 22 | Shamir(adi::Error), 23 | /// SLIP-0039 Error 24 | S39(s39::Error), 25 | /// Error from `argon2` hash lib 26 | Hash(String), 27 | } 28 | 29 | impl From for Error 30 | { 31 | fn from(e: adi::Error) -> Self { Error::Shamir(e) } 32 | } 33 | 34 | impl From for Error 35 | { 36 | fn from(e: djb::Error) -> Self { Error::Curve25519(e) } 37 | } 38 | 39 | impl From for Error 40 | { 41 | fn from(e: s39::Error) -> Self { Error::S39(e) } 42 | } 43 | 44 | impl From for Error 45 | { 46 | fn from(e: argon2::Error) -> Self { Error::Hash(format!("argon2 - {}", e)) } 47 | } 48 | 49 | impl From for Error 50 | { 51 | fn from(e: array::TryFromSliceError) -> Self 52 | { 53 | Error::Uno(format!("converting slice to uno id failed: {}", e)) 54 | } 55 | } 56 | 57 | impl From for Error 58 | { 59 | fn from(e: FromUtf8Error) -> Self 60 | { 61 | Error::Uno(format!("invalid utf8: {}", e)) 62 | } 63 | } 64 | 65 | impl error::Error for Error 66 | { 67 | fn source(&self) -> Option<&(dyn error::Error + 'static)> 68 | { 69 | match *self { 70 | Error::Uno(_) => None, 71 | Error::Curve25519(ref s) => Some(s), 72 | Error::Hash(_) => None, 73 | Error::Shamir(ref s) => Some(s), 74 | Error::S39(ref s) => Some(s), 75 | } 76 | } 77 | } 78 | 79 | impl fmt::Display for Error 80 | { 81 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result 82 | { 83 | match *self { 84 | Error::Uno(ref msg) => write!(f, "{}", msg), 85 | Error::Curve25519(ref s) => write!(f, "curve25519 - {}", s), 86 | Error::Hash(ref s) => write!(f, "argon2 - {}", s), 87 | Error::Shamir(ref s) => write!(f, "shamir - {}", s), 88 | Error::S39(ref 
s) => write!(f, "slip - {}", s), 89 | } 90 | } 91 | } 92 | -------------------------------------------------------------------------------- /lib/src/lib.rs: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright (C) 2021 WithUno, Inc. 3 | // All rights reserved. 4 | // 5 | // SPDX-License-Identifier: AGPL-3.0-only 6 | // 7 | 8 | mod error; 9 | pub use error::Error; 10 | 11 | /// The uno identity is 32 bytes of entropy. 12 | pub const ID_LENGTH: usize = 32; 13 | 14 | /// And uno identity newtype. 15 | #[derive(Debug, Copy, Clone)] 16 | pub struct Id(pub [u8; ID_LENGTH]); 17 | 18 | impl Id 19 | { 20 | /// Generate a new uno ID. 21 | pub fn new() -> Self 22 | { 23 | let mut seed = [0u8; ID_LENGTH]; 24 | use rand::RngCore; 25 | rand::thread_rng().fill_bytes(&mut seed); 26 | Id(seed) 27 | } 28 | } 29 | 30 | use chrono::{DateTime, Utc}; 31 | use serde::{Deserialize, Serialize}; 32 | 33 | #[derive(Serialize, Deserialize)] 34 | pub struct MagicShare 35 | { 36 | pub id: String, 37 | pub expires_at: DateTime, 38 | pub schema_version: u64, 39 | pub encrypted_credential: String, 40 | } 41 | 42 | #[derive(Serialize, Deserialize, Debug)] 43 | pub struct VerifiedToken 44 | { 45 | pub email: String, 46 | pub analytics_id: String, // given by customer.io for now... 47 | } 48 | 49 | impl VerifiedToken 50 | { 51 | pub fn new(email: S, analytics_id: T) -> VerifiedToken 52 | where 53 | S: AsRef, 54 | T: AsRef, 55 | { 56 | Self { 57 | email: email.as_ref().into(), 58 | analytics_id: analytics_id.as_ref().into(), 59 | } 60 | } 61 | } 62 | 63 | #[derive(Serialize, Deserialize, Debug)] 64 | pub struct UnverifiedToken 65 | { 66 | pub email: String, 67 | pub analytics_id: String, // given by customer.io for now... 
68 | pub secret: String, // Mu, regular base64 encoded with padding 69 | pub expires_at: DateTime, 70 | } 71 | 72 | impl UnverifiedToken 73 | { 74 | pub fn new( 75 | email: S, 76 | analytics_id: T, 77 | secret: U, 78 | expires_at: DateTime, 79 | ) -> UnverifiedToken 80 | where 81 | S: AsRef, 82 | T: AsRef, 83 | U: AsRef, 84 | { 85 | Self { 86 | email: email.as_ref().into(), 87 | analytics_id: analytics_id.as_ref().into(), 88 | secret: secret.as_ref().into(), 89 | expires_at, 90 | } 91 | } 92 | } 93 | 94 | use std::convert::TryFrom; 95 | use std::str; 96 | 97 | /// A group share is the result of running split on an uno id. 98 | /// You need a threshold number (currently 1) of reconstructed groups in order 99 | /// to be able to reconstruct the original uno id. 100 | pub use s39::GroupShare; 101 | /// A share is the individual element in a group share. Shares in a group are 102 | /// combined to reconstruct the group share. Then group shares are combined to 103 | /// reconstruct the original ID. 104 | pub use s39::Share; 105 | 106 | /// Build an uno identity from a byte slice. 
107 | impl TryFrom<&[u8]> for Id 108 | { 109 | type Error = std::array::TryFromSliceError; 110 | 111 | fn try_from(bytes: &[u8]) -> Result 112 | { 113 | let array = <[u8; ID_LENGTH]>::try_from(bytes)?; 114 | Ok(Id(array)) 115 | } 116 | } 117 | 118 | pub type KeyPair = djb::KeyPair; 119 | pub type PublicKey = djb::PublicKey; 120 | pub use djb::Signature; 121 | pub use djb::Signer; 122 | pub use djb::Verifier; 123 | 124 | pub const PRIVATE_KEY_LENGTH: usize = djb::PRIVATE_KEY_LENGTH; 125 | pub const PUBLIC_KEY_LENGTH: usize = djb::PUBLIC_KEY_LENGTH; 126 | pub const KEYPAIR_LENGTH: usize = djb::KEYPAIR_LENGTH; 127 | pub const SIGNATURE_LENGTH: usize = djb::SIGNATURE_LENGTH; 128 | 129 | pub type SymmetricKey = djb::SymmetricKey; 130 | 131 | use strum_macros::Display; 132 | use strum_macros::EnumString; 133 | use strum_macros::IntoStaticStr; 134 | 135 | /// Keys are derived from Uno IDs depending on their usage. This corresponds 136 | /// to the context passed to the key derivation function. 137 | #[derive(IntoStaticStr)] 138 | enum Usage 139 | { 140 | #[strum(to_string = "uno seed identity signing keypair")] 141 | Signature, 142 | #[strum(to_string = "uno seed private symmetric encryption key")] 143 | Encryption, 144 | } 145 | 146 | /// Convert an uno Id into its public/private keypair representation. 147 | impl From for KeyPair 148 | { 149 | fn from(id: Id) -> Self { KeyPair::from(&id) } 150 | } 151 | 152 | /// Convert an uno ID into its symmetric encryption secret. 153 | impl From for SymmetricKey 154 | { 155 | fn from(id: Id) -> Self { SymmetricKey::from(&id) } 156 | } 157 | 158 | /// Convert an uno Id into its public/private keypair representation. 
159 | impl From<&Id> for KeyPair 160 | { 161 | fn from(id: &Id) -> Self 162 | { 163 | let ctx: &'static str = Usage::Signature.into(); 164 | let mut secret = [0u8; djb::PRIVATE_KEY_LENGTH]; 165 | blake3::derive_key(ctx, &id.0, &mut secret); 166 | // This only panics if we use the wrong keys size, and we use the right 167 | // one so there's no point in propagating the error. 168 | djb::KeyPair::from_bytes(&secret) 169 | } 170 | } 171 | 172 | /// Convert an uno ID into its symmetric encryption secret. 173 | impl From<&Id> for SymmetricKey 174 | { 175 | fn from(id: &Id) -> Self 176 | { 177 | let ctx: &'static str = Usage::Encryption.into(); 178 | let mut key = SymmetricKey::default(); 179 | blake3::derive_key(ctx, &id.0, key.as_mut_slice()); 180 | key 181 | } 182 | } 183 | 184 | /// Split an uno ID into shards using the SLIP-0039 shamir's protocol. 185 | /// The scheme parameter is a list of tuples (t, n) like [(3, 5)] which means, 186 | /// "one group of five with a share threshold of 3". The threshold is the 187 | /// minimum number of shares needed to reconstitute the identity. 188 | pub fn split(id: Id, scheme: &[(u8, u8)]) -> Result, Error> 189 | { 190 | let shares = s39::split(&id.0, scheme)?; 191 | Ok(shares) 192 | } 193 | 194 | /// Combine shards back into the original uno id. 195 | pub fn combine(shares: &[Vec]) -> Result 196 | { 197 | let bytes = s39::combine(shares)?; 198 | let id = Id::try_from(&bytes[..])?; 199 | Ok(id) 200 | } 201 | 202 | /// The Mu (μ) represents seed entropy for short-lived shamirs sessions. While 203 | /// our Id seed is 32 bytes, the Mu is only 10 bytes. Ecoji encodes 80 bits as 204 | /// 8 unicode emoji with no padding. 205 | pub struct Mu(pub [u8; MU_LENGTH]); 206 | 207 | pub const MU_LENGTH: usize = 10; 208 | 209 | impl Mu 210 | { 211 | /// Generate new uno Mu entropy. 
212 | pub fn new() -> Self 213 | { 214 | let mut seed = [0u8; MU_LENGTH]; 215 | use rand::RngCore; 216 | rand::thread_rng().fill_bytes(&mut seed); 217 | Mu(seed) 218 | } 219 | } 220 | 221 | /// Convert an uno ID into its symmetric encryption secret. 222 | impl From for SymmetricKey 223 | { 224 | fn from(mu: Mu) -> Self { SymmetricKey::from(&mu) } 225 | } 226 | 227 | impl TryFrom<&[u8]> for Mu 228 | { 229 | type Error = std::array::TryFromSliceError; 230 | 231 | fn try_from(bytes: &[u8]) -> Result 232 | { 233 | let array = <[u8; MU_LENGTH]>::try_from(bytes)?; 234 | Ok(Mu(array)) 235 | } 236 | } 237 | 238 | /// Convert an uno Mu into its symmetric encryption secret. 239 | impl From<&Mu> for SymmetricKey 240 | { 241 | fn from(mu: &Mu) -> Self 242 | { 243 | let ctx = "uno recovery share secret"; 244 | let mut key = SymmetricKey::default(); 245 | blake3::derive_key(ctx, &mu.0, key.as_mut_slice()); 246 | key 247 | } 248 | } 249 | 250 | /// A Session is "public" bits derived from Mu entropy for keying ephemeral 251 | /// shamir's sessions on the server. 
252 | pub struct Session(pub [u8; 32]); 253 | 254 | impl TryFrom for Session 255 | { 256 | type Error = Error; 257 | 258 | fn try_from(mu: Mu) -> Result { Session::try_from(&mu) } 259 | } 260 | 261 | impl TryFrom<&Mu> for Session 262 | { 263 | type Error = Error; 264 | 265 | fn try_from(mu: &Mu) -> Result 266 | { 267 | let salt = b"uno shamir secret share session id"; 268 | 269 | use argon2::{Algorithm, Argon2, Version}; 270 | 271 | #[cfg(not(test))] 272 | // let ctx = Argon2::new(None, 512, 4096, 16, Version::V0x13)?; 273 | let ctx = Argon2::new(None, 16, 65536, 16, Version::V0x13)?; 274 | #[cfg(test)] 275 | let ctx = Argon2::new(None, 3, 4096, 1, Version::V0x13)?; 276 | 277 | let mut out = [0u8; 32]; 278 | let _ = ctx.hash_password_into( 279 | Algorithm::Argon2d, 280 | &mu.0, 281 | salt, 282 | b"", 283 | &mut out, 284 | )?; 285 | 286 | Ok(Session(out)) 287 | } 288 | } 289 | 290 | /// The additional data associated with an encrypt/decrypt (aead) operation. 291 | #[derive(Copy, Clone, Debug, Display, EnumString)] 292 | pub enum Binding<'a> 293 | { 294 | /// Vault data 295 | #[strum(serialize = "vault")] 296 | Vault, 297 | /// Shamir's Scret Sharing Session split 298 | #[strum(serialize = "split")] 299 | Split, 300 | /// Shamir's Secret Sharing Session combine 301 | #[strum(serialize = "combine")] 302 | Combine, 303 | /// A 1 of 1 "split" for bootstrapping the web extension or another app 304 | #[strum(serialize = "transfer")] 305 | Transfer, 306 | /// A Magic Share payload 307 | #[strum(serialize = "share")] 308 | MagicShare, 309 | /// User-specified additional data 310 | #[strum(disabled)] 311 | Custom(&'a str), 312 | /// Empty additional data 313 | #[strum(serialize = "none")] 314 | None, 315 | } 316 | 317 | impl<'a> Binding<'a> 318 | { 319 | pub fn context(self) -> &'a str 320 | { 321 | match self { 322 | Binding::Vault => "authentication vault", 323 | Binding::Split => "uno ssss split", 324 | Binding::Combine => "uno share combine", 325 | Binding::Transfer => 
"uno ssss transfer", 326 | Binding::MagicShare => "uno magic share", 327 | Binding::Custom(s) => s, 328 | Binding::None => "", 329 | } 330 | } 331 | } 332 | 333 | pub fn encrypt( 334 | usage: Binding, 335 | key: SymmetricKey, 336 | data: &[u8], 337 | ) -> Result, Error> 338 | { 339 | let ctx = usage.context(); 340 | Ok(djb::encrypt(key, data, ctx.as_bytes())?) 341 | } 342 | 343 | pub fn decrypt( 344 | usage: Binding, 345 | key: SymmetricKey, 346 | data: &[u8], 347 | ) -> Result, Error> 348 | { 349 | let ctx = usage.context(); 350 | Ok(djb::decrypt(key, data, ctx.as_bytes())?) 351 | } 352 | 353 | pub fn prove_blake3_work(challenge: &[u8], cost: u8) -> Option 354 | { 355 | let maxn: u32 = u32::MAX - 1; 356 | let mut n: u32 = 0; 357 | while n < maxn { 358 | if verify_blake3_work(challenge, n, cost) { 359 | return Some(n); 360 | } 361 | 362 | n += 1; 363 | } 364 | 365 | None 366 | } 367 | 368 | pub fn verify_blake3_work(challenge: &[u8], proof: u32, cost: u8) -> bool 369 | { 370 | let mut hash = blake3::Hasher::new(); 371 | hash.update(&challenge); 372 | hash.update(&proof.to_le_bytes()); 373 | 374 | let digest = hash.finalize().as_bytes().to_vec(); 375 | 376 | let u32value = 377 | u32::from_be_bytes([digest[0], digest[1], digest[2], digest[3]]); 378 | if (u32value >> (32 - cost)) == 0 { 379 | return true; 380 | } 381 | 382 | false 383 | } 384 | 385 | #[cfg(test)] 386 | mod unit 387 | { 388 | use super::*; 389 | 390 | #[test] 391 | fn keypair_from_id() -> Result<(), Box> 392 | { 393 | let bytes64 = "JAqq6Fa/tHQD2LRtyn5B/RgX0FzKpjikcgDPi5Rgxbo"; 394 | let bytes = base64::decode(bytes64)?; 395 | let id = Id::try_from(&*bytes)?; 396 | let actual = KeyPair::from(&id); 397 | let expected64 = "18ORHYIJBf48uXH9tj3uSx/0/hK1EtIxB6aY/\ 398 | fedPHYdQFZwBfUaRtU33C/w7eeqC0G+vHbLq/nmFFZay2/8Vg=="; 399 | 400 | let expected = base64::decode(expected64)?; 401 | assert_eq!(expected, actual.to_bytes()); 402 | 403 | Ok(()) 404 | } 405 | 406 | #[test] 407 | fn encryption_from_id() -> 
Result<(), Box> 408 | { 409 | let bytes64 = "JAqq6Fa/tHQD2LRtyn5B/RgX0FzKpjikcgDPi5Rgxbo"; 410 | let bytes = base64::decode(bytes64)?; 411 | let id = Id::try_from(&*bytes)?; 412 | let actual = SymmetricKey::from(&id); 413 | let expected64 = "DrEDTahFReS8G+dCGz5GjUnG+idrEWZbOXfsgo7ZGFc="; 414 | let expected = base64::decode(expected64)?; 415 | 416 | assert_eq!(expected, actual.as_slice()); 417 | 418 | Ok(()) 419 | } 420 | 421 | #[test] 422 | fn session_from_mu() -> Result<(), Box> 423 | { 424 | let bytes64 = "zrzOvM68zrzOvA"; // "μμμμμ".as_bytes(); 425 | let bytes = base64::decode(bytes64)?; 426 | let mu = Mu::try_from(&*bytes)?; 427 | let actual = Session::try_from(mu)?; 428 | // #[cfg(not(test))] 429 | // let expected64 = "/OyfB68hodit2UYqBp/9nMY1qukjNhEMH401e/r7D78"; 430 | // #[cfg(test)] 431 | let expected64 = "rFM2e4J8LBPhFZ2AeyK70/wkfiomaiVh8+Ktya+XNdg"; 432 | let expected = base64::decode(expected64)?; 433 | dbg!(base64::encode(&actual.0)); 434 | assert_eq!(expected, actual.0); 435 | 436 | Ok(()) 437 | } 438 | 439 | #[test] 440 | fn encryption_from_mu() -> Result<(), Box> 441 | { 442 | let bytes64 = "zrzOvM68zrzOvA"; // "μμμμμ".as_bytes(); 443 | let bytes = base64::decode(bytes64)?; 444 | let mu = Mu::try_from(&*bytes)?; 445 | let actual = SymmetricKey::from(&mu); 446 | let expected = vec![ 447 | 231, 65, 139, 157, 21, 173, 103, 71, 3, 93, 33, 90, 217, 249, 187, 448 | 37, 4, 1, 111, 216, 84, 125, 27, 119, 71, 92, 3, 52, 10, 37, 70, 449 | 116, 450 | ]; 451 | assert_eq!(expected, actual.as_slice()); 452 | 453 | Ok(()) 454 | } 455 | 456 | #[test] 457 | fn blake3_proof() 458 | { 459 | let random_bytes = b"12345678901234567890123456789012"; 460 | let cost = 2; // make it easy 461 | 462 | let proof = prove_blake3_work(random_bytes, cost); 463 | assert!(proof.is_some()); 464 | 465 | assert!(verify_blake3_work(random_bytes, proof.unwrap(), cost)); 466 | // some other proof will fail 467 | assert!(!verify_blake3_work(random_bytes, 0, cost)); 468 | } 469 | } 470 | 
-------------------------------------------------------------------------------- /rust-toolchain.toml: -------------------------------------------------------------------------------- 1 | [toolchain] 2 | channel = "nightly" 3 | 4 | -------------------------------------------------------------------------------- /rustfmt.toml: -------------------------------------------------------------------------------- 1 | binop_separator = "Front" 2 | blank_lines_lower_bound = 0 3 | blank_lines_upper_bound = 2 4 | brace_style = "AlwaysNextLine" 5 | control_brace_style = "AlwaysSameLine" 6 | edition = "2021" 7 | enum_discrim_align_threshold = 0 8 | fn_params_layout = "Tall" 9 | fn_single_line = true 10 | format_strings = true 11 | group_imports = "Preserve" 12 | imports_indent = "Block" 13 | imports_layout = "Mixed" 14 | imports_granularity = "Preserve" 15 | indent_style = "Block" 16 | match_block_trailing_comma = true 17 | max_width = 80 18 | merge_derives = true 19 | normalize_comments = false 20 | normalize_doc_attributes = true 21 | overflow_delimited_expr = true 22 | reorder_imports = true 23 | reorder_modules = true 24 | space_after_colon = true 25 | space_before_colon = false 26 | spaces_around_ranges = false 27 | struct_field_align_threshold = 0 28 | tab_spaces = 4 29 | trailing_comma = "Vertical" 30 | trailing_semicolon = true 31 | unstable_features = true 32 | use_small_heuristics = "Max" 33 | version = "Two" 34 | wrap_comments = false 35 | -------------------------------------------------------------------------------- /s39/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "s39" 3 | version = "0.1.0" 4 | authors = ["dcow "] 5 | license = "AGPL-3.0-only" 6 | edition = "2021" 7 | 8 | [dependencies] 9 | anyhow = "1.0" 10 | failure = "0.1" 11 | rand = "0.8" 12 | 13 | sssmc39 = { git = "https://github.com/withuno/rust-sssmc39.git", features = ["rust_crypto_pbkdf2"], default-features = false } 14 | 
-------------------------------------------------------------------------------- /s39/README.md: -------------------------------------------------------------------------------- 1 | s39 2 | === 3 | 4 | A wrapper around sssmc39 slip-39 (shamir's secret sharing with metadata) implementation. 5 | -------------------------------------------------------------------------------- /s39/src/error.rs: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright (C) 2021 WithUno, Inc. 3 | // All rights reserved. 4 | // 5 | // SPDX-License-Identifier: AGPL-3.0-only 6 | // 7 | 8 | use failure::Fail; 9 | use std::error; 10 | use std::fmt; 11 | 12 | #[derive(Debug)] 13 | pub enum Error 14 | { 15 | Underlying(failure::Compat), 16 | } 17 | 18 | impl error::Error for Error 19 | { 20 | fn source(&self) -> Option<&(dyn error::Error + 'static)> 21 | { 22 | match *self { 23 | Error::Underlying(ref e) => Some(e), 24 | } 25 | } 26 | } 27 | 28 | impl fmt::Display for Error 29 | { 30 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result 31 | { 32 | match *self { 33 | Error::Underlying(ref e) => { 34 | write!(f, "invalid argument: {}", e.get_ref()) 35 | }, 36 | } 37 | } 38 | } 39 | 40 | impl From for Error 41 | { 42 | fn from(e: sssmc39::Error) -> Error { Error::Underlying(e.kind().compat()) } 43 | } 44 | -------------------------------------------------------------------------------- /s39/src/lib.rs: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright (C) 2021 WithUno, Inc. 3 | // All rights reserved. 4 | // 5 | // SPDX-License-Identifier: AGPL-3.0-only 6 | // 7 | 8 | mod error; 9 | pub use error::Error; 10 | 11 | pub use sssmc39::GroupShare; 12 | pub use sssmc39::Share; 13 | 14 | const PASS: &str = "uno shamir secret share"; 15 | 16 | /// Using scheme (t, n), split `data` into `n` shares such that `t` can be re- 17 | /// combined into the original bytes. 
Multiple schemes can be passed in the 18 | /// array in which case groups of shares will be constructed such that the 19 | /// scheme is enforced for each group. For example: split with scheme `[(1,1), 20 | /// (2,3), (3,5)]` would result in 3 groups of shares with the first group 21 | /// being a single share, the second group requireing two of three shares, and 22 | /// the third group requiring three of five shares. 23 | /// 24 | /// The length of `data` must be at least 16 bytes (128 bits) and be a multiple 25 | /// of 16 bits. The maximum number of groups (number of tuples in the scheme 26 | /// array), cannot exceed 16. In a given group tuple (t, n), `t` must not 27 | /// exceed `n`. If `t` equals 1, then `n` must be 1. 28 | /// 29 | /// Group, and scheme information as well as the iteration exponent is encoded 30 | /// in each share so that shares can be recombined without additional context. 31 | /// 32 | pub fn split<'a>( 33 | data: &[u8], 34 | scheme: &[(u8, u8)], 35 | ) -> Result, Error> 36 | { 37 | // We encrypt with a fixed password and a mere 10,000 iterations of pbkdf. 38 | // The security of each share is managed by our software eslewhere. Each 39 | // share is encrypted when in transit and at rest in a user's vault. The 40 | // encryption component of slip39 is not applicable in our use case although 41 | // it's something we could consider supporting in the future if we develop 42 | // a compelling UX that incorporates it. 43 | let groups = sssmc39::generate_mnemonics(1, scheme, data, PASS, 0)?; 44 | Ok(groups) 45 | } 46 | 47 | /// Combine shares from a previous split operation. An error is returned if the 48 | /// provided shares are not able to satisfy group threshold requirements, or if 49 | /// the digest does not match after recombination. 
50 | /// 51 | pub fn combine<'a>(shares: &[Vec]) -> Result, Error> 52 | { 53 | let data = sssmc39::combine_mnemonics(shares, PASS)?; 54 | Ok(data) 55 | } 56 | 57 | #[cfg(test)] 58 | mod unit 59 | { 60 | use super::*; 61 | 62 | use anyhow::Result; 63 | use rand::RngCore; 64 | 65 | #[test] 66 | pub fn s39_roundtrip() -> Result<()> 67 | { 68 | let mut data = [0u8; 32]; 69 | rand::thread_rng().fill_bytes(&mut data); 70 | 71 | let groups = split(&data, &[(2, 3)])?; 72 | let group = &groups[0]; 73 | 74 | let mnemonics1 = group.member_shares[..2] 75 | .iter() 76 | .map(|s| s.to_mnemonic()) 77 | .collect::>, _>>() 78 | .map_err(|e| Error::from(e))?; 79 | let mnemonics2 = group.member_shares[1..3] 80 | .iter() 81 | .map(|s| s.to_mnemonic()) 82 | .collect::>, _>>() 83 | .map_err(|e| Error::from(e))?; 84 | 85 | let r1 = combine(&mnemonics1[..])?; 86 | let r2 = combine(&mnemonics2[..])?; 87 | 88 | assert_eq!(data, &r1[..]); 89 | assert_eq!(data, &r2[..]); 90 | 91 | Ok(()) 92 | } 93 | } 94 | -------------------------------------------------------------------------------- /wsm/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "wsm" 3 | version = "0.1.0" 4 | authors = ["Chris Eberly "] 5 | license = "AGPL-3.0-only" 6 | edition = "2021" 7 | 8 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 9 | [lib] 10 | crate-type = ['cdylib', 'rlib'] 11 | 12 | [dev-dependencies] 13 | wasm-bindgen-test = "0.3" 14 | rand = "0.8.5" 15 | 16 | [dependencies] 17 | # this is required for a random source in a wasm environment 18 | # https://docs.rs/getrandom/0.2.3/getrandom/#webassembly-support 19 | getrandom = { version = "0.2.3", features = ["js"] } 20 | 21 | argon2 = "0.3" 22 | base64 = "0.13" 23 | blake3 = "1.0" 24 | uno = { path = "../lib" } 25 | wasm-bindgen = "0.2" 26 | -------------------------------------------------------------------------------- /wsm/README.md: 
-------------------------------------------------------------------------------- 1 | to test 2 | 3 | `wasm-pack test --node` 4 | 5 | to build without a bundler... 6 | 7 | `wasm-pack build --target web` 8 | -------------------------------------------------------------------------------- /wsm/src/lib.rs: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright (C) 2021 WithUno, Inc. 3 | // All rights reserved. 4 | // 5 | // SPDX-License-Identifier: AGPL-3.0-only 6 | // 7 | 8 | use std::convert::TryFrom; 9 | use std::fmt::Debug; 10 | 11 | use argon2::{Algorithm, Argon2, Params, Version}; 12 | 13 | use wasm_bindgen::prelude::*; 14 | 15 | use uno::prove_blake3_work; 16 | use uno::Signer; 17 | 18 | #[derive(Debug)] 19 | pub enum Error 20 | { 21 | Fatal(String), 22 | } 23 | 24 | impl From for Error 25 | { 26 | fn from(e: argon2::Error) -> Self { Error::Fatal(e.to_string()) } 27 | } 28 | 29 | impl From for Error 30 | { 31 | fn from(e: base64::DecodeError) -> Self { Error::Fatal(e.to_string()) } 32 | } 33 | 34 | impl From for Error 35 | { 36 | fn from(e: uno::Error) -> Self { Error::Fatal(e.to_string()) } 37 | } 38 | 39 | fn argon_hash( 40 | t_cost: u32, 41 | m_cost: u32, 42 | p: u32, 43 | salt: &[u8], 44 | body: &[u8], 45 | ) -> Result, Error> 46 | { 47 | let params = Params::new(m_cost, t_cost, p, Some(32))?; 48 | let argon2 = Argon2::new(Algorithm::Argon2d, Version::V0x13, params); 49 | 50 | let mut out = [0u8; 32]; 51 | argon2.hash_password_into(body, salt, &mut out)?; 52 | 53 | Ok(out.to_vec()) 54 | } 55 | 56 | #[wasm_bindgen] 57 | pub fn wasm_sign_message(seed: String, message: String) -> Option 58 | { 59 | let decoded_seed = match base64::decode(seed) { 60 | Ok(v) => v, 61 | Err(_) => return None, 62 | }; 63 | 64 | let id = match uno::Id::try_from(&decoded_seed[..]) { 65 | Ok(v) => v, 66 | Err(_) => return None, 67 | }; 68 | 69 | let keypair = match uno::KeyPair::try_from(id) { 70 | Ok(v) => v, 71 | Err(_) => return 
None,
    };

    Some(base64::encode(keypair.sign(message.as_bytes())))
}

/// Build a synchronous auth header value: Argon2d over
/// "nonce:method:resource:blake3(body)" with the caller-provided salt and
/// cost parameters. Returns "base64(salt)$base64(hash)" (no padding), or
/// None if hashing fails.
#[wasm_bindgen]
pub fn wasm_auth_header(
    nonce: String,
    method: String,
    resource: String,
    salt: &[u8],
    body: &[u8],
    argon_m: u32,
    argon_t: u32,
    argon_p: u32,
) -> Option<String>
{
    let body_hash = blake3::hash(body);
    let body_enc =
        base64::encode_config(body_hash.as_bytes(), base64::STANDARD_NO_PAD);

    let string_to_argon =
        format!("{}:{}:{}:{}", nonce, method, resource, body_enc);

    // Note the argument order: argon_hash takes (t, m, p).
    match argon_hash(
        argon_t,
        argon_m,
        argon_p,
        salt,
        string_to_argon.as_bytes(),
    ) {
        Ok(out) => {
            let hash = base64::encode_config(out, base64::STANDARD_NO_PAD);
            let salthash = base64::encode_config(salt, base64::STANDARD_NO_PAD);
            Some(format!("{}${}", salthash, hash))
        },
        Err(_) => None,
    }
}

/// Build an async (proof-of-work) auth header: blake3 work proof over
/// "nonce:method:resource:blake3(body)" at the given cost. Returns
/// "blake3$<proof>$<nonce>", or None if no proof was found.
#[wasm_bindgen]
pub fn wasm_async_auth_header(
    nonce: String,
    method: String,
    resource: String,
    cost: u8,
    body: &[u8],
) -> Option<String>
{
    let body_hash = blake3::hash(body);
    let body_enc =
        base64::encode_config(body_hash.as_bytes(), base64::STANDARD_NO_PAD);

    let challenge = format!("{}:{}:{}:{}", nonce, method, resource, body_enc);

    prove_blake3_work(challenge.as_bytes(), cost)
        .map(|n| format!("blake3${}${}", n, nonce))
}

/// Split `seed_to_share` into a single 1-of-1 s39 share and return its
/// mnemonic as one space-joined string.
pub fn share_seed(seed_to_share: &[u8]) -> Result<String, Error>
{
    let id_to_share = match uno::Id::try_from(seed_to_share) {
        Ok(v) => v,
        Err(e) => return Err(Error::Fatal(e.to_string())),
    };

    let split = match uno::split(id_to_share, &[(1, 1)]) {
        Ok(v) => v,
        Err(e) => return Err(Error::Fatal(e.to_string())),
    };

    let group = &split[0];
    let share = &group.member_shares[0];

    let mnemonic = match
share.to_mnemonic() {
        Ok(v) => v,
        Err(e) => return Err(Error::Fatal(e.to_string())),
    };

    Ok(mnemonic.join(" "))
}

/// wasm wrapper around `share_seed`: takes/returns base64 strings and maps
/// all errors to None.
#[wasm_bindgen]
pub fn wasm_share_seed(seed_to_share: String) -> Option<String>
{
    let decoded_seed_to_share = match base64::decode(seed_to_share) {
        Ok(v) => v,
        Err(_) => return None,
    };

    match share_seed(&decoded_seed_to_share) {
        Ok(v) => Some(v),
        Err(_) => None,
    }
}

/// Decrypt an encrypted share using a symmetric key derived from the Mu
/// `seed`, then decode the contained mnemonic into raw id bytes.
pub fn decrypt_share(
    share: &[u8],
    seed: &[u8],
) -> Result<[u8; uno::ID_LENGTH], Error>
{
    let id = match uno::Mu::try_from(&seed[..]) {
        Ok(v) => v,
        Err(e) => return Err(Error::Fatal(e.to_string())),
    };

    let key = uno::SymmetricKey::from(&id);
    let ctx = uno::Binding::Combine;

    let decrypted_share = match uno::decrypt(ctx, key, share) {
        Ok(v) => v,
        Err(e) => return Err(Error::Fatal(e.to_string())),
    };

    // The plaintext is expected to be a utf8 mnemonic string.
    let string_share = match String::from_utf8(decrypted_share) {
        Ok(v) => v,
        Err(e) => return Err(Error::Fatal(e.to_string())),
    };

    share_from_mnemonic(&string_share)
}

/// wasm wrapper around `decrypt_share`: base64 in, base64 out, None on error.
#[wasm_bindgen]
pub fn wasm_decrypt_share(share: String, seed: String) -> Option<String>
{
    let decoded_share = match base64::decode(share) {
        Ok(v) => v,
        Err(_) => return None,
    };

    let decoded_seed = match base64::decode(seed) {
        Ok(v) => v,
        Err(_) => return None,
    };

    match decrypt_share(&decoded_share, &decoded_seed) {
        Ok(v) => Some(base64::encode(v)),
        Err(_) => None,
    }
}

/// Recover raw id bytes from a single space-separated mnemonic string by
/// running it through `uno::combine` as a 1-share set.
pub fn share_from_mnemonic(share: &str) -> Result<[u8; uno::ID_LENGTH], Error>
{
    let words: Vec<String> = share.split(' ').map(|s| s.to_owned()).collect();

    let shares = vec![words];

    match uno::combine(&shares) {
        Ok(v) => Ok(v.0),
        Err(e) =>
Err(Error::Fatal(e.to_string())),
    }
}

/// wasm wrapper around `share_from_mnemonic`: mnemonic string in, base64
/// id bytes out, None on error.
#[wasm_bindgen]
pub fn wasm_share_from_mnemonic(share: String) -> Option<String>
{
    match share_from_mnemonic(&share) {
        Ok(v) => Some(base64::encode(v)),
        Err(_) => None,
    }
}

/// Decrypt a magic-share blob with a key derived from the url-safe
/// base64-encoded `seed`; returns the utf8 plaintext or None.
#[wasm_bindgen]
pub fn wasm_decrypt_magic_share(share: &[u8], seed: String) -> Option<String>
{
    let decoded_seed =
        match base64::decode_config(seed, base64::URL_SAFE_NO_PAD) {
            Ok(v) => v,
            Err(_) => return None,
        };

    let id = match uno::Id::try_from(&decoded_seed[..]) {
        Ok(v) => v,
        Err(_) => return None,
    };

    let key = uno::SymmetricKey::from(&id);
    let ctx = uno::Binding::MagicShare;

    let decrypted_share = match uno::decrypt(ctx, key, share) {
        Ok(v) => v,
        Err(_) => return None,
    };

    match String::from_utf8(decrypted_share) {
        Ok(s) => Some(s),
        Err(_) => None,
    }
}

/// Encrypt a vault string under a key derived from the base64-encoded
/// `seed`, binding the ciphertext to the Vault context. Returns the raw
/// ciphertext bytes or None.
#[wasm_bindgen]
pub fn wasm_encrypt_vault(vault: String, seed: String) -> Option<Box<[u8]>>
{
    let decoded_seed = match base64::decode(seed) {
        Ok(v) => v,
        Err(_) => return None,
    };

    let id = match uno::Id::try_from(&decoded_seed[..]) {
        Ok(v) => v,
        Err(_) => return None,
    };

    let key = uno::SymmetricKey::from(&id);
    let ctx = uno::Binding::Vault;

    match uno::encrypt(ctx, key, vault.as_bytes()) {
        Ok(v) => Some(v.into_boxed_slice()),
        Err(_) => None,
    }
}

/// Inverse of `wasm_encrypt_vault`: decrypt the vault ciphertext and return
/// the utf8 plaintext, or None on any failure.
#[wasm_bindgen]
pub fn wasm_decrypt_vault(vault: &[u8], seed: String) -> Option<String>
{
    let decoded_seed = match base64::decode(seed) {
        Ok(v) => v,
        Err(_) => return None,
    };

    let id = match uno::Id::try_from(&decoded_seed[..]) {
        Ok(v) => v,
        Err(_) => return None,
    };

    let key = uno::SymmetricKey::from(&id);
    let ctx =
uno::Binding::Vault;

    let decrypted_vault = match uno::decrypt(ctx, key, vault) {
        Ok(v) => v,
        Err(_) => return None,
    };

    match String::from_utf8(decrypted_vault) {
        Ok(s) => Some(s),
        Err(_) => None,
    }
}

/// Derive a recovery session id by hashing the base64-encoded Mu bytes with
/// Argon2d (t=32, m=256, p=2) under a fixed salt; url-safe base64 output.
#[wasm_bindgen]
pub fn wasm_generate_session_id(mu: String) -> Option<String>
{
    // assert len(seed) == 32

    let decoded_mu_bytes = match base64::decode(mu) {
        Ok(v) => v,
        Err(_) => return None,
    };

    let salt = b"uno recovery session id";

    match argon_hash(32, 256, 2, salt, &decoded_mu_bytes) {
        Ok(v) => Some(base64::encode_config(v, base64::URL_SAFE_NO_PAD)),
        Err(_) => None,
    }
}

/// Like `wasm_get_public_key(_, true)` but the seed arrives url-safe
/// base64-encoded (no padding) instead of standard base64.
#[wasm_bindgen]
pub fn wasm_get_public_key_url_encoded(seed: String) -> Option<String>
{
    match base64::decode_config(seed, base64::URL_SAFE_NO_PAD) {
        Ok(v) => wasm_get_public_key(base64::encode(v), true),
        Err(_) => None,
    }
}

/// Derive the public key from a base64-encoded seed. When `url_encode` is
/// true the result uses url-safe base64 without padding, otherwise standard
/// base64 with padding.
#[wasm_bindgen]
pub fn wasm_get_public_key(seed: String, url_encode: bool) -> Option<String>
{
    let decoded_seed = match base64::decode(seed) {
        Ok(v) => v,
        Err(_) => return None,
    };

    let id = match uno::Id::try_from(&decoded_seed[..]) {
        Ok(v) => v,
        Err(_) => return None,
    };

    match uno::KeyPair::try_from(id) {
        Ok(v) => {
            if url_encode {
                return Some(base64::encode_config(
                    v.public,
                    base64::URL_SAFE_NO_PAD,
                ));
            }

            Some(base64::encode(v.public))
        },
        Err(_) => None,
    }
}

/// A pair of strings returned to js; fields are cloned on access.
#[wasm_bindgen(getter_with_clone)]
pub struct StringTuple(pub String, pub String);

use std::str;

#[wasm_bindgen]
pub fn wasm_verify_params_from_query(query: String) -> Option<StringTuple>
{
    // Query is a url encoded, no padding string of the following form:
    //
    // 3 strings concatenated together:
| // Mu + "::" + PublicKey 381 | // where Mu is the base64 encoded (regular with padding) secret 382 | // PublicKey is the base64 encoded (url encoded without padding) vault id 383 | // 384 | // Each of Mu and PublicKey can be used transparently for the 385 | // PUT call to api/verify_tokens 386 | 387 | match base64::decode_config(&query, base64::URL_SAFE_NO_PAD) { 388 | Ok(q) => match str::from_utf8(&q) { 389 | Ok(sq) => { 390 | let s = sq.split("::").collect::>(); 391 | if s.len() != 2 { 392 | return None; 393 | } 394 | 395 | Some(StringTuple(s[0].to_string(), s[1].to_string())) 396 | }, 397 | Err(_) => None, 398 | }, 399 | Err(_) => None, 400 | } 401 | } 402 | -------------------------------------------------------------------------------- /wsm/tests/wasm.rs: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright (C) 2021 WithUno, Inc. 3 | // All rights reserved. 4 | // 5 | // SPDX-License-Identifier: AGPL-3.0-only 6 | // 7 | 8 | use wasm_bindgen_test::*; 9 | 10 | use wsm::*; 11 | 12 | #[test] 13 | fn test_share_roundtrip() 14 | { 15 | use rand::prelude::*; 16 | let mut rng = rand::thread_rng(); 17 | 18 | let mut seed_to_share: [u8; 32] = [0; 32]; 19 | 20 | for _ in 1..100 { 21 | rng.fill(&mut seed_to_share); 22 | 23 | let share = share_seed(&seed_to_share).unwrap(); 24 | let recovered = share_from_mnemonic(&share).unwrap(); 25 | 26 | assert_eq!(recovered, seed_to_share); 27 | } 28 | } 29 | 30 | #[wasm_bindgen_test] 31 | fn test_encrypt_decrypt_vault() 32 | { 33 | let seed = base64::encode(vec![ 34 | 185, 203, 86, 9, 47, 81, 143, 207, 19, 215, 220, 79, 129, 50, 252, 151, 35 | 18, 101, 187, 123, 90, 83, 228, 37, 202, 54, 46, 236, 245, 152, 160, 36 | 159, 37 | ]); 38 | let seed2 = seed.clone(); 39 | 40 | let vault = String::from("hello, vault"); 41 | let vault2 = vault.clone(); 42 | 43 | let e = wasm_encrypt_vault(vault, seed).unwrap(); 44 | let d = wasm_decrypt_vault(&e, seed2).unwrap(); 45 | 46 | 
assert_eq!(vault2, *d); 47 | } 48 | 49 | #[wasm_bindgen_test] 50 | fn test_auth_header() 51 | { 52 | //m=65536,t=3,p=8 53 | assert_eq!( 54 | wasm_auth_header( 55 | String::from("1234"), 56 | String::from("GET"), 57 | String::from("/"), 58 | b"salt1234", 59 | b"", 60 | 128, 61 | 1, 62 | 1 63 | ) 64 | .unwrap(), 65 | "c2FsdDEyMzQ$epmTu7qNOCcaCJ5GMStEjN9Xfq0jGm1mbwDFu/E5K4Q" 66 | ); 67 | } 68 | 69 | #[wasm_bindgen_test] 70 | fn test_generate_session_id() 71 | { 72 | let expected = "kgFbVxPUAoaXjGXnyXJUlaKejL8SKaxM_9X0RvBYb44"; 73 | let msg = base64::encode("0123456789"); 74 | 75 | let r = wasm_generate_session_id(msg); 76 | assert_eq!(r.unwrap(), expected) 77 | } 78 | 79 | #[wasm_bindgen_test] 80 | fn test_sign_message() 81 | { 82 | assert_eq!( 83 | wasm_sign_message( 84 | String::from("WdqX7a7/vRDzJUBdoTXituZ7S6GnhYH+i/hrw0puMV8="), 85 | String::from("0123456789") 86 | ) 87 | .unwrap(), 88 | "KX0MAhQIxsKBpj4IvdvQpJdYkaU3gNXELdnPd9UWMaowCmjG2hcN60b5VLwO/\ 89 | cGzzIVQqdzEJniufvAJL3/WCw==", 90 | ); 91 | } 92 | 93 | #[wasm_bindgen_test] 94 | fn test_get_public_key() 95 | { 96 | assert_eq!( 97 | wasm_get_public_key( 98 | String::from("WdqX7a7/vRDzJUBdoTXituZ7S6GnhYH+i/hrw0puMV8="), 99 | true 100 | ) 101 | .unwrap(), 102 | "hQuTnfStbKhU-i4ri9QnMQFrsbHHOm04kHm3fE190aY" 103 | ); 104 | 105 | assert_eq!( 106 | wasm_get_public_key( 107 | String::from("WdqX7a7/vRDzJUBdoTXituZ7S6GnhYH+i/hrw0puMV8="), 108 | false 109 | ) 110 | .unwrap(), 111 | "hQuTnfStbKhU+i4ri9QnMQFrsbHHOm04kHm3fE190aY=" 112 | ); 113 | } 114 | 115 | use uno::{Id, KeyPair, Mu}; 116 | 117 | #[wasm_bindgen_test] 118 | fn test_verify_params_from_query() 119 | { 120 | let id = Id::new(); 121 | let keypair = KeyPair::from(id); 122 | 123 | let mu = Mu::new(); 124 | 125 | let q = format!( 126 | "{}::{}", 127 | base64::encode(&mu.0), 128 | base64::encode_config(keypair.public, base64::URL_SAFE_NO_PAD) 129 | ); 130 | 131 | let StringTuple(one, two) = 132 | wasm_verify_params_from_query(base64::encode(q)).unwrap(); 133 | 
assert_eq!(one, base64::encode(&mu.0));
    assert_eq!(
        two,
        base64::encode_config(keypair.public, base64::URL_SAFE_NO_PAD)
    );
}
-------------------------------------------------------------------------------- /xcf/Cargo.toml: --------------------------------------------------------------------------------
[package]
name = "xcf"
version = "0.1.0"
authors = ["David Cowden "]
license = "AGPL-3.0-only"
edition = "2021"

[lib]
name = "unoxcf"
crate-type = ["staticlib"]

[dependencies]
ffi = { path = "../ffi" }

[build-dependencies]
cbindgen = "0.20"
-------------------------------------------------------------------------------- /xcf/build.rs: --------------------------------------------------------------------------------
//
// Copyright (C) 2021 WithUno, Inc.
// All rights reserved.
//
// SPDX-License-Identifier: AGPL-3.0-only
//

use std::error::Error;
use std::result::Result;

//
// Generate the C FFI using the cbindgen crate.
//
fn main() -> Result<(), Box<dyn Error>>
{
    let crate_dir = std::env::var("CARGO_MANIFEST_DIR")?;
    let res = cbindgen::generate(crate_dir)?;
    res.write_to_file("include/uno.h");
    Ok(())
}
-------------------------------------------------------------------------------- /xcf/cbindgen.toml: --------------------------------------------------------------------------------
# This is a template cbindgen.toml file with all of the default values.
# Some values are commented out because their absence is the real default.
#
# See https://github.com/eqrion/cbindgen/blob/master/docs.md#cbindgentoml
# for detailed documentation of every option here.

language = "C"


############## Options for Wrapping the Contents of the Header #################

header = '''
//
// Copyright (C) 2021 WithUno, Inc.
15 | // All rights reserved. 16 | // 17 | // SPDX-License-Identifier: AGPL-3.0-only 18 | // 19 | ''' 20 | # trailer = "/* Text to put at the end of the generated file */" 21 | include_guard = "uno_ffi_h" 22 | pragma_once = true 23 | autogen_warning = ''' 24 | // 25 | // ⚠️ Warning! 26 | // 27 | // This file is auto-generated by cbindgen. Modifications must be made to the 28 | // source Rust extern "C" interface specified in the ~uno/identity/ffi crate. 29 | // 30 | // Do not manually modify this file. 31 | // 32 | ''' 33 | include_version = false 34 | # namespace = "my_namespace" 35 | namespaces = [] 36 | using_namespaces = [] 37 | sys_includes = [] 38 | includes = [] 39 | no_includes = false 40 | after_includes = "" 41 | 42 | 43 | ############################ Code Style Options ################################ 44 | 45 | braces = "NextLine" 46 | line_length = 80 47 | tab_width = 2 48 | documentation = true 49 | documentation_style = "auto" 50 | line_endings = "LF" # also "CR", "CRLF", "Native" 51 | 52 | 53 | ############################# Codegen Options ################################## 54 | 55 | style = "type" 56 | sort_by = "None" 57 | usize_is_size_t = true 58 | 59 | 60 | [defines] 61 | # "target_os = freebsd" = "DEFINE_FREEBSD" 62 | # "feature = serde" = "DEFINE_SERDE" 63 | 64 | 65 | [export] 66 | include = [] 67 | exclude = [] 68 | # prefix = "CAPI_" 69 | item_types = [] 70 | renaming_overrides_prefixing = false 71 | 72 | 73 | [export.rename] 74 | 75 | 76 | [export.body] 77 | 78 | 79 | [export.mangle] 80 | 81 | 82 | [fn] 83 | rename_args = "None" 84 | # must_use = "MUST_USE_FUNC" 85 | # no_return = "NO_RETURN" 86 | # prefix = "START_FUNC" 87 | # postfix = "END_FUNC" 88 | args = "auto" 89 | sort_by = "None" 90 | 91 | 92 | [struct] 93 | rename_fields = "None" 94 | # must_use = "MUST_USE_STRUCT" 95 | derive_constructor = false 96 | derive_eq = false 97 | derive_neq = false 98 | derive_lt = false 99 | derive_lte = false 100 | derive_gt = false 101 | derive_gte = 
false 102 | 103 | 104 | [enum] 105 | rename_variants = "None" 106 | # must_use = "MUST_USE_ENUM" 107 | add_sentinel = false 108 | prefix_with_name = false 109 | derive_helper_methods = false 110 | derive_const_casts = false 111 | derive_mut_casts = false 112 | # cast_assert_name = "ASSERT" 113 | derive_tagged_enum_destructor = false 114 | derive_tagged_enum_copy_constructor = false 115 | enum_class = true 116 | private_default_tagged_enum_constructor = false 117 | 118 | 119 | [const] 120 | allow_static_const = true 121 | allow_constexpr = false 122 | sort_by = "None" 123 | 124 | 125 | [ptr] 126 | # An optional string to decorate all pointers that are 127 | # required to be non null. Nullability is inferred from the Rust type: `&T`, 128 | # `&mut T` and `NonNull` all require a valid pointer value. 129 | # 130 | # clang uses _Nonnull and __nonnull, _Nullable and __nullable: 131 | # 132 | # https://clang.llvm.org/docs/AttributeReference.html#nullability-attributes 133 | # https://clang.llvm.org/docs/analyzer/developer-docs/nullability.html 134 | # 135 | non_null_attribute = "_Nonnull" 136 | 137 | 138 | [macro_expansion] 139 | bitflags = false 140 | 141 | 142 | ############## Options for How Your Rust library Should Be Parsed ############## 143 | 144 | [parse] 145 | parse_deps = true 146 | include = ["ffi", "uno", "s39", "sssmc39"] 147 | exclude = [] 148 | clean = false 149 | extra_bindings = ["ffi"] 150 | 151 | 152 | [parse.expand] 153 | crates = [] 154 | all_features = false 155 | default_features = true 156 | features = [] 157 | 158 | -------------------------------------------------------------------------------- /xcf/include/uno.h: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright (C) 2021 WithUno, Inc. 3 | // All rights reserved. 4 | // 5 | // SPDX-License-Identifier: AGPL-3.0-only 6 | // 7 | 8 | 9 | #ifndef uno_ffi_h 10 | #define uno_ffi_h 11 | 12 | #pragma once 13 | 14 | // 15 | // ⚠️ Warning! 
16 | // 17 | // This file is auto-generated by cbindgen. Modifications must be made to the 18 | // source Rust extern "C" interface specified in the ~uno/identity/ffi crate. 19 | // 20 | // Do not manually modify this file. 21 | // 22 | 23 | 24 | #include 25 | #include 26 | #include 27 | #include 28 | #include 29 | 30 | 31 | #define UNO_ERR_SUCCESS 0 32 | 33 | #define UNO_ERR_ILLEGAL_ARG 1 34 | 35 | #define UNO_ERR_SPLIT 2 36 | 37 | #define UNO_ERR_COMBINE 3 38 | 39 | #define UNO_ERR_SHARE_ID 4 40 | 41 | #define UNO_ERR_SHARE_MISS 5 42 | 43 | #define UNO_ERR_CHECKSUM 6 44 | 45 | #define UNO_ERR_MNEMONIC 7 46 | 47 | /** 48 | * And uno identity newtype. 49 | */ 50 | typedef struct Id Id; 51 | 52 | /** 53 | * 54 | * Opaque array containing share metadata. Get a member share by index using 55 | * `uno_get_member_share_by_index`. 56 | * 57 | */ 58 | typedef struct UnoMemberSharesVec UnoMemberSharesVec; 59 | 60 | /** 61 | * 62 | * A SplitResult is the output of successfully running `uno_s39_split` on an 63 | * UnoId. The structure represents an opaque array of UnoGroupSplit structs. 64 | * 65 | */ 66 | typedef struct UnoSplitResult UnoSplitResult; 67 | 68 | /** 69 | * 70 | * 32 bytes of seed entropy. See uno::Id. 71 | * 72 | */ 73 | typedef Id UnoId; 74 | 75 | /** 76 | * 77 | * UnoByteSlice can be treated like an array of uint8_t bytes on the C side. 78 | * You may not modify the bytes and the struct must be freed once it is no 79 | * longer needed. 80 | * 81 | */ 82 | typedef struct 83 | { 84 | const uint8_t *ptr; 85 | size_t len; 86 | size_t _cap; 87 | } UnoByteSlice; 88 | 89 | /** 90 | * 91 | * A GroupSpec is a tuple of (threshold, total) shares in a given s39 group 92 | * split. For instance, if you want a group to be split into 3 pieces, two 93 | * of which are requred to reconstitute the group secret, you'd pass (2, 3). 
94 | * 95 | */ 96 | typedef struct 97 | { 98 | uint8_t threshold; 99 | uint8_t total; 100 | } UnoGroupSpec; 101 | 102 | /** 103 | * 104 | * A GroupSplit contains metadata related to one of the groups of shares 105 | * requested during the split call. The actual shares are contained in the 106 | * opaque UnoMemberSharesVec struct. 107 | * 108 | */ 109 | typedef struct 110 | { 111 | uint16_t group_id; 112 | uint8_t iteration_exponent; 113 | uint8_t group_index; 114 | uint8_t group_threshold; 115 | uint8_t group_count; 116 | /** 117 | * The number of shares from this group required to reconstitue the group 118 | * secret. 119 | */ 120 | uint8_t member_threshold; 121 | /** 122 | * Total number of member_shares 123 | */ 124 | size_t share_count; 125 | /** 126 | * Opaque reference to the constituent member shares. Acquire one of the 127 | * shares with `uno_get_member_share_by_index`. 128 | */ 129 | const UnoMemberSharesVec *member_shares; 130 | } UnoGroupSplit; 131 | 132 | /** 133 | * 134 | * Share mnemonic string. Obtained by index from an UnoGroupSplit type using 135 | * `uno_get_s39_share_by_index`. The mnemonic share data is a c string 136 | * reference and can be handled in a read-only (const) fashion using the 137 | * standard c string api. An UnoShare must be freed using `uno_free_s39_share` 138 | * when you are done using it. 139 | * 140 | */ 141 | typedef struct 142 | { 143 | const char *mnemonic; 144 | } UnoShare; 145 | 146 | /** 147 | * 148 | * Share metadata struct. Metadata about a share can be obtained by calling 149 | * `uno_get_share_metadata` with an UnoS39Share. 150 | * 151 | */ 152 | typedef struct 153 | { 154 | /** 155 | * Random 15 bit value which is the same for all shares and is used to 156 | * verify that the shares belong together; it is also used as salt in the 157 | * encryption of the master secret. (15 bits) 158 | */ 159 | uint16_t identifier; 160 | /** 161 | * Indicates the total number of iterations to be used in PBKDF2. 
The 162 | * number of iterations is calculated as 10000x2^e. (5 bits) 163 | */ 164 | uint8_t iteration_exponent; 165 | /** 166 | * The x value of the group share (4 bits) 167 | */ 168 | uint8_t group_index; 169 | /** 170 | * indicates how many group shares are needed to reconstruct the master 171 | * secret. The actual value is endoded as Gt = GT - 1, so a value of 0 172 | * indicates that a single group share is needed (GT = 1), a value of 1 173 | * indicates that two group shares are needed (GT = 2) etc. (4 bits) 174 | */ 175 | uint8_t group_threshold; 176 | /** 177 | * indicates the total number of groups. The actual value is encoded as 178 | * g = G - 1 (4 bits) 179 | */ 180 | uint8_t group_count; 181 | /** 182 | * Member index, or x value of the member share in the given group (4 bits) 183 | */ 184 | uint8_t member_index; 185 | /** 186 | * indicates how many member shares are needed to reconstruct the group 187 | * share. The actual value is encoded as t = T − 1. (4 bits) 188 | */ 189 | uint8_t member_threshold; 190 | /** 191 | * corresponds to a list of the SSS part's fk(x) values 1 ≤ k ≤ n. Each 192 | * fk(x) value is encoded as a string of eight bits in big-endian order. 193 | * The concatenation of these bit strings is the share value. This value is 194 | * left-padded with "0" bits so that the length of the padded share value 195 | * in bits becomes the nearest multiple of 10. (padding + 8n bits) 196 | */ 197 | UnoByteSlice share_value; 198 | /** 199 | * an RS1024 checksum of the data part of the share 200 | * (that is id || e || GI || Gt || g || I || t || ps). The customization 201 | * string (cs) of RS1024 is "shamir". (30 bits) 202 | */ 203 | uint32_t checksum; 204 | } UnoShareMetadata; 205 | 206 | /** 207 | * 208 | * Get a description for the provided error code. The lifetime of the returned 209 | * string does not need to be managed by the caller. 
210 | * 211 | */ 212 | const char *uno_get_msg_from_err(int err); 213 | 214 | /** 215 | * 216 | * Create an uno id struct from a 32 byte seed data array. The caller is 217 | * responsible calling `uno_free_id` on the returned struct once finished. 218 | * 219 | */ 220 | int uno_get_id_from_bytes(const uint8_t *bytes, size_t len, const UnoId **out); 221 | 222 | /** 223 | * 224 | * Copy the raw 32 bytes backing an uno Id into caller-owned memory. 225 | * 226 | */ 227 | int uno_copy_id_bytes(const UnoId *uno_id, uint8_t *bytes, size_t len); 228 | 229 | /** 230 | * 231 | * Free a previously allocated UnoId from `uno_get_id_from_bytes`. 232 | * 233 | */ 234 | void uno_free_id(UnoId *id); 235 | 236 | /** 237 | * 238 | * Get the raw bytes backing an uno Id. 239 | * 240 | */ 241 | int uno_get_bytes_from_id(const UnoId *uno_id, UnoByteSlice *out); 242 | 243 | /** 244 | * 245 | * Free the backing array on an UnoByteSlice from a function that returns an 246 | * allocated UnoByteSlice, e.g. `uno_get_id_bytes`. 247 | * 248 | */ 249 | void uno_free_byte_slice(UnoByteSlice byte_slice); 250 | 251 | /** 252 | * 253 | * See s39::split. 254 | * 255 | * Rather than an array of tuples, the caller provides an array of GroupSpec 256 | * structs. The group_threshold is fixed at 1 so this parameter is currently 257 | * unused. 258 | * 259 | * Upon success, the SplitResult represents an array of UnoGroupSplits of 260 | * length group_total. 261 | * 262 | */ 263 | int uno_s39_split(const UnoId *uno_id, 264 | size_t _group_threshold, 265 | const UnoGroupSpec *group_specs, 266 | size_t group_total, 267 | const UnoSplitResult **out); 268 | 269 | /** 270 | * 271 | * Free a previously allocated UnoSplitResult from `uno_s39_split`. 272 | * 273 | */ 274 | void uno_free_split_result(UnoSplitResult *split_result); 275 | 276 | /** 277 | * 278 | * Get an UnoGroupSplit by index from an opaque UnoSplitResult. 
279 | * 280 | */ 281 | int uno_get_group_from_split_result(const UnoSplitResult *split_result, 282 | size_t index, 283 | UnoGroupSplit *out); 284 | 285 | /** 286 | * 287 | * Free a previously allocated GroupSplit returned by 288 | * `uno_get_group_from_split_result`. 289 | * 290 | */ 291 | void uno_free_group_split(UnoGroupSplit group_split); 292 | 293 | /** 294 | * 295 | * Returns the actual member share by index. 296 | * 297 | */ 298 | int uno_get_s39_share_by_index(UnoGroupSplit group_split, 299 | uint8_t index, 300 | UnoShare *out); 301 | 302 | /** 303 | * 304 | * Convert a mnemonic string of 33 space separated words to an internal share 305 | * representation. 306 | * 307 | */ 308 | int uno_get_s39_share_from_mnemonic(const char *ptr, UnoShare *out); 309 | 310 | /** 311 | * 312 | * Free a previously allocated share returned by `uno_get_s39_share_by_index` 313 | * or `uno_get_s39_share_from_mnemonic`. 314 | * 315 | */ 316 | void uno_free_s39_share(UnoShare share); 317 | 318 | /** 319 | * 320 | * Get the share metadata from an UnoShare. 321 | * 322 | */ 323 | int uno_get_s39_share_metadata(UnoShare share, UnoShareMetadata *out); 324 | 325 | /** 326 | * 327 | * Free a previously allocated ShareMetadata returned by 328 | * `uno_get_s39_share_metadata`. 329 | * 330 | */ 331 | void uno_free_s39_share_metadata(UnoShareMetadata metadata); 332 | 333 | /** 334 | * 335 | * See s39::combine. 336 | * 337 | * Provided an array of c-stirng s39 shamir's shares, recombine and recover 338 | * the original UnoId. The returned UnoId must be freed using `uno_free_id`. 339 | * 340 | */ 341 | int uno_s39_combine(const char *const *share_nmemonics, 342 | size_t total_shares, 343 | const UnoId **out); 344 | 345 | #endif /* uno_ffi_h */ 346 | -------------------------------------------------------------------------------- /xcf/src/lib.rs: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright (C) 2021 WithUno, Inc. 
3 | // All rights reserved. 4 | // 5 | // SPDX-License-Identifier: AGPL-3.0-only 6 | // 7 | 8 | /// 9 | /// Reexport everything from the ffi crate. 10 | /// 11 | pub use ffi::*; 12 | -------------------------------------------------------------------------------- /xcf/sumo.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -ex 3 | 4 | : "${LIBNAME:=libunoxcf}" 5 | : "${OUTNAME:=UnoRust}" 6 | : "${TOOLCHAIN:=nightly}" 7 | : "${PROFILE:=release}" 8 | : "${PROFDIR:=$PROFILE}" 9 | : "${OUTDIR:=../target/$PROFDIR}" 10 | 11 | # 12 | # Build an archs table because the triple arch is not the same as lipo arch. 13 | # 14 | ARCHS=" 15 | aarch64 16 | x86_64 17 | " 18 | subarchs=$(mktemp -d) 19 | echo "arm64v8" > $subarchs/aarch64 20 | echo "x86_64" > $subarchs/x86_64 21 | 22 | mkdir -p $OUTDIR/a 23 | 24 | # 25 | # Build macOS. 26 | # 27 | lipo_args="" 28 | 29 | for ARCH in $ARCHS 30 | do 31 | TRIPLE="$ARCH-apple-darwin" 32 | cargo +$TOOLCHAIN build \ 33 | -Z unstable-options --profile $PROFILE \ 34 | --target $TRIPLE 35 | 36 | larch=$(< $subarchs/$ARCH) 37 | lipo_args="$lipo_args 38 | -arch $larch ../target/$TRIPLE/$PROFDIR/$LIBNAME.a" 39 | done 40 | 41 | lipo -create $lipo_args -output $OUTDIR/a/$LIBNAME-macos.a 42 | 43 | xc_args="$xc_args 44 | -library $OUTDIR/a/$LIBNAME-macos.a" 45 | xc_args="$xc_args 46 | -headers include" 47 | 48 | 49 | # 50 | # Build iOS. 51 | # 52 | cargo +$TOOLCHAIN build \ 53 | -Z unstable-options --profile $PROFILE \ 54 | --target aarch64-apple-ios 55 | 56 | cp ../target/aarch64-apple-ios/$PROFDIR/$LIBNAME.a $OUTDIR/a/$LIBNAME-ios.a 57 | 58 | xc_args="$xc_args 59 | -library $OUTDIR/a/$LIBNAME-ios.a" 60 | xc_args="$xc_args 61 | -headers include" 62 | 63 | 64 | # 65 | # Build ios simulator. 
#
cargo +$TOOLCHAIN build \
  -Z unstable-options --profile $PROFILE \
  --target aarch64-apple-ios-sim

lipo_args="
  -arch arm64v8 ../target/aarch64-apple-ios-sim/$PROFDIR/$LIBNAME.a"

# The simulator target doesn't end in `-sim` on x86_64
cargo +$TOOLCHAIN build \
  -Z unstable-options --profile $PROFILE \
  --target x86_64-apple-ios

lipo_args="$lipo_args
  -arch x86_64 ../target/x86_64-apple-ios/$PROFDIR/$LIBNAME.a"

lipo -create $lipo_args -output $OUTDIR/a/$LIBNAME-ios-sim.a

xc_args="$xc_args
  -library $OUTDIR/a/$LIBNAME-ios-sim.a"
xc_args="$xc_args
  -headers include"


#
# Build mac catalyst.
#
lipo_args=""
for ARCH in $ARCHS
do
  TRIPLE="$ARCH-apple-ios-macabi"
  cargo +$TOOLCHAIN build \
    -Z unstable-options --profile $PROFILE \
    -Z build-std \
    --target $TRIPLE

  # NOTE: $(cat ...) instead of the bash-only $(< ...): this script runs
  # under #!/bin/sh, where $(< file) expands to nothing.
  larch=$(cat "$subarchs/$ARCH")
  lipo_args="$lipo_args
    -arch $larch ../target/$TRIPLE/$PROFDIR/$LIBNAME.a"
done

lipo -create $lipo_args -output $OUTDIR/a/$LIBNAME-ios-macabi.a

xc_args="$xc_args
  -library $OUTDIR/a/$LIBNAME-ios-macabi.a"
xc_args="$xc_args
  -headers include"


#
# Build the sumo xcframework.
117 | # 118 | rm -rf $OUTDIR/$OUTNAME.xcframework 119 | xcodebuild -create-xcframework $xc_args -output $OUTDIR/$OUTNAME.xcframework 120 | 121 | -------------------------------------------------------------------------------- /xcf/xcode12/aarch64-apple-ios14.0-macabi.json: -------------------------------------------------------------------------------- 1 | { 2 | "abi-return-struct-as-int": true, 3 | "arch": "aarch64", 4 | "archive-format": "darwin", 5 | "bitcode-llvm-cmdline": "-triple\u0000arm64-apple-ios-macabi\u0000-emit-obj\u0000-disable-llvm-passes\u0000-target-abi\u0000darwinpcs\u0000-Os\u0000", 6 | "cpu": "apple-a12", 7 | "data-layout": "e-m:o-i64:64-i128:128-n32:64-S128", 8 | "dll-suffix": ".dylib", 9 | "dwarf-version": 2, 10 | "eh-frame-header": false, 11 | "eliminate-frame-pointer": false, 12 | "emit-debug-gdb-scripts": false, 13 | "executables": true, 14 | "features": "+neon,+fp-armv8,+apple-a7", 15 | "forces-embed-bitcode": true, 16 | "function-sections": false, 17 | "has-rpath": true, 18 | "is-builtin": true, 19 | "is-like-osx": true, 20 | "link-env": [ 21 | "ZERO_AR_DATE=1" 22 | ], 23 | "link-env-remove": [ 24 | "IPHONEOS_DEPLOYMENT_TARGET" 25 | ], 26 | "llvm-target": "arm64-apple-ios14.0-macabi", 27 | "max-atomic-width": 128, 28 | "os": "ios", 29 | "split-debuginfo": "packed", 30 | "target-family": "unix", 31 | "target-pointer-width": "64", 32 | "unsupported-abis": [ 33 | "stdcall", 34 | "fastcall", 35 | "vectorcall", 36 | "thiscall", 37 | "win64", 38 | "sysv64" 39 | ], 40 | "vendor": "apple" 41 | } 42 | -------------------------------------------------------------------------------- /xcf/xcode12/aarch64-apple-ios14.0-simulator.json: -------------------------------------------------------------------------------- 1 | { 2 | "abi-return-struct-as-int": true, 3 | "arch": "aarch64", 4 | "archive-format": "darwin", 5 | "bitcode-llvm-cmdline": 
"-triple\u0000arm64-apple-ios14.0-simulator\u0000-emit-obj\u0000-disable-llvm-passes\u0000-target-abi\u0000darwinpcs\u0000-Os\u0000", 6 | "cpu": "apple-a7", 7 | "data-layout": "e-m:o-i64:64-i128:128-n32:64-S128", 8 | "dll-suffix": ".dylib", 9 | "dwarf-version": 2, 10 | "eh-frame-header": false, 11 | "eliminate-frame-pointer": false, 12 | "emit-debug-gdb-scripts": false, 13 | "executables": true, 14 | "features": "+neon,+fp-armv8,+apple-a7", 15 | "forces-embed-bitcode": true, 16 | "function-sections": false, 17 | "has-rpath": true, 18 | "is-builtin": true, 19 | "is-like-osx": true, 20 | "link-env": [ 21 | "ZERO_AR_DATE=1" 22 | ], 23 | "link-env-remove": [ 24 | "MACOSX_DEPLOYMENT_TARGET" 25 | ], 26 | "llvm-target": "arm64-apple-ios14.0-simulator", 27 | "max-atomic-width": 128, 28 | "os": "ios", 29 | "target-family": "unix", 30 | "target-pointer-width": "64", 31 | "unsupported-abis": [ 32 | "stdcall", 33 | "fastcall", 34 | "vectorcall", 35 | "thiscall", 36 | "win64", 37 | "sysv64" 38 | ], 39 | "vendor": "apple" 40 | } 41 | -------------------------------------------------------------------------------- /xcf/xcode12/aarch64-apple-ios7.0.0.json: -------------------------------------------------------------------------------- 1 | { 2 | "abi-return-struct-as-int": true, 3 | "arch": "aarch64", 4 | "archive-format": "darwin", 5 | "bitcode-llvm-cmdline": "-triple\u0000arm64-apple-ios11.0.0\u0000-emit-obj\u0000-disable-llvm-passes\u0000-target-abi\u0000darwinpcs\u0000-Os\u0000", 6 | "cpu": "apple-a7", 7 | "data-layout": "e-m:o-i64:64-i128:128-n32:64-S128", 8 | "dll-suffix": ".dylib", 9 | "dwarf-version": 2, 10 | "eh-frame-header": false, 11 | "eliminate-frame-pointer": false, 12 | "emit-debug-gdb-scripts": false, 13 | "executables": true, 14 | "features": "+neon,+fp-armv8,+apple-a7", 15 | "forces-embed-bitcode": true, 16 | "function-sections": false, 17 | "has-rpath": true, 18 | "is-builtin": true, 19 | "is-like-osx": true, 20 | "link-env": [ 21 | "ZERO_AR_DATE=1" 22 | ], 23 
| "link-env-remove": [ 24 | "MACOSX_DEPLOYMENT_TARGET" 25 | ], 26 | "llvm-target": "arm64-apple-ios7.0.0", 27 | "max-atomic-width": 128, 28 | "os": "ios", 29 | "split-debuginfo": "packed", 30 | "target-family": "unix", 31 | "target-pointer-width": "64", 32 | "unsupported-abis": [ 33 | "stdcall", 34 | "fastcall", 35 | "vectorcall", 36 | "thiscall", 37 | "win64", 38 | "sysv64" 39 | ], 40 | "vendor": "apple" 41 | } 42 | -------------------------------------------------------------------------------- /xcf/xcode12/sumo.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -ex 3 | 4 | : "${LIBNAME:=libunoxcf}" 5 | : "${OUTNAME:=UnoRust}" 6 | : "${TOOLCHAIN:=nightly-2021-02-06}" 7 | : "${PROFILE:=release}" 8 | : "${PROFDIR:=$PROFILE}" 9 | : "${OUTDIR:=../target/$PROFDIR/xcode12}" 10 | 11 | # 12 | # Build an archs table because the triple arch is not the same as lipo arch. 13 | # 14 | ARCHS=" 15 | aarch64 16 | x86_64 17 | " 18 | subarchs=$(mktemp -d) 19 | echo "arm64v8" > $subarchs/aarch64 20 | echo "x86_64" > $subarchs/x86_64 21 | 22 | mkdir -p $OUTDIR/a 23 | 24 | # 25 | # Build macOS. 26 | # 27 | lipo_args="" 28 | for ARCH in $ARCHS 29 | do 30 | TRIPLE="$ARCH-apple-darwin" 31 | cargo +$TOOLCHAIN build \ 32 | -Z unstable-options --profile $PROFILE \ 33 | -Z build-std \ 34 | --target $TRIPLE 35 | 36 | larch=$(< $subarchs/$ARCH) 37 | lipo_args="$lipo_args 38 | -arch $larch ../target/$TRIPLE/$PROFDIR/$LIBNAME.a" 39 | done 40 | 41 | lipo -create $lipo_args -output $OUTDIR/a/$LIBNAME-macos.a 42 | 43 | xc_args="$xc_args 44 | -library $OUTDIR/a/$LIBNAME-macos.a" 45 | xc_args="$xc_args 46 | -headers include" 47 | 48 | 49 | # 50 | # Build iOS. 
51 | # 52 | TRIPLE=aarch64-apple-ios7.0.0 53 | cargo +$TOOLCHAIN build \ 54 | -Z unstable-options --profile $PROFILE \ 55 | -Z build-std \ 56 | --target xcode12/$TRIPLE.json 57 | 58 | cp ../target/$TRIPLE/$PROFDIR/$LIBNAME.a $OUTDIR/a/$LIBNAME-ios.a 59 | 60 | xc_args="$xc_args 61 | -library $OUTDIR/a/$LIBNAME-ios.a" 62 | xc_args="$xc_args 63 | -headers include" 64 | 65 | 66 | # 67 | # Build ios simulator. 68 | # 69 | # I guess we can't build for Xcode12 iOS simulator in rust because llvm doesn't 70 | # have the right target. 71 | # 72 | lipo_args="" 73 | TRIPLE="x86_64-apple-ios7.0.0-simulator" 74 | cargo +$TOOLCHAIN build \ 75 | -Z unstable-options --profile $PROFILE \ 76 | -Z build-std \ 77 | --target xcode12/$TRIPLE.json 78 | 79 | lipo_args="$lipo_args 80 | -arch x86_64 ../target/$TRIPLE/$PROFDIR/$LIBNAME.a" 81 | 82 | TRIPLE="aarch64-apple-ios14.0-simulator" 83 | cargo +$TOOLCHAIN build \ 84 | -Z unstable-options --profile $PROFILE \ 85 | -Z build-std \ 86 | --target xcode12/$TRIPLE.json 87 | 88 | lipo_args="$lipo_args 89 | -arch arm64v8 ../target/$TRIPLE/$PROFDIR/$LIBNAME.a" 90 | 91 | lipo -create $lipo_args -output $OUTDIR/a/$LIBNAME-ios-sim.a 92 | 93 | xc_args="$xc_args 94 | -library $OUTDIR/a/$LIBNAME-ios-sim.a" 95 | xc_args="$xc_args 96 | -headers include" 97 | 98 | 99 | # 100 | # Build mac catalyst. 
101 | # 102 | lipo_args="" 103 | TRIPLE=aarch64-apple-ios14.0-macabi 104 | cargo +$TOOLCHAIN build \ 105 | -Z unstable-options --profile $PROFILE \ 106 | -Z build-std \ 107 | --target xcode12/$TRIPLE.json 108 | 109 | lipo_args="$lipo_args 110 | -arch arm64v8 ../target/$TRIPLE/$PROFDIR/$LIBNAME.a" 111 | 112 | TRIPLE=x86_64-apple-ios-macabi 113 | cargo +$TOOLCHAIN build \ 114 | -Z unstable-options --profile $PROFILE \ 115 | -Z build-std \ 116 | --target $TRIPLE 117 | 118 | lipo_args="$lipo_args 119 | -arch x86_64 ../target/$TRIPLE/$PROFDIR/$LIBNAME.a" 120 | 121 | lipo -create $lipo_args -output $OUTDIR/a/$LIBNAME-ios-macabi.a 122 | 123 | xc_args="$xc_args 124 | -library $OUTDIR/a/$LIBNAME-ios-macabi.a" 125 | xc_args="$xc_args 126 | -headers include" 127 | 128 | 129 | # 130 | # Build the sumo xcframework. 131 | # 132 | rm -rf $OUTDIR/$OUTNAME.xcframework 133 | xcodebuild -create-xcframework $xc_args -output $OUTDIR/$OUTNAME.xcframework 134 | 135 | -------------------------------------------------------------------------------- /xcf/xcode12/x86_64-apple-ios7.0.0-simulator.json: -------------------------------------------------------------------------------- 1 | { 2 | "abi-return-struct-as-int": true, 3 | "arch": "x86_64", 4 | "archive-format": "darwin", 5 | "cpu": "core2", 6 | "data-layout": "e-m:o-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", 7 | "dll-suffix": ".dylib", 8 | "dwarf-version": 2, 9 | "eh-frame-header": false, 10 | "eliminate-frame-pointer": false, 11 | "emit-debug-gdb-scripts": false, 12 | "executables": true, 13 | "function-sections": false, 14 | "has-rpath": true, 15 | "is-builtin": true, 16 | "is-like-osx": true, 17 | "link-env": [ 18 | "ZERO_AR_DATE=1" 19 | ], 20 | "link-env-remove": [ 21 | "MACOSX_DEPLOYMENT_TARGET" 22 | ], 23 | "llvm-target": "x86_64-apple-ios7.0.0", 24 | "max-atomic-width": 64, 25 | "os": "ios", 26 | "split-debuginfo": "packed", 27 | "stack-probes": { 28 | "kind": "inline-or-call", 29 | 
"min-llvm-version-for-inline": [ 30 | 11, 31 | 0, 32 | 1 33 | ] 34 | }, 35 | "target-family": "unix", 36 | "target-pointer-width": "64", 37 | "vendor": "apple" 38 | } 39 | -------------------------------------------------------------------------------- /xcf/xcode13/aarch64-apple-ios7.0.0.json: -------------------------------------------------------------------------------- 1 | { 2 | "abi-return-struct-as-int": true, 3 | "arch": "aarch64", 4 | "archive-format": "darwin", 5 | "bitcode-llvm-cmdline": "-triple\u0000arm64-apple-ios11.0.0\u0000-emit-obj\u0000-disable-llvm-passes\u0000-target-abi\u0000darwinpcs\u0000-Os\u0000", 6 | "cpu": "apple-a7", 7 | "data-layout": "e-m:o-i64:64-i128:128-n32:64-S128", 8 | "dll-suffix": ".dylib", 9 | "dwarf-version": 2, 10 | "eh-frame-header": false, 11 | "emit-debug-gdb-scripts": false, 12 | "executables": true, 13 | "features": "+neon,+fp-armv8,+apple-a7", 14 | "forces-embed-bitcode": true, 15 | "frame-pointer": "non-leaf", 16 | "function-sections": false, 17 | "has-rpath": true, 18 | "is-builtin": false, 19 | "is-like-osx": true, 20 | "link-env": [ 21 | "ZERO_AR_DATE=1" 22 | ], 23 | "link-env-remove": [ 24 | "MACOSX_DEPLOYMENT_TARGET" 25 | ], 26 | "linker-is-gnu": false, 27 | "llvm-target": "arm64-apple-ios7.0.0", 28 | "max-atomic-width": 128, 29 | "os": "ios", 30 | "split-debuginfo": "packed", 31 | "target-family": [ 32 | "unix" 33 | ], 34 | "target-pointer-width": "64", 35 | "vendor": "apple" 36 | } 37 | -------------------------------------------------------------------------------- /xcf/xcode13/sumo.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -ex 3 | 4 | : "${LIBNAME:=libunoxcf}" 5 | : "${OUTNAME:=UnoRust}" 6 | : "${TOOLCHAIN:=nightly-2021-07-24}" 7 | : "${PROFILE:=release}" 8 | : "${PROFDIR:=$PROFILE}" 9 | : "${OUTDIR:=../target/$PROFDIR/xcode13}" 10 | 11 | # 12 | # Build an archs table because the triple arch is not the same as lipo arch. 
13 | # 14 | ARCHS=" 15 | aarch64 16 | x86_64 17 | " 18 | subarchs=$(mktemp -d) 19 | echo "arm64v8" > $subarchs/aarch64 20 | echo "x86_64" > $subarchs/x86_64 21 | 22 | mkdir -p $OUTDIR/a 23 | 24 | # 25 | # Build macOS. 26 | # 27 | lipo_args="" 28 | for ARCH in $ARCHS 29 | do 30 | TRIPLE="$ARCH-apple-darwin" 31 | cargo +$TOOLCHAIN build \ 32 | -Z unstable-options --profile $PROFILE \ 33 | -Z build-std \ 34 | --target $TRIPLE 35 | 36 | larch=$(< $subarchs/$ARCH) 37 | lipo_args="$lipo_args 38 | -arch $larch ../target/$TRIPLE/$PROFDIR/$LIBNAME.a" 39 | done 40 | 41 | lipo -create $lipo_args -output $OUTDIR/a/$LIBNAME-macos.a 42 | 43 | xc_args="$xc_args 44 | -library $OUTDIR/a/$LIBNAME-macos.a" 45 | xc_args="$xc_args 46 | -headers include" 47 | 48 | 49 | # 50 | # Build iOS. 51 | # 52 | TRIPLE=aarch64-apple-ios7.0.0 53 | cargo +$TOOLCHAIN build \ 54 | -Z unstable-options --profile $PROFILE \ 55 | -Z build-std \ 56 | --target xcode13/$TRIPLE.json 57 | 58 | cp ../target/$TRIPLE/$PROFDIR/$LIBNAME.a $OUTDIR/a/$LIBNAME-ios.a 59 | 60 | xc_args="$xc_args 61 | -library $OUTDIR/a/$LIBNAME-ios.a" 62 | xc_args="$xc_args 63 | -headers include" 64 | 65 | 66 | # 67 | # Build ios simulator. 68 | # 69 | cargo +$TOOLCHAIN build \ 70 | -Z unstable-options --profile $PROFILE \ 71 | -Z build-std \ 72 | --target aarch64-apple-ios-sim 73 | 74 | lipo_args=" 75 | -arch arm64v8 ../target/aarch64-apple-ios-sim/$PROFDIR/$LIBNAME.a" 76 | 77 | # The simulator target doesn't end in `-sim` on x86_64 78 | TRIPLE=x86_64-apple-ios7.0.0-sim 79 | cargo +$TOOLCHAIN build \ 80 | -Z unstable-options --profile $PROFILE \ 81 | -Z build-std \ 82 | --target xcode13/$TRIPLE.json 83 | 84 | lipo_args="$lipo_args 85 | -arch x86_64 ../target/$TRIPLE/$PROFDIR/$LIBNAME.a" 86 | 87 | lipo -create $lipo_args -output $OUTDIR/a/$LIBNAME-ios-sim.a 88 | 89 | xc_args="$xc_args 90 | -library $OUTDIR/a/$LIBNAME-ios-sim.a" 91 | xc_args="$xc_args 92 | -headers include" 93 | 94 | 95 | # 96 | # Build mac catalyst. 
97 | # 98 | lipo_args="" 99 | for ARCH in $ARCHS 100 | do 101 | TRIPLE="$ARCH-apple-ios-macabi" 102 | cargo +$TOOLCHAIN build \ 103 | -Z unstable-options --profile $PROFILE \ 104 | -Z build-std \ 105 | --target $TRIPLE 106 | 107 | larch=$(< $subarchs/$ARCH) 108 | lipo_args="$lipo_args 109 | -arch $larch ../target/$TRIPLE/$PROFDIR/$LIBNAME.a" 110 | done 111 | 112 | lipo -create $lipo_args -output $OUTDIR/a/$LIBNAME-ios-macabi.a 113 | 114 | xc_args="$xc_args 115 | -library $OUTDIR/a/$LIBNAME-ios-macabi.a" 116 | xc_args="$xc_args 117 | -headers include" 118 | 119 | 120 | # 121 | # Build the sumo xcframework. 122 | # 123 | rm -rf $OUTDIR/$OUTNAME.xcframework 124 | xcodebuild -create-xcframework $xc_args -output $OUTDIR/$OUTNAME.xcframework 125 | 126 | -------------------------------------------------------------------------------- /xcf/xcode13/x86_64-apple-ios7.0.0-sim.json: -------------------------------------------------------------------------------- 1 | { 2 | "abi-return-struct-as-int": true, 3 | "arch": "x86_64", 4 | "archive-format": "darwin", 5 | "cpu": "core2", 6 | "data-layout": "e-m:o-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", 7 | "dll-suffix": ".dylib", 8 | "dwarf-version": 2, 9 | "eh-frame-header": false, 10 | "emit-debug-gdb-scripts": false, 11 | "executables": true, 12 | "frame-pointer": "always", 13 | "function-sections": false, 14 | "has-rpath": true, 15 | "is-builtin": false, 16 | "is-like-osx": true, 17 | "link-env": [ 18 | "ZERO_AR_DATE=1" 19 | ], 20 | "link-env-remove": [ 21 | "MACOSX_DEPLOYMENT_TARGET" 22 | ], 23 | "linker-is-gnu": false, 24 | "llvm-target": "x86_64-apple-ios7.0.0-simulator", 25 | "max-atomic-width": 64, 26 | "os": "ios", 27 | "split-debuginfo": "packed", 28 | "stack-probes": { 29 | "kind": "call" 30 | }, 31 | "target-family": [ 32 | "unix" 33 | ], 34 | "target-pointer-width": "64", 35 | "vendor": "apple" 36 | } 37 | --------------------------------------------------------------------------------