├── .github
│   ├── FUNDING.yml
│   └── workflows
│       ├── cicd.yml
│       └── doc.yml
├── .gitignore
├── Cargo.lock
├── Cargo.toml
├── LICENSE-APACHE2
├── LICENSE-MIT
├── README.md
├── flake.lock
├── flake.nix
├── lofire-broker
│   ├── Cargo.toml
│   └── src
│       ├── account.rs
│       ├── auth.rs
│       ├── config.rs
│       ├── connection.rs
│       ├── lib.rs
│       ├── overlay.rs
│       ├── peer.rs
│       ├── repostoreinfo.rs
│       ├── server.rs
│       └── topic.rs
├── lofire-demo
│   ├── Cargo.toml
│   └── src
│       └── main.rs
├── lofire-net
│   ├── Cargo.toml
│   └── src
│       ├── errors.rs
│       ├── lib.rs
│       └── types.rs
├── lofire-node
│   ├── Cargo.toml
│   └── src
│       └── main.rs
├── lofire-p2p
│   ├── Cargo.toml
│   └── src
│       └── lib.rs
├── lofire-store-lmdb
│   ├── Cargo.toml
│   └── src
│       ├── brokerstore.rs
│       ├── lib.rs
│       └── repostore.rs
└── lofire
    ├── Cargo.toml
    └── src
        ├── block.rs
        ├── branch.rs
        ├── brokerstore.rs
        ├── commit.rs
        ├── errors.rs
        ├── lib.rs
        ├── object.rs
        ├── repo.rs
        ├── store.rs
        ├── types.rs
        └── utils.rs

--------------------------------------------------------------------------------
/.github/FUNDING.yml:
--------------------------------------------------------------------------------
# These are supported funding model platforms

github: # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2]
patreon: # Replace with a single Patreon username
open_collective: # Replace with a single Open Collective username
ko_fi: # Replace with a single Ko-fi username
tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
liberapay: # Replace with a single Liberapay username
issuehunt: # Replace with a single IssueHunt username
otechie: # Replace with a single Otechie username
lfx_crowdfunding: # Replace with a single LFX Crowdfunding project-name e.g., cloud-foundry
custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2']
  - https://p2pcollab.net/donate

--------------------------------------------------------------------------------
/.github/workflows/cicd.yml:
--------------------------------------------------------------------------------
name: Build
on: push
jobs:
  cargo-build-test:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - uses: nixbuild/nix-quick-install-action@v17
        with:
          nix_conf: experimental-features = nix-command flakes
      - name: nix develop
        run: nix develop
      - name: cargo build
        run: cargo build
      - name: cargo test
        run: cargo test

  default-package:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - uses: nixbuild/nix-quick-install-action@v17
        with:
          nix_conf: experimental-features = nix-command flakes
      - name: default package
        run: nix build

  lofire-package:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - uses: nixbuild/nix-quick-install-action@v17
        with:
          nix_conf: experimental-features = nix-command flakes
      - name: lofire package
        run: nix build '.#lofire'

  lofire-broker-package:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - uses: nixbuild/nix-quick-install-action@v17
        with:
          nix_conf: experimental-features = nix-command flakes
      - name: lofire-broker package
        run: nix build '.#lofire-broker'

  lofire-p2p-package:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - uses: nixbuild/nix-quick-install-action@v17
        with:
          nix_conf: experimental-features = nix-command flakes
      - name: lofire-p2p package
        run: nix build '.#lofire-p2p'

  lofire-node-package:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - uses: nixbuild/nix-quick-install-action@v17
        with:
          nix_conf: experimental-features = nix-command flakes
      - name: lofire-node package
        run: nix build '.#lofire-node'

  lofire-demo-package:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - uses: nixbuild/nix-quick-install-action@v17
        with:
          nix_conf: experimental-features = nix-command flakes
      - name: lofire-demo package
        run: nix build '.#lofire-demo'

--------------------------------------------------------------------------------
/.github/workflows/doc.yml:
--------------------------------------------------------------------------------
# Simple workflow for deploying static content to GitHub Pages
name: Publish documentation

on:
  # Runs on pushes targeting the default branch
  push:
    branches: [master]

  # Allows you to run this workflow manually from the Actions tab
  workflow_dispatch:

# Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages
permissions:
  contents: read
  pages: write
  id-token: write

# Allow one concurrent deployment
concurrency:
  group: "pages"
  cancel-in-progress: true

jobs:
  # Single deploy job since we're just deploying
  deploy:
    environment:
      name: github-pages
      url: ${{ steps.deployment.outputs.page_url }}
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - uses: nixbuild/nix-quick-install-action@v17
        with:
          nix_conf: experimental-features = nix-command flakes
      - name: nix develop
        run: nix develop
      - name: cargo doc
        run: cargo doc --no-deps
      - name: public/
        run: mkdir public; mv target/doc public
      - name: public/index.html
        run: echo '<!DOCTYPE html><title>LoFiRe Rust Documentation</title><h1>LoFiRe Rust Documentation</h1><p><a href="doc/">API Documentation</a></p>' >public/index.html
      - name: doc/index.html
        run: (echo '<!DOCTYPE html><title>LoFiRe Rust API Documentation</title><h1>LoFiRe Rust API Documentation</h1>') >public/doc/index.html
      - name: Setup Pages
        uses: actions/configure-pages@v2
      - name: Upload artifact
        uses: actions/upload-pages-artifact@v1
        with:
          path: public
      - name: Deploy to GitHub Pages
        id: deployment
        uses: actions/deploy-pages@v1

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
*~
.*
!.github
\#*
/target
/result*

--------------------------------------------------------------------------------
/Cargo.toml:
--------------------------------------------------------------------------------
[workspace]
members = [
    "lofire",
    "lofire-net",
    "lofire-broker",
    "lofire-p2p",
    "lofire-store-lmdb",
    "lofire-node",
    "lofire-demo",
]

--------------------------------------------------------------------------------
/LICENSE-APACHE2:
--------------------------------------------------------------------------------
Copyright 2022 TG x Thoth

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

--------------------------------------------------------------------------------
/LICENSE-MIT:
--------------------------------------------------------------------------------
Copyright 2022 TG x Thoth

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# LoFiRe Rust implementation

## Website

[LoFiRe](https://lofi.re)

## Design & specification

[LoFiRe: Local-First Repositories for Asynchronous Collaboration over Community Overlay Networks](https://lofi.re/design/lofire)

## API documentation

[LoFiRe Rust API Documentation](https://p2pcollab.github.io/lofire-rs/doc/)

## Overview

The following components are implemented so far:

- lofire: library that provides access to the repository, branches, commits, objects and blocks, and contains a hash-map-backed store implementation.
- lofire-store-lmdb: encrypted LMDB store implementation
- lofire-net: library that provides the network message types
- lofire-broker: library that implements the broker server and client protocol with async; this allows running them over arbitrary network transports or in-process without networking
- lofire-node: daemon that runs a websocket server and serves the broker protocol over it
- lofire-demo: demo application that connects to the node and sends messages to it, demonstrating usage and functionality (see the usage sketch at the end of this README)

For examples of using the libraries, see the test cases and the demo application.
To run the demo, first run lofire-node, then lofire-demo (see below).

## Development

### Cargo

#### Build

Build all packages:

```
cargo build
```

#### Test

Test all:

```
cargo test --all --verbose -- --nocapture
```

Test a single module:

```
cargo test --package lofire --lib -- branch::test --nocapture
```

#### Documentation

Generate documentation for all packages without their dependencies:

```
cargo doc --no-deps
```

The generated documentation can be found in `target/doc/`.

#### Run

Build & run executables:

```
cargo run --bin lofire-node
cargo run --bin lofire-demo
```

### Nix

Install the [Nix package manager](https://nixos.org/download.html)
and [Nix Flakes](https://nixos.wiki/wiki/Flakes).

#### Development shell

Get a development shell with all dependencies available:

```
nix develop
cargo build
...
```

#### Build a package

Build the default package (`.#lofire-node`):

```
nix build
```

Build a specific package:

```
nix build '.#lofire'
```

#### Run

Run the default executable (`.#lofire-node`):

```
nix run
```

Run executables:

```
nix run '.#lofire-node'
nix run '.#lofire-demo'
```

## License

Licensed under either of the Apache License, Version 2.0, or the MIT license, at your option.

Unless you explicitly state otherwise, any contribution intentionally submitted
for inclusion in this crate by you, as defined in the Apache-2.0 license,
shall be dual licensed as above, without any additional terms or conditions.
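## Usage sketch

A minimal sketch of storing an object with the `lofire` crate, pieced together from the test cases and the demo application. The key material, file payload and maximum object size below are placeholder values chosen for illustration, and the exact signatures should be checked against the API documentation:

```rust
use lofire::object::Object;
use lofire::store::{HashMapRepoStore, RepoStore};
use lofire::types::*;

fn main() {
    // In-memory store from the lofire crate; lofire-store-lmdb provides a
    // persistent, encrypted alternative.
    let mut store = HashMapRepoStore::new();

    // Placeholder repository identity and secret; real code generates these
    // (see lofire::utils::generate_keypair as used by lofire-demo).
    let repo_pubkey = PubKey::Ed25519PubKey([1; 32]);
    let repo_secret = SymKey::ChaCha20Key([2; 32]);

    // The object is chunked into blocks of at most max_object_size bytes,
    // encrypted, and saved into the store.
    let content = ObjectContent::File(File::V0(FileV0 {
        content_type: vec![],
        metadata: vec![],
        content: vec![42; 10_000],
    }));
    let deps = vec![]; // no object dependencies
    let expiry = None; // never expires
    let max_object_size = 4000;
    let obj = Object::new(content, deps, expiry, max_object_size, repo_pubkey, repo_secret);
    obj.save(&mut store).unwrap();

    // The ObjectRef (ID plus decryption key) is what commits embed to
    // reference the object.
    println!("saved object {}", obj.reference().unwrap().id);
}
```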
125 | -------------------------------------------------------------------------------- /flake.lock: -------------------------------------------------------------------------------- 1 | { 2 | "nodes": { 3 | "flake-utils": { 4 | "locked": { 5 | "lastModified": 1656065134, 6 | "narHash": "sha256-oc6E6ByIw3oJaIyc67maaFcnjYOz1mMcOtHxbEf9NwQ=", 7 | "owner": "numtide", 8 | "repo": "flake-utils", 9 | "rev": "bee6a7250dd1b01844a2de7e02e4df7d8a0a206c", 10 | "type": "github" 11 | }, 12 | "original": { 13 | "owner": "numtide", 14 | "repo": "flake-utils", 15 | "type": "github" 16 | } 17 | }, 18 | "nixpkgs": { 19 | "locked": { 20 | "lastModified": 1657693803, 21 | "narHash": "sha256-G++2CJ9u0E7NNTAi9n5G8TdDmGJXcIjkJ3NF8cetQB8=", 22 | "owner": "nixos", 23 | "repo": "nixpkgs", 24 | "rev": "365e1b3a859281cf11b94f87231adeabbdd878a2", 25 | "type": "github" 26 | }, 27 | "original": { 28 | "owner": "nixos", 29 | "ref": "nixos-22.05", 30 | "repo": "nixpkgs", 31 | "type": "github" 32 | } 33 | }, 34 | "nixpkgs_2": { 35 | "locked": { 36 | "lastModified": 1656401090, 37 | "narHash": "sha256-bUS2nfQsvTQW2z8SK7oEFSElbmoBahOPtbXPm0AL3I4=", 38 | "owner": "NixOS", 39 | "repo": "nixpkgs", 40 | "rev": "16de63fcc54e88b9a106a603038dd5dd2feb21eb", 41 | "type": "github" 42 | }, 43 | "original": { 44 | "owner": "NixOS", 45 | "ref": "nixpkgs-unstable", 46 | "repo": "nixpkgs", 47 | "type": "github" 48 | } 49 | }, 50 | "root": { 51 | "inputs": { 52 | "nixpkgs": "nixpkgs", 53 | "rust-overlay": "rust-overlay", 54 | "utils": "utils" 55 | } 56 | }, 57 | "rust-overlay": { 58 | "inputs": { 59 | "flake-utils": "flake-utils", 60 | "nixpkgs": "nixpkgs_2" 61 | }, 62 | "locked": { 63 | "lastModified": 1657767064, 64 | "narHash": "sha256-Mp7LmSPnRfWqX7OElXr4HKNbTiDCXLaxijp23xQlDJk=", 65 | "owner": "oxalica", 66 | "repo": "rust-overlay", 67 | "rev": "db9443ca1384f94c0aa63f4e74115f7c0632a8e6", 68 | "type": "github" 69 | }, 70 | "original": { 71 | "owner": "oxalica", 72 | "repo": "rust-overlay", 73 | "type": "github" 74 | } 75 | }, 76 | "utils": { 77 | "locked": { 78 | "lastModified": 1656928814, 79 | "narHash": "sha256-RIFfgBuKz6Hp89yRr7+NR5tzIAbn52h8vT6vXkYjZoM=", 80 | "owner": "numtide", 81 | "repo": "flake-utils", 82 | "rev": "7e2a3b3dfd9af950a856d66b0a7d01e3c18aa249", 83 | "type": "github" 84 | }, 85 | "original": { 86 | "owner": "numtide", 87 | "repo": "flake-utils", 88 | "type": "github" 89 | } 90 | } 91 | }, 92 | "root": "root", 93 | "version": 7 94 | } 95 | -------------------------------------------------------------------------------- /flake.nix: -------------------------------------------------------------------------------- 1 | { 2 | description = "LoFiRe"; 3 | 4 | inputs.nixpkgs.url = "github:nixos/nixpkgs/nixos-22.05"; 5 | inputs.utils.url = "github:numtide/flake-utils"; 6 | inputs.rust-overlay.url = "github:oxalica/rust-overlay"; 7 | 8 | outputs = { 9 | self, 10 | nixpkgs, 11 | utils, 12 | rust-overlay, 13 | }: 14 | utils.lib.eachDefaultSystem (system: let 15 | overlays = [ 16 | (import rust-overlay) 17 | ]; 18 | pkgs = import nixpkgs rec { 19 | inherit system overlays; 20 | }; 21 | rust = pkgs.rust-bin.stable."1.62.0".default.override { 22 | extensions = ["rust-src"]; 23 | }; 24 | buildRustPackage = 25 | (pkgs.makeRustPlatform { 26 | cargo = rust; 27 | rustc = rust; 28 | }) 29 | .buildRustPackage; 30 | myNativeBuildInputs = with pkgs; 31 | [ 32 | pkgconfig 33 | ] 34 | ++ lib.optionals stdenv.isLinux 35 | (with pkgs; [ 36 | cargo-kcov 37 | ]); 38 | myBuildInputs = with pkgs; 39 | [ 40 | openssl 41 | ] 42 | ++ lib.optionals 
stdenv.isDarwin 43 | (with darwin.apple_sdk.frameworks; [ 44 | Security 45 | ]); 46 | myBuildRustPackage = attrs: 47 | buildRustPackage ({ 48 | version = "0.1.0"; 49 | src = ./.; 50 | cargoLock = { 51 | lockFile = ./Cargo.lock; 52 | outputHashes = { 53 | "lmdb-crypto-rs-0.14.0" = "sha256-cCTQuYxhiFHQZb+OW997sis50z5XrQk+KiTeaa2mlhk="; 54 | "rkv-0.18.0" = "sha256-G0E2qC4Uie4es91aNiVZeI50SLBUc0KQQq+t08kpRIc="; 55 | }; 56 | }; 57 | nativeBuildInputs = myNativeBuildInputs; 58 | buildInputs = myBuildInputs; 59 | RUST_BACKTRACE = 1; 60 | } 61 | // attrs); 62 | in rec { 63 | packages = rec { 64 | lofire = myBuildRustPackage rec { 65 | pname = "lofire"; 66 | buildAndTestSubdir = "./lofire"; 67 | }; 68 | lofire-broker = myBuildRustPackage rec { 69 | pname = "lofire-broker"; 70 | buildAndTestSubdir = "./lofire-broker"; 71 | }; 72 | lofire-p2p = myBuildRustPackage rec { 73 | pname = "lofire-p2p"; 74 | buildAndTestSubdir = "./lofire-p2p"; 75 | }; 76 | lofire-store-lmdb = myBuildRustPackage rec { 77 | pname = "lofire-store-lmdb"; 78 | buildAndTestSubdir = "./lofire-store-lmdb"; 79 | }; 80 | lofire-node = myBuildRustPackage rec { 81 | pname = "lofire-node"; 82 | buildAndTestSubdir = "./lofire-node"; 83 | }; 84 | lofire-demo = myBuildRustPackage rec { 85 | pname = "lofire-demo"; 86 | buildAndTestSubdir = "./lofire-demo"; 87 | }; 88 | default = lofire-node; 89 | }; 90 | 91 | apps = rec { 92 | lofire-node = utils.lib.mkApp { 93 | drv = packages.lofire-node; 94 | exePath = "/bin/lofire-node"; 95 | }; 96 | lofire-demo = utils.lib.mkApp { 97 | drv = packages.lofire-demo; 98 | exePath = "/bin/lofire-demo"; 99 | }; 100 | default = lofire-node; 101 | }; 102 | }); 103 | } 104 | -------------------------------------------------------------------------------- /lofire-broker/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "lofire-broker" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 7 | 8 | [dependencies] 9 | debug_print = "1.0.0" 10 | lofire = { path = "../lofire" } 11 | lofire-net = { path = "../lofire-net" } 12 | lofire-store-lmdb = { path = "../lofire-store-lmdb" } 13 | blake3 = "1.3.1" 14 | chacha20 = "0.9.0" 15 | serde = { version = "1.0", features = ["derive"] } 16 | serde_bare = "0.5.0" 17 | serde_bytes = "0.11.7" 18 | xactor = "0.7.11" 19 | async-std = { version = "1.7.0", features = ["attributes"] } 20 | async-trait = "0.1.57" 21 | async-broadcast = "0.4.1" 22 | futures = "0.3.24" 23 | async-oneshot = "0.5.0" 24 | rust-fsm = "0.6.0" 25 | getrandom = "0.2.7" 26 | async-channel = "1.7.1" 27 | tempfile = "3" 28 | hex = "0.4.3" 29 | -------------------------------------------------------------------------------- /lofire-broker/src/account.rs: -------------------------------------------------------------------------------- 1 | //! 
User account 2 | 3 | use lofire::brokerstore::BrokerStore; 4 | use lofire::store::*; 5 | use lofire::types::*; 6 | use lofire_net::types::*; 7 | use serde_bare::to_vec; 8 | 9 | pub struct Account<'a> { 10 | /// User ID 11 | id: UserId, 12 | store: &'a dyn BrokerStore, 13 | } 14 | 15 | impl<'a> Account<'a> { 16 | const PREFIX: u8 = b"u"[0]; 17 | 18 | // propertie's suffixes 19 | const CLIENT: u8 = b"c"[0]; 20 | const ADMIN: u8 = b"a"[0]; 21 | const OVERLAY: u8 = b"o"[0]; 22 | 23 | const ALL_PROPERTIES: [u8; 3] = [Self::CLIENT, Self::ADMIN, Self::OVERLAY]; 24 | 25 | const SUFFIX_FOR_EXIST_CHECK: u8 = Self::ADMIN; 26 | 27 | pub fn open(id: &UserId, store: &'a dyn BrokerStore) -> Result, StorageError> { 28 | let opening = Account { 29 | id: id.clone(), 30 | store, 31 | }; 32 | if !opening.exists() { 33 | return Err(StorageError::NotFound); 34 | } 35 | Ok(opening) 36 | } 37 | pub fn create( 38 | id: &UserId, 39 | admin: bool, 40 | store: &'a dyn BrokerStore, 41 | ) -> Result, StorageError> { 42 | let acc = Account { 43 | id: id.clone(), 44 | store, 45 | }; 46 | if acc.exists() { 47 | return Err(StorageError::BackendError); 48 | } 49 | store.put( 50 | Self::PREFIX, 51 | &to_vec(&id)?, 52 | Some(Self::ADMIN), 53 | to_vec(&admin)?, 54 | )?; 55 | Ok(acc) 56 | } 57 | pub fn exists(&self) -> bool { 58 | self.store 59 | .get( 60 | Self::PREFIX, 61 | &to_vec(&self.id).unwrap(), 62 | Some(Self::SUFFIX_FOR_EXIST_CHECK), 63 | ) 64 | .is_ok() 65 | } 66 | pub fn id(&self) -> UserId { 67 | self.id 68 | } 69 | pub fn add_client(&self, client: &ClientId) -> Result<(), StorageError> { 70 | if !self.exists() { 71 | return Err(StorageError::BackendError); 72 | } 73 | self.store.put( 74 | Self::PREFIX, 75 | &to_vec(&self.id)?, 76 | Some(Self::CLIENT), 77 | to_vec(client)?, 78 | ) 79 | } 80 | pub fn remove_client(&self, client: &ClientId) -> Result<(), StorageError> { 81 | self.store.del_property_value( 82 | Self::PREFIX, 83 | &to_vec(&self.id)?, 84 | Some(Self::CLIENT), 85 | to_vec(client)?, 86 | ) 87 | } 88 | 89 | pub fn has_client(&self, client: &ClientId) -> Result<(), StorageError> { 90 | self.store.has_property_value( 91 | Self::PREFIX, 92 | &to_vec(&self.id)?, 93 | Some(Self::CLIENT), 94 | to_vec(client)?, 95 | ) 96 | } 97 | 98 | pub fn add_overlay(&self, overlay: &OverlayId) -> Result<(), StorageError> { 99 | if !self.exists() { 100 | return Err(StorageError::BackendError); 101 | } 102 | self.store.put( 103 | Self::PREFIX, 104 | &to_vec(&self.id)?, 105 | Some(Self::OVERLAY), 106 | to_vec(overlay)?, 107 | ) 108 | } 109 | pub fn remove_overlay(&self, overlay: &OverlayId) -> Result<(), StorageError> { 110 | self.store.del_property_value( 111 | Self::PREFIX, 112 | &to_vec(&self.id)?, 113 | Some(Self::OVERLAY), 114 | to_vec(overlay)?, 115 | ) 116 | } 117 | 118 | pub fn has_overlay(&self, overlay: &OverlayId) -> Result<(), StorageError> { 119 | self.store.has_property_value( 120 | Self::PREFIX, 121 | &to_vec(&self.id)?, 122 | Some(Self::OVERLAY), 123 | to_vec(overlay)?, 124 | ) 125 | } 126 | 127 | pub fn is_admin(&self) -> Result { 128 | if self 129 | .store 130 | .has_property_value( 131 | Self::PREFIX, 132 | &to_vec(&self.id)?, 133 | Some(Self::ADMIN), 134 | to_vec(&true)?, 135 | ) 136 | .is_ok() 137 | { 138 | return Ok(true); 139 | } 140 | Ok(false) 141 | } 142 | 143 | pub fn del(&self) -> Result<(), StorageError> { 144 | self.store 145 | .del_all(Self::PREFIX, &to_vec(&self.id)?, &Self::ALL_PROPERTIES) 146 | } 147 | } 148 | 149 | #[cfg(test)] 150 | mod test { 151 | 152 | use lofire::store::*; 153 | use 
lofire::types::*; 154 | use lofire::utils::*; 155 | use lofire_store_lmdb::brokerstore::LmdbBrokerStore; 156 | use std::fs; 157 | use tempfile::Builder; 158 | 159 | use crate::account::Account; 160 | 161 | #[test] 162 | pub fn test_account() { 163 | let path_str = "test-env"; 164 | let root = Builder::new().prefix(path_str).tempdir().unwrap(); 165 | let key: [u8; 32] = [0; 32]; 166 | fs::create_dir_all(root.path()).unwrap(); 167 | println!("{}", root.path().to_str().unwrap()); 168 | let mut store = LmdbBrokerStore::open(root.path(), key); 169 | 170 | let user_id = PubKey::Ed25519PubKey([1; 32]); 171 | 172 | let account = Account::create(&user_id, true, &store).unwrap(); 173 | println!("account created {}", account.id()); 174 | 175 | let account2 = Account::open(&user_id, &store).unwrap(); 176 | println!("account opened {}", account2.id()); 177 | 178 | let client_id = PubKey::Ed25519PubKey([56; 32]); 179 | let client_id_not_added = PubKey::Ed25519PubKey([57; 32]); 180 | 181 | account2.add_client(&client_id).unwrap(); 182 | 183 | assert!(account2.is_admin().unwrap()); 184 | 185 | account.has_client(&client_id).unwrap(); 186 | assert!(account.has_client(&client_id_not_added).is_err()); 187 | } 188 | } 189 | -------------------------------------------------------------------------------- /lofire-broker/src/auth.rs: -------------------------------------------------------------------------------- 1 | use std::pin::Pin; 2 | 3 | use debug_print::*; 4 | use futures::future::BoxFuture; 5 | use futures::future::OptionFuture; 6 | use futures::FutureExt; 7 | use lofire::types::*; 8 | use lofire::utils::*; 9 | use lofire_net::errors::*; 10 | use lofire_net::types::*; 11 | use rust_fsm::*; 12 | 13 | state_machine! { 14 | derive(Debug) 15 | AuthProtocolClient(Ready) 16 | 17 | Ready(ClientHelloSent) => ClientHelloSent, 18 | ClientHelloSent(ServerHelloReceived) => ServerHelloReceived, 19 | ServerHelloReceived(ClientAuthSent) => ClientAuthSent, 20 | ClientAuthSent(AuthResultReceived) => AuthResult, 21 | AuthResult => { 22 | Ok => BrokerProtocol, 23 | Error => Closed, 24 | }, 25 | } 26 | 27 | state_machine! 
{ 28 | derive(Debug) 29 | AuthProtocolServer(Ready) 30 | 31 | Ready(ClientHelloReceived) => ClientHelloReceived, 32 | ClientHelloReceived(ServerHelloSent) => ServerHelloSent, 33 | ServerHelloSent(ClientAuthReceived) => ClientAuthReceived, 34 | ClientAuthReceived => { 35 | Ok => AuthResultOk, 36 | Error => AuthResultError, 37 | }, 38 | AuthResultOk(AuthResultSent) => BrokerProtocol, 39 | AuthResultError(AuthResultSent) => Closed, 40 | } 41 | 42 | pub struct AuthProtocolHandler { 43 | machine: StateMachine, 44 | nonce: Option>, 45 | user: Option, 46 | } 47 | 48 | impl AuthProtocolHandler { 49 | pub fn new() -> AuthProtocolHandler { 50 | AuthProtocolHandler { 51 | machine: StateMachine::new(), 52 | nonce: None, 53 | user: None, 54 | } 55 | } 56 | 57 | pub fn get_user(&self) -> Option { 58 | self.user 59 | } 60 | 61 | pub fn handle_init(&mut self, client_hello: ClientHello) -> Result, ProtocolError> { 62 | let _ = self 63 | .machine 64 | .consume(&AuthProtocolServerInput::ClientHelloReceived) 65 | .map_err(|_e| ProtocolError::InvalidState)?; 66 | 67 | let mut random_buf = [0u8; 32]; 68 | getrandom::getrandom(&mut random_buf).unwrap(); 69 | let nonce = random_buf.to_vec(); 70 | let reply = ServerHello::V0(ServerHelloV0 { 71 | nonce: nonce.clone(), 72 | }); 73 | self.nonce = Some(nonce); 74 | 75 | let _ = self 76 | .machine 77 | .consume(&AuthProtocolServerInput::ServerHelloSent) 78 | .map_err(|_e| ProtocolError::InvalidState)?; 79 | 80 | //debug_println!("sending nonce to client: {:?}", self.nonce); 81 | 82 | Ok(serde_bare::to_vec(&reply).unwrap()) 83 | } 84 | 85 | pub fn handle_incoming( 86 | &mut self, 87 | frame: Vec, 88 | ) -> ( 89 | Result, ProtocolError>, 90 | Pin>>>, 91 | ) { 92 | fn prepare_reply(res: Result, ProtocolError>) -> AuthResult { 93 | let (result, metadata) = match res { 94 | Ok(m) => (0, m), 95 | Err(e) => (e.into(), vec![]), 96 | }; 97 | AuthResult::V0(AuthResultV0 { result, metadata }) 98 | } 99 | 100 | fn process_state( 101 | handler: &mut AuthProtocolHandler, 102 | frame: Vec, 103 | ) -> Result, ProtocolError> { 104 | match handler.machine.state() { 105 | &AuthProtocolServerState::ServerHelloSent => { 106 | let message = serde_bare::from_slice::(&frame)?; 107 | let _ = handler 108 | .machine 109 | .consume(&AuthProtocolServerInput::ClientAuthReceived) 110 | .map_err(|_e| ProtocolError::InvalidState)?; 111 | 112 | // verifying client auth 113 | 114 | debug_println!("verifying client auth"); 115 | 116 | let _ = verify( 117 | &serde_bare::to_vec(&message.content_v0()).unwrap(), 118 | message.sig(), 119 | message.user(), 120 | ) 121 | .map_err(|_e| ProtocolError::AccessDenied)?; 122 | 123 | // debug_println!( 124 | // "matching nonce : {:?} {:?}", 125 | // message.nonce(), 126 | // handler.nonce.as_ref().unwrap() 127 | // ); 128 | 129 | if message.nonce() != handler.nonce.as_ref().unwrap() { 130 | let _ = handler 131 | .machine 132 | .consume(&AuthProtocolServerInput::Error) 133 | .map_err(|_e| ProtocolError::InvalidState); 134 | 135 | return Err(ProtocolError::AccessDenied); 136 | } 137 | 138 | // TODO check that the device has been registered for this user. 
if not, return AccessDenied 139 | 140 | // all is good, we advance the FSM and send back response 141 | let _ = handler 142 | .machine 143 | .consume(&AuthProtocolServerInput::Ok) 144 | .map_err(|_e| ProtocolError::InvalidState)?; 145 | 146 | handler.user = Some(message.user()); 147 | 148 | Ok(vec![]) // without any metadata 149 | } 150 | _ => Err(ProtocolError::InvalidState), 151 | } 152 | } 153 | 154 | let res = process_state(self, frame); 155 | let is_err = res.as_ref().err().cloned(); 156 | let reply = prepare_reply(res); 157 | let reply_ser: Result, ProtocolError> = Ok(serde_bare::to_vec(&reply).unwrap()); 158 | if is_err.is_some() { 159 | ( 160 | reply_ser, 161 | Box::pin(OptionFuture::from(Some( 162 | async move { reply.result() }.boxed(), 163 | ))), 164 | ) 165 | } else { 166 | (reply_ser, Box::pin(OptionFuture::from(None))) 167 | } 168 | } 169 | } 170 | -------------------------------------------------------------------------------- /lofire-broker/src/config.rs: -------------------------------------------------------------------------------- 1 | //! Broker Config, persisted to store 2 | 3 | use lofire::brokerstore::BrokerStore; 4 | use lofire::store::*; 5 | use lofire::types::*; 6 | use lofire_net::types::*; 7 | use serde::{Deserialize, Serialize}; 8 | use serde_bare::{from_slice, to_vec}; 9 | 10 | // TODO: versioning V0 11 | #[derive(Clone, Debug, Serialize, Deserialize, PartialEq)] 12 | pub enum ConfigMode { 13 | Local, 14 | Core, 15 | } 16 | 17 | pub struct Config<'a> { 18 | store: &'a dyn BrokerStore, 19 | } 20 | 21 | impl<'a> Config<'a> { 22 | const PREFIX: u8 = b"c"[0]; 23 | 24 | const KEY: [u8; 5] = *b"onfig"; 25 | 26 | // propertie's suffixes 27 | const MODE: u8 = b"m"[0]; 28 | 29 | const ALL_PROPERTIES: [u8; 1] = [Self::MODE]; 30 | 31 | const SUFFIX_FOR_EXIST_CHECK: u8 = Self::MODE; 32 | 33 | pub fn open(store: &'a dyn BrokerStore) -> Result, StorageError> { 34 | let opening = Config { store }; 35 | if !opening.exists() { 36 | return Err(StorageError::NotFound); 37 | } 38 | Ok(opening) 39 | } 40 | pub fn get_or_create( 41 | mode: &ConfigMode, 42 | store: &'a dyn BrokerStore, 43 | ) -> Result, StorageError> { 44 | match Self::open(store) { 45 | Err(e) => { 46 | if e == StorageError::NotFound { 47 | Self::create(mode, store) 48 | } else { 49 | Err(StorageError::BackendError) 50 | } 51 | } 52 | Ok(p) => { 53 | if &p.mode().unwrap() != mode { 54 | return Err(StorageError::InvalidValue); 55 | } 56 | Ok(p) 57 | } 58 | } 59 | } 60 | pub fn create( 61 | mode: &ConfigMode, 62 | store: &'a dyn BrokerStore, 63 | ) -> Result, StorageError> { 64 | let acc = Config { store }; 65 | if acc.exists() { 66 | return Err(StorageError::BackendError); 67 | } 68 | store.put( 69 | Self::PREFIX, 70 | &to_vec(&Self::KEY)?, 71 | Some(Self::MODE), 72 | to_vec(&mode)?, 73 | )?; 74 | Ok(acc) 75 | } 76 | pub fn exists(&self) -> bool { 77 | self.store 78 | .get( 79 | Self::PREFIX, 80 | &to_vec(&Self::KEY).unwrap(), 81 | Some(Self::SUFFIX_FOR_EXIST_CHECK), 82 | ) 83 | .is_ok() 84 | } 85 | pub fn mode(&self) -> Result { 86 | match self 87 | .store 88 | .get(Self::PREFIX, &to_vec(&Self::KEY)?, Some(Self::MODE)) 89 | { 90 | Ok(ver) => Ok(from_slice::(&ver)?), 91 | Err(e) => Err(e), 92 | } 93 | } 94 | } 95 | -------------------------------------------------------------------------------- /lofire-broker/src/lib.rs: -------------------------------------------------------------------------------- 1 | #[macro_export] 2 | macro_rules! 
before { 3 | ( $self:expr, $request_id:ident, $addr:ident, $receiver:ident ) => { 4 | let mut actor = BrokerMessageActor::new(); 5 | let $receiver = actor.receiver(); 6 | let mut $addr = actor 7 | .start() 8 | .await 9 | .map_err(|_e| ProtocolError::ActorError)?; 10 | 11 | let $request_id = $addr.actor_id(); 12 | //debug_println!("actor ID {}", $request_id); 13 | 14 | { 15 | let mut map = $self.actors.write().expect("RwLock poisoned"); 16 | map.insert($request_id, $addr.downgrade()); 17 | } 18 | }; 19 | } 20 | 21 | macro_rules! after { 22 | ( $self:expr, $request_id:ident, $addr:ident, $receiver:ident, $reply:ident ) => { 23 | //debug_println!("waiting for reply"); 24 | 25 | $addr.wait_for_stop().await; // TODO add timeout and close connection if there's no reply 26 | let r = $receiver.await; 27 | if r.is_err() { return Err(ProtocolError::Closing);} 28 | let $reply = r.unwrap(); 29 | //debug_println!("reply arrived {:?}", $reply); 30 | { 31 | let mut map = $self.actors.write().expect("RwLock poisoned"); 32 | map.remove(&$request_id); 33 | } 34 | }; 35 | } 36 | 37 | pub mod account; 38 | 39 | pub mod overlay; 40 | 41 | pub mod peer; 42 | 43 | pub mod topic; 44 | 45 | pub mod connection; 46 | 47 | pub mod server; 48 | 49 | pub mod config; 50 | 51 | pub mod repostoreinfo; 52 | 53 | pub mod auth; 54 | -------------------------------------------------------------------------------- /lofire-broker/src/overlay.rs: -------------------------------------------------------------------------------- 1 | //! Overlay 2 | 3 | use lofire::brokerstore::BrokerStore; 4 | use lofire::store::*; 5 | use lofire::types::*; 6 | use lofire::utils::now_timestamp; 7 | use lofire_net::types::*; 8 | use serde::{Deserialize, Serialize}; 9 | use serde_bare::{from_slice, to_vec}; 10 | 11 | // TODO: versioning V0 12 | #[derive(Clone, Debug, Serialize, Deserialize, PartialEq)] 13 | pub struct OverlayMeta { 14 | pub users: u32, 15 | pub last_used: Timestamp, 16 | } 17 | 18 | pub struct Overlay<'a> { 19 | /// Overlay ID 20 | id: OverlayId, 21 | store: &'a dyn BrokerStore, 22 | } 23 | 24 | impl<'a> Overlay<'a> { 25 | const PREFIX: u8 = b"o"[0]; 26 | 27 | // propertie's suffixes 28 | const SECRET: u8 = b"s"[0]; 29 | const PEER: u8 = b"p"[0]; 30 | const TOPIC: u8 = b"t"[0]; 31 | const META: u8 = b"m"[0]; 32 | const REPO: u8 = b"r"[0]; 33 | 34 | const ALL_PROPERTIES: [u8; 5] = [ 35 | Self::SECRET, 36 | Self::PEER, 37 | Self::TOPIC, 38 | Self::META, 39 | Self::REPO, 40 | ]; 41 | 42 | const SUFFIX_FOR_EXIST_CHECK: u8 = Self::SECRET; 43 | 44 | pub fn open(id: &OverlayId, store: &'a dyn BrokerStore) -> Result, StorageError> { 45 | let opening = Overlay { 46 | id: id.clone(), 47 | store, 48 | }; 49 | if !opening.exists() { 50 | return Err(StorageError::NotFound); 51 | } 52 | Ok(opening) 53 | } 54 | pub fn create( 55 | id: &OverlayId, 56 | secret: &SymKey, 57 | repo: Option, 58 | store: &'a dyn BrokerStore, 59 | ) -> Result, StorageError> { 60 | let acc = Overlay { 61 | id: id.clone(), 62 | store, 63 | }; 64 | if acc.exists() { 65 | return Err(StorageError::BackendError); 66 | } 67 | store.put( 68 | Self::PREFIX, 69 | &to_vec(&id)?, 70 | Some(Self::SECRET), 71 | to_vec(&secret)?, 72 | )?; 73 | if repo.is_some() { 74 | store.put( 75 | Self::PREFIX, 76 | &to_vec(&id)?, 77 | Some(Self::REPO), 78 | to_vec(&repo.unwrap())?, 79 | )?; 80 | //TODO if failure, should remove the previously added SECRET property 81 | } 82 | let meta = OverlayMeta { 83 | users: 1, 84 | last_used: now_timestamp(), 85 | }; 86 | store.put( 87 | Self::PREFIX, 88 | 
&to_vec(&id)?, 89 | Some(Self::META), 90 | to_vec(&meta)?, 91 | )?; 92 | //TODO if failure, should remove the previously added SECRET and REPO properties 93 | Ok(acc) 94 | } 95 | pub fn exists(&self) -> bool { 96 | self.store 97 | .get( 98 | Self::PREFIX, 99 | &to_vec(&self.id).unwrap(), 100 | Some(Self::SUFFIX_FOR_EXIST_CHECK), 101 | ) 102 | .is_ok() 103 | } 104 | pub fn id(&self) -> OverlayId { 105 | self.id 106 | } 107 | pub fn add_peer(&self, peer: &PeerId) -> Result<(), StorageError> { 108 | if !self.exists() { 109 | return Err(StorageError::BackendError); 110 | } 111 | self.store.put( 112 | Self::PREFIX, 113 | &to_vec(&self.id)?, 114 | Some(Self::PEER), 115 | to_vec(peer)?, 116 | ) 117 | } 118 | pub fn remove_peer(&self, peer: &PeerId) -> Result<(), StorageError> { 119 | self.store.del_property_value( 120 | Self::PREFIX, 121 | &to_vec(&self.id)?, 122 | Some(Self::PEER), 123 | to_vec(peer)?, 124 | ) 125 | } 126 | 127 | pub fn has_peer(&self, peer: &PeerId) -> Result<(), StorageError> { 128 | self.store.has_property_value( 129 | Self::PREFIX, 130 | &to_vec(&self.id)?, 131 | Some(Self::PEER), 132 | to_vec(peer)?, 133 | ) 134 | } 135 | 136 | pub fn add_topic(&self, topic: &TopicId) -> Result<(), StorageError> { 137 | if !self.exists() { 138 | return Err(StorageError::BackendError); 139 | } 140 | self.store.put( 141 | Self::PREFIX, 142 | &to_vec(&self.id)?, 143 | Some(Self::TOPIC), 144 | to_vec(topic)?, 145 | ) 146 | } 147 | pub fn remove_topic(&self, topic: &TopicId) -> Result<(), StorageError> { 148 | self.store.del_property_value( 149 | Self::PREFIX, 150 | &to_vec(&self.id)?, 151 | Some(Self::TOPIC), 152 | to_vec(topic)?, 153 | ) 154 | } 155 | 156 | pub fn has_topic(&self, topic: &TopicId) -> Result<(), StorageError> { 157 | self.store.has_property_value( 158 | Self::PREFIX, 159 | &to_vec(&self.id)?, 160 | Some(Self::TOPIC), 161 | to_vec(topic)?, 162 | ) 163 | } 164 | 165 | pub fn secret(&self) -> Result { 166 | match self 167 | .store 168 | .get(Self::PREFIX, &to_vec(&self.id)?, Some(Self::SECRET)) 169 | { 170 | Ok(secret) => Ok(from_slice::(&secret)?), 171 | Err(e) => Err(e), 172 | } 173 | } 174 | 175 | pub fn metadata(&self) -> Result { 176 | match self 177 | .store 178 | .get(Self::PREFIX, &to_vec(&self.id)?, Some(Self::META)) 179 | { 180 | Ok(meta) => Ok(from_slice::(&meta)?), 181 | Err(e) => Err(e), 182 | } 183 | } 184 | pub fn set_metadata(&self, meta: &OverlayMeta) -> Result<(), StorageError> { 185 | if !self.exists() { 186 | return Err(StorageError::BackendError); 187 | } 188 | self.store.replace( 189 | Self::PREFIX, 190 | &to_vec(&self.id)?, 191 | Some(Self::META), 192 | to_vec(meta)?, 193 | ) 194 | } 195 | 196 | pub fn repo(&self) -> Result { 197 | match self 198 | .store 199 | .get(Self::PREFIX, &to_vec(&self.id)?, Some(Self::REPO)) 200 | { 201 | Ok(repo) => Ok(from_slice::(&repo)?), 202 | Err(e) => Err(e), 203 | } 204 | } 205 | 206 | pub fn del(&self) -> Result<(), StorageError> { 207 | self.store 208 | .del_all(Self::PREFIX, &to_vec(&self.id)?, &Self::ALL_PROPERTIES) 209 | } 210 | } 211 | -------------------------------------------------------------------------------- /lofire-broker/src/peer.rs: -------------------------------------------------------------------------------- 1 | //! 
Peer 2 | 3 | use lofire::brokerstore::BrokerStore; 4 | use lofire::store::*; 5 | use lofire::types::*; 6 | use lofire_net::types::*; 7 | use serde::{Deserialize, Serialize}; 8 | use serde_bare::{from_slice, to_vec}; 9 | 10 | pub struct Peer<'a> { 11 | /// Topic ID 12 | id: PeerId, 13 | store: &'a dyn BrokerStore, 14 | } 15 | 16 | impl<'a> Peer<'a> { 17 | const PREFIX: u8 = b"p"[0]; 18 | 19 | // propertie's suffixes 20 | const VERSION: u8 = b"v"[0]; 21 | const ADVERT: u8 = b"a"[0]; 22 | 23 | const ALL_PROPERTIES: [u8; 2] = [Self::VERSION, Self::ADVERT]; 24 | 25 | const SUFFIX_FOR_EXIST_CHECK: u8 = Self::VERSION; 26 | 27 | pub fn open(id: &PeerId, store: &'a dyn BrokerStore) -> Result, StorageError> { 28 | let opening = Peer { 29 | id: id.clone(), 30 | store, 31 | }; 32 | if !opening.exists() { 33 | return Err(StorageError::NotFound); 34 | } 35 | Ok(opening) 36 | } 37 | pub fn update_or_create( 38 | advert: &PeerAdvert, 39 | store: &'a dyn BrokerStore, 40 | ) -> Result, StorageError> { 41 | let id = advert.peer(); 42 | match Self::open(id, store) { 43 | Err(e) => { 44 | if e == StorageError::NotFound { 45 | Self::create(advert, store) 46 | } else { 47 | Err(StorageError::BackendError) 48 | } 49 | } 50 | Ok(p) => { 51 | p.update_advert(advert)?; 52 | Ok(p) 53 | } 54 | } 55 | } 56 | pub fn create( 57 | advert: &PeerAdvert, 58 | store: &'a dyn BrokerStore, 59 | ) -> Result, StorageError> { 60 | let id = advert.peer(); 61 | let acc = Peer { 62 | id: id.clone(), 63 | store, 64 | }; 65 | if acc.exists() { 66 | return Err(StorageError::BackendError); 67 | } 68 | store.put( 69 | Self::PREFIX, 70 | &to_vec(&id)?, 71 | Some(Self::VERSION), 72 | to_vec(&advert.version())?, 73 | )?; 74 | store.put( 75 | Self::PREFIX, 76 | &to_vec(&id)?, 77 | Some(Self::ADVERT), 78 | to_vec(&advert)?, 79 | )?; 80 | Ok(acc) 81 | } 82 | pub fn exists(&self) -> bool { 83 | self.store 84 | .get( 85 | Self::PREFIX, 86 | &to_vec(&self.id).unwrap(), 87 | Some(Self::SUFFIX_FOR_EXIST_CHECK), 88 | ) 89 | .is_ok() 90 | } 91 | pub fn id(&self) -> PeerId { 92 | self.id 93 | } 94 | pub fn version(&self) -> Result { 95 | match self 96 | .store 97 | .get(Self::PREFIX, &to_vec(&self.id)?, Some(Self::VERSION)) 98 | { 99 | Ok(ver) => Ok(from_slice::(&ver)?), 100 | Err(e) => Err(e), 101 | } 102 | } 103 | pub fn set_version(&self, version: u32) -> Result<(), StorageError> { 104 | if !self.exists() { 105 | return Err(StorageError::BackendError); 106 | } 107 | self.store.replace( 108 | Self::PREFIX, 109 | &to_vec(&self.id)?, 110 | Some(Self::VERSION), 111 | to_vec(&version)?, 112 | ) 113 | } 114 | pub fn update_advert(&self, advert: &PeerAdvert) -> Result<(), StorageError> { 115 | if advert.peer() != &self.id { 116 | return Err(StorageError::InvalidValue); 117 | } 118 | let current_advert = self.advert().map_err(|e| StorageError::BackendError)?; 119 | if current_advert.version() >= advert.version() { 120 | return Ok(()); 121 | } 122 | self.store.replace( 123 | Self::PREFIX, 124 | &to_vec(&self.id)?, 125 | Some(Self::ADVERT), 126 | to_vec(advert)?, 127 | ) 128 | } 129 | pub fn advert(&self) -> Result { 130 | match self 131 | .store 132 | .get(Self::PREFIX, &to_vec(&self.id)?, Some(Self::ADVERT)) 133 | { 134 | Ok(advert) => Ok(from_slice::(&advert)?), 135 | Err(e) => Err(e), 136 | } 137 | } 138 | pub fn set_advert(&self, advert: &PeerAdvert) -> Result<(), StorageError> { 139 | if !self.exists() { 140 | return Err(StorageError::BackendError); 141 | } 142 | self.store.replace( 143 | Self::PREFIX, 144 | &to_vec(&self.id)?, 145 | Some(Self::ADVERT), 
146 | to_vec(advert)?, 147 | ) 148 | } 149 | 150 | pub fn del(&self) -> Result<(), StorageError> { 151 | self.store 152 | .del_all(Self::PREFIX, &to_vec(&self.id)?, &Self::ALL_PROPERTIES) 153 | } 154 | } 155 | -------------------------------------------------------------------------------- /lofire-broker/src/repostoreinfo.rs: -------------------------------------------------------------------------------- 1 | //! RepoStore information about each RepoStore 2 | //! It contains the symKeys to open the RepoStores 3 | //! A repoStore is identified by its repo pubkey if in local mode 4 | //! In core mode, it is identified by the overlayid. 5 | 6 | use lofire::brokerstore::BrokerStore; 7 | use lofire::store::*; 8 | use lofire::types::*; 9 | use lofire_net::types::*; 10 | use serde::{Deserialize, Serialize}; 11 | use serde_bare::{from_slice, to_vec}; 12 | 13 | #[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq, Hash)] 14 | pub enum RepoStoreId { 15 | Overlay(OverlayId), 16 | Repo(PubKey), 17 | } 18 | 19 | impl From for String { 20 | fn from(id: RepoStoreId) -> Self { 21 | hex::encode(to_vec(&id).unwrap()) 22 | } 23 | } 24 | 25 | pub struct RepoStoreInfo<'a> { 26 | /// RepoStore ID 27 | id: RepoStoreId, 28 | store: &'a dyn BrokerStore, 29 | } 30 | 31 | impl<'a> RepoStoreInfo<'a> { 32 | const PREFIX: u8 = b"r"[0]; 33 | 34 | // propertie's suffixes 35 | const KEY: u8 = b"k"[0]; 36 | 37 | const ALL_PROPERTIES: [u8; 1] = [Self::KEY]; 38 | 39 | const SUFFIX_FOR_EXIST_CHECK: u8 = Self::KEY; 40 | 41 | pub fn open( 42 | id: &RepoStoreId, 43 | store: &'a dyn BrokerStore, 44 | ) -> Result, StorageError> { 45 | let opening = RepoStoreInfo { 46 | id: id.clone(), 47 | store, 48 | }; 49 | if !opening.exists() { 50 | return Err(StorageError::NotFound); 51 | } 52 | Ok(opening) 53 | } 54 | pub fn create( 55 | id: &RepoStoreId, 56 | key: &SymKey, 57 | store: &'a dyn BrokerStore, 58 | ) -> Result, StorageError> { 59 | let acc = RepoStoreInfo { 60 | id: id.clone(), 61 | store, 62 | }; 63 | if acc.exists() { 64 | return Err(StorageError::BackendError); 65 | } 66 | store.put(Self::PREFIX, &to_vec(&id)?, Some(Self::KEY), to_vec(key)?)?; 67 | Ok(acc) 68 | } 69 | pub fn exists(&self) -> bool { 70 | self.store 71 | .get( 72 | Self::PREFIX, 73 | &to_vec(&self.id).unwrap(), 74 | Some(Self::SUFFIX_FOR_EXIST_CHECK), 75 | ) 76 | .is_ok() 77 | } 78 | pub fn id(&self) -> &RepoStoreId { 79 | &self.id 80 | } 81 | pub fn key(&self) -> Result { 82 | match self 83 | .store 84 | .get(Self::PREFIX, &to_vec(&self.id)?, Some(Self::KEY)) 85 | { 86 | Ok(k) => Ok(from_slice::(&k)?), 87 | Err(e) => Err(e), 88 | } 89 | } 90 | pub fn del(&self) -> Result<(), StorageError> { 91 | self.store 92 | .del_all(Self::PREFIX, &to_vec(&self.id)?, &Self::ALL_PROPERTIES) 93 | } 94 | } 95 | -------------------------------------------------------------------------------- /lofire-broker/src/topic.rs: -------------------------------------------------------------------------------- 1 | //! 
Topic 2 | 3 | use lofire::brokerstore::BrokerStore; 4 | use lofire::store::*; 5 | use lofire::types::*; 6 | use lofire_net::types::*; 7 | use serde::{Deserialize, Serialize}; 8 | use serde_bare::{from_slice, to_vec}; 9 | 10 | // TODO: versioning V0 11 | #[derive(Clone, Debug, Serialize, Deserialize, PartialEq)] 12 | pub struct TopicMeta { 13 | pub users: u32, 14 | } 15 | 16 | pub struct Topic<'a> { 17 | /// Topic ID 18 | id: TopicId, 19 | store: &'a dyn BrokerStore, 20 | } 21 | 22 | impl<'a> Topic<'a> { 23 | const PREFIX: u8 = b"t"[0]; 24 | 25 | // propertie's suffixes 26 | const ADVERT: u8 = b"a"[0]; 27 | const HEAD: u8 = b"h"[0]; 28 | const META: u8 = b"m"[0]; 29 | 30 | const ALL_PROPERTIES: [u8; 3] = [Self::ADVERT, Self::HEAD, Self::META]; 31 | 32 | const SUFFIX_FOR_EXIST_CHECK: u8 = Self::META; 33 | 34 | pub fn open(id: &TopicId, store: &'a dyn BrokerStore) -> Result, StorageError> { 35 | let opening = Topic { 36 | id: id.clone(), 37 | store, 38 | }; 39 | if !opening.exists() { 40 | return Err(StorageError::NotFound); 41 | } 42 | Ok(opening) 43 | } 44 | pub fn create(id: &TopicId, store: &'a dyn BrokerStore) -> Result, StorageError> { 45 | let acc = Topic { 46 | id: id.clone(), 47 | store, 48 | }; 49 | if acc.exists() { 50 | return Err(StorageError::BackendError); 51 | } 52 | let meta = TopicMeta { users: 0 }; 53 | store.put( 54 | Self::PREFIX, 55 | &to_vec(&id)?, 56 | Some(Self::META), 57 | to_vec(&meta)?, 58 | )?; 59 | Ok(acc) 60 | } 61 | pub fn exists(&self) -> bool { 62 | self.store 63 | .get( 64 | Self::PREFIX, 65 | &to_vec(&self.id).unwrap(), 66 | Some(Self::SUFFIX_FOR_EXIST_CHECK), 67 | ) 68 | .is_ok() 69 | } 70 | pub fn id(&self) -> TopicId { 71 | self.id 72 | } 73 | pub fn add_head(&self, head: &ObjectId) -> Result<(), StorageError> { 74 | if !self.exists() { 75 | return Err(StorageError::BackendError); 76 | } 77 | self.store.put( 78 | Self::PREFIX, 79 | &to_vec(&self.id)?, 80 | Some(Self::HEAD), 81 | to_vec(head)?, 82 | ) 83 | } 84 | pub fn remove_head(&self, head: &ObjectId) -> Result<(), StorageError> { 85 | self.store.del_property_value( 86 | Self::PREFIX, 87 | &to_vec(&self.id)?, 88 | Some(Self::HEAD), 89 | to_vec(head)?, 90 | ) 91 | } 92 | 93 | pub fn has_head(&self, head: &ObjectId) -> Result<(), StorageError> { 94 | self.store.has_property_value( 95 | Self::PREFIX, 96 | &to_vec(&self.id)?, 97 | Some(Self::HEAD), 98 | to_vec(head)?, 99 | ) 100 | } 101 | 102 | pub fn metadata(&self) -> Result { 103 | match self 104 | .store 105 | .get(Self::PREFIX, &to_vec(&self.id)?, Some(Self::META)) 106 | { 107 | Ok(meta) => Ok(from_slice::(&meta)?), 108 | Err(e) => Err(e), 109 | } 110 | } 111 | pub fn set_metadata(&self, meta: &TopicMeta) -> Result<(), StorageError> { 112 | if !self.exists() { 113 | return Err(StorageError::BackendError); 114 | } 115 | self.store.replace( 116 | Self::PREFIX, 117 | &to_vec(&self.id)?, 118 | Some(Self::META), 119 | to_vec(meta)?, 120 | ) 121 | } 122 | 123 | pub fn del(&self) -> Result<(), StorageError> { 124 | self.store 125 | .del_all(Self::PREFIX, &to_vec(&self.id)?, &Self::ALL_PROPERTIES) 126 | } 127 | } 128 | -------------------------------------------------------------------------------- /lofire-demo/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "lofire-demo" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 7 | 8 | [dependencies] 9 | debug_print = "1.0.0" 10 | lofire = { path = 
"../lofire" } 11 | lofire-net = { path = "../lofire-net" } 12 | lofire-broker = { path = "../lofire-broker" } 13 | lofire-store-lmdb = { path = "../lofire-store-lmdb" } 14 | async-std = { version = "1.7.0", features = ["attributes"] } 15 | async-tungstenite = { version = "0.17.2", features = ["async-std-runtime","async-native-tls"] } 16 | futures = "0.3.24" 17 | xactor = "0.7.11" 18 | tempfile = "3" 19 | fastbloom-rs = "0.3.1" 20 | rand = "0.7" 21 | ed25519-dalek = "1.0.1" 22 | assert_cmd = "2.0.5" 23 | -------------------------------------------------------------------------------- /lofire-demo/src/main.rs: -------------------------------------------------------------------------------- 1 | use async_tungstenite::async_std::connect_async; 2 | use async_tungstenite::client_async; 3 | use async_tungstenite::tungstenite::{Error, Message}; 4 | use debug_print::*; 5 | use ed25519_dalek::*; 6 | use fastbloom_rs::{BloomFilter as Filter, FilterBuilder, Membership}; 7 | use futures::{future, pin_mut, stream, SinkExt, StreamExt}; 8 | use lofire::object::Object; 9 | use lofire::store::{store_max_value_size, store_valid_value_size, HashMapRepoStore, RepoStore}; 10 | use lofire_broker::config::ConfigMode; 11 | use lofire_store_lmdb::brokerstore::LmdbBrokerStore; 12 | use lofire_store_lmdb::repostore::LmdbRepoStore; 13 | use rand::rngs::OsRng; 14 | use std::collections::HashMap; 15 | 16 | use lofire::types::*; 17 | use lofire::utils::{generate_keypair, now_timestamp}; 18 | use lofire_broker::connection::*; 19 | use lofire_broker::server::*; 20 | use lofire_net::errors::*; 21 | use lofire_net::types::*; 22 | 23 | fn block_size() -> usize { 24 | store_max_value_size() 25 | //store_valid_value_size(0) 26 | } 27 | 28 | async fn test_sync(cnx: &mut impl BrokerConnection, user_pub_key: PubKey, userpriv_key: PrivKey) { 29 | fn add_obj( 30 | content: ObjectContent, 31 | deps: Vec, 32 | expiry: Option, 33 | repo_pubkey: PubKey, 34 | repo_secret: SymKey, 35 | store: &mut impl RepoStore, 36 | ) -> ObjectRef { 37 | let max_object_size = 4000; 38 | let obj = Object::new( 39 | content, 40 | deps, 41 | expiry, 42 | max_object_size, 43 | repo_pubkey, 44 | repo_secret, 45 | ); 46 | //println!(">>> add_obj"); 47 | println!(" id: {}", obj.id()); 48 | //println!(" deps: {:?}", obj.deps()); 49 | obj.save(store).unwrap(); 50 | obj.reference().unwrap() 51 | } 52 | 53 | fn add_commit( 54 | branch: ObjectRef, 55 | author_privkey: PrivKey, 56 | author_pubkey: PubKey, 57 | seq: u32, 58 | deps: Vec, 59 | acks: Vec, 60 | body_ref: ObjectRef, 61 | repo_pubkey: PubKey, 62 | repo_secret: SymKey, 63 | store: &mut impl RepoStore, 64 | ) -> ObjectRef { 65 | let mut obj_deps: Vec = vec![]; 66 | obj_deps.extend(deps.iter().map(|r| r.id)); 67 | obj_deps.extend(acks.iter().map(|r| r.id)); 68 | 69 | let obj_ref = ObjectRef { 70 | id: ObjectId::Blake3Digest32([1; 32]), 71 | key: SymKey::ChaCha20Key([2; 32]), 72 | }; 73 | let refs = vec![obj_ref]; 74 | let metadata = vec![5u8; 55]; 75 | let expiry = None; 76 | 77 | let commit = Commit::new( 78 | author_privkey, 79 | author_pubkey, 80 | seq, 81 | branch, 82 | deps, 83 | acks, 84 | refs, 85 | metadata, 86 | body_ref, 87 | expiry, 88 | ) 89 | .unwrap(); 90 | //println!("commit: {}", commit.id().unwrap()); 91 | add_obj( 92 | ObjectContent::Commit(commit), 93 | obj_deps, 94 | expiry, 95 | repo_pubkey, 96 | repo_secret, 97 | store, 98 | ) 99 | } 100 | 101 | fn add_body_branch( 102 | branch: Branch, 103 | repo_pubkey: PubKey, 104 | repo_secret: SymKey, 105 | store: &mut impl RepoStore, 106 | ) -> 
ObjectRef { 107 | let deps = vec![]; 108 | let expiry = None; 109 | let body = CommitBody::Branch(branch); 110 | //println!("body: {:?}", body); 111 | add_obj( 112 | ObjectContent::CommitBody(body), 113 | deps, 114 | expiry, 115 | repo_pubkey, 116 | repo_secret, 117 | store, 118 | ) 119 | } 120 | 121 | fn add_body_trans( 122 | deps: Vec, 123 | repo_pubkey: PubKey, 124 | repo_secret: SymKey, 125 | store: &mut impl RepoStore, 126 | ) -> ObjectRef { 127 | let expiry = None; 128 | let content = [7u8; 777].to_vec(); 129 | let body = CommitBody::Transaction(Transaction::V0(content)); 130 | //println!("body: {:?}", body); 131 | add_obj( 132 | ObjectContent::CommitBody(body), 133 | deps, 134 | expiry, 135 | repo_pubkey, 136 | repo_secret, 137 | store, 138 | ) 139 | } 140 | 141 | fn add_body_ack( 142 | deps: Vec, 143 | repo_pubkey: PubKey, 144 | repo_secret: SymKey, 145 | store: &mut impl RepoStore, 146 | ) -> ObjectRef { 147 | let expiry = None; 148 | let body = CommitBody::Ack(Ack::V0()); 149 | //println!("body: {:?}", body); 150 | add_obj( 151 | ObjectContent::CommitBody(body), 152 | deps, 153 | expiry, 154 | repo_pubkey, 155 | repo_secret, 156 | store, 157 | ) 158 | } 159 | 160 | let mut store = HashMapRepoStore::new(); 161 | let mut rng = OsRng {}; 162 | 163 | // repo 164 | 165 | let repo_keypair: Keypair = Keypair::generate(&mut rng); 166 | // println!( 167 | // "repo private key: ({}) {:?}", 168 | // repo_keypair.secret.as_bytes().len(), 169 | // repo_keypair.secret.as_bytes() 170 | // ); 171 | // println!( 172 | // "repo public key: ({}) {:?}", 173 | // repo_keypair.public.as_bytes().len(), 174 | // repo_keypair.public.as_bytes() 175 | // ); 176 | let _repo_privkey = PrivKey::Ed25519PrivKey(repo_keypair.secret.to_bytes()); 177 | let repo_pubkey = PubKey::Ed25519PubKey(repo_keypair.public.to_bytes()); 178 | let repo_secret = SymKey::ChaCha20Key([9; 32]); 179 | 180 | let repolink = RepoLink::V0(RepoLinkV0 { 181 | id: repo_pubkey, 182 | secret: repo_secret, 183 | peers: vec![], 184 | }); 185 | 186 | // branch 187 | 188 | let branch_keypair: Keypair = Keypair::generate(&mut rng); 189 | //println!("branch public key: {:?}", branch_keypair.public.as_bytes()); 190 | let branch_pubkey = PubKey::Ed25519PubKey(branch_keypair.public.to_bytes()); 191 | 192 | let member_keypair: Keypair = Keypair::generate(&mut rng); 193 | //println!("member public key: {:?}", member_keypair.public.as_bytes()); 194 | let member_privkey = PrivKey::Ed25519PrivKey(member_keypair.secret.to_bytes()); 195 | let member_pubkey = PubKey::Ed25519PubKey(member_keypair.public.to_bytes()); 196 | 197 | let metadata = [66u8; 64].to_vec(); 198 | let commit_types = vec![CommitType::Ack, CommitType::Transaction]; 199 | let secret = SymKey::ChaCha20Key([0; 32]); 200 | 201 | let member = MemberV0::new(member_pubkey, commit_types, metadata.clone()); 202 | let members = vec![member]; 203 | let mut quorum = HashMap::new(); 204 | quorum.insert(CommitType::Transaction, 3); 205 | let ack_delay = RelTime::Minutes(3); 206 | let tags = [99u8; 32].to_vec(); 207 | let branch = Branch::new( 208 | branch_pubkey, 209 | branch_pubkey, 210 | secret, 211 | members, 212 | quorum, 213 | ack_delay, 214 | tags, 215 | metadata, 216 | ); 217 | //println!("branch: {:?}", branch); 218 | 219 | println!("branch deps/acks:"); 220 | println!(""); 221 | println!(" br"); 222 | println!(" / \\"); 223 | println!(" t1 t2"); 224 | println!(" / \\ / \\"); 225 | println!(" a3 t4<--t5-->(t1)"); 226 | println!(" / \\"); 227 | println!(" a6 a7"); 228 | println!(""); 229 | 230 | 
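    // How to read the diagram above: br is the branch root, t1..t5 are
    // transaction commits, and a3/a6/a7 are ACK commits. The tree edges
    // are deps (causal dependencies), while the t4<--t5 arrow is an ack:
    // t5 is created below with deps = [t1, t2] and acks = [t4], and
    // add_commit() folds both lists into the new commit object's deps.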
// commit bodies 231 | 232 | let branch_body = add_body_branch( 233 | branch.clone(), 234 | repo_pubkey.clone(), 235 | repo_secret.clone(), 236 | &mut store, 237 | ); 238 | let ack_body = add_body_ack(vec![], repo_pubkey, repo_secret, &mut store); 239 | let trans_body = add_body_trans(vec![], repo_pubkey, repo_secret, &mut store); 240 | 241 | // create & add commits to store 242 | 243 | println!(">> br"); 244 | let br = add_commit( 245 | branch_body, 246 | member_privkey, 247 | member_pubkey, 248 | 0, 249 | vec![], 250 | vec![], 251 | branch_body, 252 | repo_pubkey, 253 | repo_secret, 254 | &mut store, 255 | ); 256 | 257 | println!(">> t1"); 258 | let t1 = add_commit( 259 | branch_body, 260 | member_privkey, 261 | member_pubkey, 262 | 1, 263 | vec![br], 264 | vec![], 265 | trans_body, 266 | repo_pubkey, 267 | repo_secret, 268 | &mut store, 269 | ); 270 | 271 | println!(">> t2"); 272 | let t2 = add_commit( 273 | branch_body, 274 | member_privkey, 275 | member_pubkey, 276 | 2, 277 | vec![br], 278 | vec![], 279 | trans_body, 280 | repo_pubkey, 281 | repo_secret, 282 | &mut store, 283 | ); 284 | 285 | println!(">> a3"); 286 | let a3 = add_commit( 287 | branch_body, 288 | member_privkey, 289 | member_pubkey, 290 | 3, 291 | vec![t1], 292 | vec![], 293 | ack_body, 294 | repo_pubkey, 295 | repo_secret, 296 | &mut store, 297 | ); 298 | 299 | println!(">> t4"); 300 | let t4 = add_commit( 301 | branch_body, 302 | member_privkey, 303 | member_pubkey, 304 | 4, 305 | vec![t2], 306 | vec![t1], 307 | trans_body, 308 | repo_pubkey, 309 | repo_secret, 310 | &mut store, 311 | ); 312 | 313 | println!(">> t5"); 314 | let t5 = add_commit( 315 | branch_body, 316 | member_privkey, 317 | member_pubkey, 318 | 5, 319 | vec![t1, t2], 320 | vec![t4], 321 | trans_body, 322 | repo_pubkey, 323 | repo_secret, 324 | &mut store, 325 | ); 326 | 327 | println!(">> a6"); 328 | let a6 = add_commit( 329 | branch_body, 330 | member_privkey, 331 | member_pubkey, 332 | 6, 333 | vec![t4], 334 | vec![], 335 | ack_body, 336 | repo_pubkey, 337 | repo_secret, 338 | &mut store, 339 | ); 340 | 341 | println!(">> a7"); 342 | let a7 = add_commit( 343 | branch_body, 344 | member_privkey, 345 | member_pubkey, 346 | 7, 347 | vec![t4], 348 | vec![], 349 | ack_body, 350 | repo_pubkey, 351 | repo_secret, 352 | &mut store, 353 | ); 354 | 355 | let mut public_overlay_cnx = cnx 356 | .overlay_connect(&repolink, true) 357 | .await 358 | .expect("overlay_connect failed"); 359 | 360 | // Sending everything to the broker 361 | for (v) in store.get_all() { 362 | //debug_println!("SENDING {}", k); 363 | let _ = public_overlay_cnx 364 | .put_block(&v) 365 | .await 366 | .expect("put_block failed"); 367 | } 368 | 369 | // Now emptying the local store of the client, and adding only 1 commit into it (br) 370 | // we also have received an commit (t5) but we don't know what to do with it... 371 | let mut store = HashMapRepoStore::new(); 372 | 373 | let br = add_commit( 374 | branch_body, 375 | member_privkey, 376 | member_pubkey, 377 | 0, 378 | vec![], 379 | vec![], 380 | branch_body, 381 | repo_pubkey, 382 | repo_secret, 383 | &mut store, 384 | ); 385 | 386 | let t5 = add_commit( 387 | branch_body, 388 | member_privkey, 389 | member_pubkey, 390 | 5, 391 | vec![t1, t2], 392 | vec![t4], 393 | trans_body, 394 | repo_pubkey, 395 | repo_secret, 396 | &mut store, 397 | ); 398 | 399 | debug_println!("LOCAL STORE HAS {} BLOCKS", store.get_len()); 400 | 401 | // Let's pretend that we know that the head of the branch in the broker is at commits a6 and a7. 
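    // (Those remote heads, together with our local heads and a Bloom filter
    // of the commits we already have, are what sync_branch() sends below, so
    // that the broker only streams back the blocks we are missing.)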
402 | // normally it would be the pub/sub that notifies us of those heads. 403 | // now we want to synchronize with the broker. 404 | 405 | let mut filter = Filter::new(FilterBuilder::new(10, 0.01)); 406 | for commit_ref in [br, t5] { 407 | match commit_ref.id { 408 | ObjectId::Blake3Digest32(d) => filter.add(&d), 409 | } 410 | } 411 | let cfg = filter.config(); 412 | 413 | let known_commits = BloomFilter { 414 | k: cfg.hashes, 415 | f: filter.get_u8_array().to_vec(), 416 | }; 417 | 418 | let known_heads = [br.id]; 419 | 420 | let remote_heads = [a6.id, a7.id]; 421 | 422 | let mut synced_blocks_stream = public_overlay_cnx 423 | .sync_branch(remote_heads.to_vec(), known_heads.to_vec(), known_commits) 424 | .await 425 | .expect("sync_branch failed"); 426 | 427 | let mut i = 0; 428 | while let Some(b) = synced_blocks_stream.next().await { 429 | debug_println!("GOT BLOCK {}", b.id()); 430 | store.put(&b); 431 | i += 1; 432 | } 433 | 434 | debug_println!("SYNCED {} BLOCKS", i); 435 | 436 | debug_println!("LOCAL STORE HAS {} BLOCKS", store.get_len()); 437 | 438 | // now the client can verify the DAG and each commit. Then update its list of heads. 439 | } 440 | 441 | async fn test(cnx: &mut impl BrokerConnection, pub_key: PubKey, priv_key: PrivKey) -> Result<(), ProtocolError>{ 442 | 443 | cnx.add_user(PubKey::Ed25519PubKey([1; 32]), priv_key).await?; 444 | 445 | cnx.add_user(pub_key, priv_key).await?; 446 | //.expect("add_user 2 (myself) failed"); 447 | 448 | assert_eq!( 449 | cnx.add_user(PubKey::Ed25519PubKey([1; 32]), priv_key).await.err().unwrap(), 450 | ProtocolError::UserAlreadyExists 451 | ); 452 | 453 | let repo = RepoLink::V0(RepoLinkV0 { 454 | id: PubKey::Ed25519PubKey([1; 32]), 455 | secret: SymKey::ChaCha20Key([0; 32]), 456 | peers: vec![], 457 | }); 458 | let mut public_overlay_cnx = cnx 459 | .overlay_connect(&repo, true) 460 | .await?; 461 | 462 | let my_block_id = public_overlay_cnx 463 | .put_block(&Block::new( 464 | vec![], 465 | ObjectDeps::ObjectIdList(vec![]), 466 | None, 467 | vec![27; 150], 468 | None, 469 | )) 470 | .await?; 471 | 472 | debug_println!("added block_id to store {}", my_block_id); 473 | 474 | let object_id = public_overlay_cnx 475 | .put_object( 476 | ObjectContent::File(File::V0(FileV0 { 477 | content_type: vec![], 478 | metadata: vec![], 479 | content: vec![48; 69000], 480 | })), 481 | vec![], 482 | None, 483 | block_size(), 484 | repo.id(), 485 | repo.secret(), 486 | ) 487 | .await?; 488 | 489 | debug_println!("added object_id to store {}", object_id); 490 | 491 | let mut my_block_stream = public_overlay_cnx 492 | .get_block(my_block_id, true, None) 493 | .await?; 494 | //.expect("get_block failed"); 495 | 496 | while let Some(b) = my_block_stream.next().await { 497 | debug_println!("GOT BLOCK {}", b.id()); 498 | } 499 | 500 | let mut my_object_stream = public_overlay_cnx 501 | .get_block(object_id, true, None) 502 | .await?; 503 | //.expect("get_block for object failed"); 504 | 505 | while let Some(b) = my_object_stream.next().await { 506 | debug_println!("GOT BLOCK {}", b.id()); 507 | } 508 | 509 | let object = public_overlay_cnx 510 | .get_object(object_id, None) 511 | .await?; 512 | //.expect("get_object failed"); 513 | 514 | debug_println!("GOT OBJECT with ID {}", object.id()); 515 | 516 | // let object_id = public_overlay_cnx 517 | // .copy_object(object_id, Some(now_timestamp() + 60)) 518 | // .await 519 | // .expect("copy_object failed"); 520 | 521 | // debug_println!("COPIED OBJECT to OBJECT ID {}", object_id); 522 | 523 | public_overlay_cnx 524 
| .delete_object(object_id) 525 | .await?; 526 | //.expect("delete_object failed"); 527 | 528 | let res = public_overlay_cnx 529 | .get_object(object_id, None) 530 | .await 531 | .unwrap_err(); 532 | 533 | debug_println!("result from get object after delete: {}", res); 534 | assert_eq!(res, ProtocolError::NotFound); 535 | 536 | //TODO test pin/unpin 537 | 538 | // TEST BRANCH SYNC 539 | 540 | test_sync(cnx, pub_key, priv_key).await; 541 | 542 | Ok(()) 543 | } 544 | 545 | async fn test_local_connection() { 546 | debug_println!("===== TESTING LOCAL API ====="); 547 | 548 | let root = tempfile::Builder::new() 549 | .prefix("node-daemon") 550 | .tempdir() 551 | .unwrap(); 552 | let master_key: [u8; 32] = [0; 32]; 553 | std::fs::create_dir_all(root.path()).unwrap(); 554 | println!("{}", root.path().to_str().unwrap()); 555 | let store = LmdbBrokerStore::open(root.path(), master_key); 556 | 557 | let mut server = BrokerServer::new(store, ConfigMode::Local).expect("starting broker"); 558 | 559 | let (priv_key, pub_key) = generate_keypair(); 560 | 561 | let mut cnx = server.local_connection(pub_key); 562 | 563 | let _ = test(&mut cnx, pub_key, priv_key).await; 564 | } 565 | 566 | async fn test_remote_connection() { 567 | debug_println!("===== TESTING REMOTE API ====="); 568 | 569 | let res = connect_async("ws://127.0.0.1:3012").await; 570 | 571 | match res { 572 | Ok((ws, _)) => { 573 | debug_println!("WebSocket handshake completed"); 574 | 575 | let (write, read) = ws.split(); 576 | let mut frames_stream_read = read.map(|msg_res| match msg_res { 577 | Err(e) => { 578 | debug_println!("ERROR {:?}", e); 579 | vec![] 580 | } 581 | Ok(message) => { 582 | if message.is_close() { 583 | debug_println!("CLOSE FROM SERVER"); 584 | vec![] 585 | } else { 586 | message.into_data() 587 | } 588 | } 589 | }); 590 | async fn transform(message: Vec<u8>) -> Result<Message, ProtocolError> { 591 | if message.is_empty() { 592 | debug_println!("sending CLOSE message to SERVER"); 593 | Ok(Message::Close(None)) 594 | } else { 595 | Ok(Message::binary(message)) 596 | } 597 | } 598 | let frames_stream_write = write 599 | .with(|message| transform(message)) 600 | .sink_map_err(|_e| ProtocolError::WriteError); 601 | 602 | let (priv_key, pub_key) = generate_keypair(); 603 | let master_key: [u8; 32] = [0; 32]; 604 | let cnx_res = ConnectionRemote::open_broker_connection( 605 | frames_stream_write, 606 | frames_stream_read, 607 | pub_key, 608 | priv_key, 609 | PubKey::Ed25519PubKey([1; 32]), 610 | ) 611 | .await; 612 | 613 | match cnx_res { 614 | Ok(mut cnx) => { 615 | if let Err(e) = test(&mut cnx, pub_key, priv_key).await { 616 | debug_println!("error: {:?}", e) 617 | } else { 618 | cnx.close().await; 619 | } 620 | 621 | } 622 | Err(e) => { 623 | debug_println!("cannot connect {:?}", e); 624 | } 625 | } 626 | } 627 | Err(e) => { 628 | debug_println!("Cannot connect: {:?}", e); 629 | } 630 | } 631 | } 632 | 633 | #[xactor::main] 634 | async fn main() -> std::io::Result<()> { 635 | debug_println!("Starting LoFiRe app demo..."); 636 | 637 | test_local_connection().await; 638 | 639 | test_remote_connection().await; 640 | 641 | Ok(()) 642 | } 643 | 644 | #[cfg(test)] 645 | mod test { 646 | 647 | use assert_cmd::prelude::*; 648 | use futures::task::SpawnExt; 649 | use lofire::store::*; 650 | use lofire::types::*; 651 | use lofire::utils::*; 652 | use std::process::Command; 653 | #[allow(unused_imports)] 654 | use std::time::Duration; 655 | #[allow(unused_imports)] 656 | use std::{fs, thread}; 657 | use tempfile::Builder; // Run programs 658 | 659 | use
crate::{test_local_connection, test_remote_connection}; 660 | 661 | #[async_std::test] 662 | pub async fn test_local_cnx() { 663 | xactor::block_on(test_local_connection()); 664 | } 665 | 666 | use async_std::net::{TcpListener, TcpStream}; 667 | use async_std::sync::Mutex; 668 | use async_std::task; 669 | use async_tungstenite::accept_async; 670 | use async_tungstenite::tungstenite::protocol::Message; 671 | use debug_print::*; 672 | use futures::{SinkExt, StreamExt}; 673 | use lofire_broker::config::ConfigMode; 674 | use lofire_broker::server::*; 675 | use lofire_store_lmdb::brokerstore::LmdbBrokerStore; 676 | use std::sync::Arc; 677 | 678 | // async fn connection_loop(tcp: TcpStream, mut handler: ProtocolHandler) -> std::io::Result<()> { 679 | // let mut ws = accept_async(tcp).await.unwrap(); 680 | // let (mut tx, mut rx) = ws.split(); 681 | 682 | // let mut tx_mutex = Arc::new(Mutex::new(tx)); 683 | 684 | // // setup the async frames task 685 | // let receiver = handler.async_frames_receiver(); 686 | // let ws_in_task = Arc::clone(&tx_mutex); 687 | // task::spawn(async move { 688 | // while let Ok(frame) = receiver.recv().await { 689 | // if ws_in_task 690 | // .lock() 691 | // .await 692 | // .send(Message::binary(frame)) 693 | // .await 694 | // .is_err() 695 | // { 696 | // //deal with sending errors (close the connection) 697 | // break; 698 | // } 699 | // } 700 | // debug_println!("end of async frames loop"); 701 | 702 | // let mut lock = ws_in_task.lock().await; 703 | // let _ = lock.send(Message::Close(None)).await; 704 | // let _ = lock.close(); 705 | // }); 706 | 707 | // while let Some(msg) = rx.next().await { 708 | // let msg = match msg { 709 | // Err(e) => { 710 | // debug_println!("Error on server stream: {:?}", e); 711 | // // Errors returned directly through the AsyncRead/Write API are fatal, generally an error on the underlying 712 | // // transport. 
713 | // // TODO close connection 714 | // break; 715 | // } 716 | // Ok(m) => m, 717 | // }; 718 | // //TODO implement PING and CLOSE messages 719 | // if msg.is_close() { 720 | // debug_println!("CLOSE from client"); 721 | // break; 722 | // } else if msg.is_binary() { 723 | // //debug_println!("server received binary: {:?}", msg); 724 | 725 | // let replies = handler.handle_incoming(msg.into_data()).await; 726 | 727 | // match replies.0 { 728 | // Err(e) => { 729 | // debug_println!("Protocol Error: {:?}", e); 730 | // // dealing with ProtocolErrors (close the connection) 731 | // break; 732 | // } 733 | // Ok(r) => { 734 | // if tx_mutex 735 | // .lock() 736 | // .await 737 | // .send(Message::binary(r)) 738 | // .await 739 | // .is_err() 740 | // { 741 | // //deaingl with sending errors (close the connection) 742 | // break; 743 | // } 744 | // } 745 | // } 746 | // match replies.1.await { 747 | // Some(errcode) => { 748 | // if errcode > 0 { 749 | // debug_println!("Close due to error code : {:?}", errcode); 750 | // //close connection 751 | // break; 752 | // } 753 | // } 754 | // None => {} 755 | // } 756 | // } 757 | // } 758 | // let mut lock = tx_mutex.lock().await; 759 | // let _ = lock.send(Message::Close(None)).await; 760 | // let _ = lock.close(); 761 | // debug_println!("end of sync read+write loop"); 762 | // Ok(()) 763 | // } 764 | 765 | async fn run_server_accept_one() -> std::io::Result<()> { 766 | // let root = tempfile::Builder::new() 767 | // .prefix("node-daemon") 768 | // .tempdir() 769 | // .unwrap(); 770 | // let master_key: [u8; 32] = [0; 32]; 771 | // std::fs::create_dir_all(root.path()).unwrap(); 772 | // println!("{}", root.path().to_str().unwrap()); 773 | // let store = LmdbBrokerStore::open(root.path(), master_key); 774 | 775 | // let server: BrokerServer = 776 | // BrokerServer::new(store, ConfigMode::Local).expect("starting broker"); 777 | 778 | // let socket = TcpListener::bind("127.0.0.1:3012").await?; 779 | // debug_println!("Listening on 127.0.0.1:3012"); 780 | // let mut connections = socket.incoming(); 781 | // let server_arc = Arc::new(server); 782 | // let tcp = connections.next().await.unwrap()?; 783 | // let proto_handler = Arc::clone(&server_arc).protocol_handler(); 784 | // let _handle = task::spawn(connection_loop(tcp, proto_handler)); 785 | 786 | Ok(()) 787 | } 788 | 789 | #[async_std::test] 790 | pub async fn test_remote_cnx() -> Result<(), Box> { 791 | //let mut cmd = Command::cargo_bin("lofire-node")?; 792 | //cmd.spawn(); 793 | 794 | let thr = task::spawn(run_server_accept_one()); 795 | 796 | std::thread::sleep(std::time::Duration::from_secs(2)); 797 | 798 | xactor::block_on(test_remote_connection()); 799 | 800 | xactor::block_on(thr); 801 | 802 | Ok(()) 803 | } 804 | } 805 | -------------------------------------------------------------------------------- /lofire-net/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "lofire-net" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 7 | 8 | [dependencies] 9 | lofire = { path = "../lofire" } 10 | serde = { version = "1.0", features = ["derive"] } 11 | serde_bare = "0.5.0" 12 | serde_bytes = "0.11.7" 13 | num_enum = "0.5.7" -------------------------------------------------------------------------------- /lofire-net/src/errors.rs: -------------------------------------------------------------------------------- 1 | use 
crate::types::BrokerMessage; 2 | use core::fmt; 3 | use lofire::object::ObjectParseError; 4 | use lofire::types::Block; 5 | use lofire::types::ObjectId; 6 | use num_enum::IntoPrimitive; 7 | use num_enum::TryFromPrimitive; 8 | use std::convert::From; 9 | use std::convert::TryFrom; 10 | use std::error::Error; 11 | 12 | #[derive(Debug, Eq, PartialEq, TryFromPrimitive, IntoPrimitive, Clone)] 13 | #[repr(u16)] 14 | pub enum ProtocolError { 15 | WriteError = 1, 16 | ActorError, 17 | InvalidState, 18 | SignatureError, 19 | InvalidSignature, 20 | SerializationError, 21 | PartialContent, 22 | AccessDenied, 23 | OverlayNotJoined, 24 | OverlayNotFound, 25 | BrokerError, 26 | NotFound, 27 | EndOfStream, 28 | StoreError, 29 | MissingBlocks, 30 | ObjectParseError, 31 | InvalidValue, 32 | UserAlreadyExists, 33 | RepoIdRequired, 34 | Closing, 35 | } 36 | 37 | impl ProtocolError { 38 | pub fn is_stream(&self) -> bool { 39 | *self == ProtocolError::PartialContent || *self == ProtocolError::EndOfStream 40 | } 41 | } 42 | 43 | impl Error for ProtocolError {} 44 | 45 | impl fmt::Display for ProtocolError { 46 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 47 | write!(f, "{:?}", self) 48 | } 49 | } 50 | 51 | impl From<lofire::errors::LofireError> for ProtocolError { 52 | fn from(e: lofire::errors::LofireError) -> Self { 53 | match e { 54 | lofire::errors::LofireError::InvalidSignature => ProtocolError::InvalidSignature, 55 | lofire::errors::LofireError::SerializationError => ProtocolError::SerializationError, 56 | } 57 | } 58 | } 59 | 60 | impl From<ObjectParseError> for ProtocolError { 61 | fn from(_e: ObjectParseError) -> Self { 62 | ProtocolError::ObjectParseError 63 | } 64 | } 65 | 66 | impl From<lofire::store::StorageError> for ProtocolError { 67 | fn from(e: lofire::store::StorageError) -> Self { 68 | match e { 69 | lofire::store::StorageError::NotFound => ProtocolError::NotFound, 70 | lofire::store::StorageError::InvalidValue => ProtocolError::InvalidValue, 71 | _ => ProtocolError::StoreError, 72 | } 73 | } 74 | } 75 | 76 | impl From<serde_bare::error::Error> for ProtocolError { 77 | fn from(_e: serde_bare::error::Error) -> Self { 78 | ProtocolError::SerializationError 79 | } 80 | } 81 | 82 | impl From<BrokerMessage> for Result<(), ProtocolError> { 83 | fn from(msg: BrokerMessage) -> Self { 84 | if !msg.is_response() { 85 | panic!("BrokerMessage is not a response"); 86 | } 87 | match msg.result() { 88 | 0 => Ok(()), 89 | err => Err(ProtocolError::try_from(err).unwrap()), 90 | } 91 | } 92 | } 93 | 94 | impl From<BrokerMessage> for Result<ObjectId, ProtocolError> { 95 | fn from(msg: BrokerMessage) -> Self { 96 | if !msg.is_response() { 97 | panic!("BrokerMessage is not a response"); 98 | } 99 | match msg.result() { 100 | 0 => Ok(msg.response_object_id()), 101 | err => Err(ProtocolError::try_from(err).unwrap()), 102 | } 103 | } 104 | } 105 | 106 | /// Option<u16> represents if a Block is available. The Block itself cannot be returned here: call BrokerMessage.response_block() to get a reference to it. 107 | impl From<BrokerMessage> for Result<Option<u16>, ProtocolError> { 108 | fn from(msg: BrokerMessage) -> Self { 109 | if !msg.is_response() { 110 | panic!("BrokerMessage is not a response"); 111 | } 112 | //let partial: u16 = ProtocolError::PartialContent.into(); 113 | let res = msg.result(); 114 | if res == 0 || ProtocolError::try_from(res).unwrap().is_stream() { 115 | if msg.is_overlay() { 116 | match msg.response_block() { 117 | Some(_) => Ok(Some(res)), 118 | None => Ok(None), 119 | } 120 | } else { 121 | Ok(None) 122 | } 123 | } else { 124 | Err(ProtocolError::try_from(res).unwrap()) 125 | } 126 | } 127 | } 128 |
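// Usage sketch for the BrokerMessage -> Result conversions above ("reply"
// is a hypothetical message already received from the broker):
//
//     let outcome: Result<(), ProtocolError> = reply.into();
//
// The conversions panic when the message is not a response, so they must
// only ever be applied to replies, never to requests.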
129 | /// Option<Block> represents if a Block is available. This conversion returns a clone of it. 130 | impl From<BrokerMessage> for Result<Option<Block>, ProtocolError> { 131 | fn from(msg: BrokerMessage) -> Self { 132 | if !msg.is_response() { 133 | panic!("BrokerMessage is not a response"); 134 | } 135 | //let partial: u16 = ProtocolError::PartialContent.into(); 136 | let res = msg.result(); 137 | if res == 0 || ProtocolError::try_from(res).unwrap().is_stream() { 138 | if msg.is_overlay() { 139 | match msg.response_block() { 140 | Some(b) => Ok(Some(b.clone())), 141 | None => Ok(None), 142 | } 143 | } else { 144 | Ok(None) 145 | } 146 | } else { 147 | Err(ProtocolError::try_from(res).unwrap()) 148 | } 149 | } 150 | } 151 | -------------------------------------------------------------------------------- /lofire-net/src/lib.rs: -------------------------------------------------------------------------------- 1 | pub mod types; 2 | 3 | pub mod errors; 4 | -------------------------------------------------------------------------------- /lofire-node/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "lofire-node" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 7 | 8 | [dependencies] 9 | debug_print = "1.0.0" 10 | lofire = { path = "../lofire" } 11 | lofire-net = { path = "../lofire-net" } 12 | lofire-p2p = { path = "../lofire-p2p" } 13 | lofire-broker = { path = "../lofire-broker" } 14 | lofire-store-lmdb = { path = "../lofire-store-lmdb" } 15 | async-std = { version = "1.7.0", features = ["attributes"] } 16 | async-tungstenite = { version = "0.17.2", features = ["async-std-runtime","async-native-tls"] } 17 | futures = "0.3.24" 18 | tempfile = "3" 19 | -------------------------------------------------------------------------------- /lofire-node/src/main.rs: -------------------------------------------------------------------------------- 1 | use async_std::net::{TcpListener, TcpStream}; 2 | use async_std::sync::Mutex; 3 | use async_std::task; 4 | use async_tungstenite::accept_async; 5 | use async_tungstenite::tungstenite::protocol::Message; 6 | use debug_print::*; 7 | use futures::{SinkExt, StreamExt}; 8 | use lofire_broker::config::ConfigMode; 9 | use lofire_broker::server::*; 10 | use lofire_store_lmdb::brokerstore::LmdbBrokerStore; 11 | use lofire_store_lmdb::repostore::LmdbRepoStore; 12 | use std::fs; 13 | use std::sync::Arc; 14 | use tempfile::Builder; 15 | use std::{thread, time}; 16 | 17 | 18 | async fn connection_loop(tcp: TcpStream, mut handler: ProtocolHandler) -> std::io::Result<()> { 19 | let mut ws = accept_async(tcp).await.unwrap(); 20 | let (mut tx, mut rx) = ws.split(); 21 | 22 | let mut tx_mutex = Arc::new(Mutex::new(tx)); 23 | 24 | // setup the async frames task 25 | let receiver = handler.async_frames_receiver(); 26 | let ws_in_task = Arc::clone(&tx_mutex); 27 | task::spawn(async move { 28 | while let Ok(frame) = receiver.recv().await { 29 | let mut sink = ws_in_task 30 | .lock() 31 | .await; 32 | if sink.send(Message::binary(frame)) 33 | .await 34 | .is_err() 35 | { 36 | break; 37 | } 38 | } 39 | debug_println!("end of async frames loop"); 40 | 41 | let mut sink = ws_in_task.lock().await; 42 | let _ = sink.send(Message::Close(None)).await; 43 | let _ = sink.close().await; 44 | }); 45 | 46 | while let Some(msg) = rx.next().await { 47 | //debug_println!("RCV: {:?}", msg); 48 | let msg = match msg { 49 | Err(e) => { 50 | debug_println!("Error on server stream: {:?}", e); 51 | // Errors returned directly through the AsyncRead/Write API
are fatal, generally an error on the underlying 52 | // transport. closing connection 53 | break; 54 | } 55 | Ok(m) => m, 56 | }; 57 | //TODO implement PING messages 58 | if msg.is_close() { 59 | debug_println!("CLOSE from CLIENT"); 60 | break; 61 | } else if msg.is_binary() { 62 | //debug_println!("server received binary: {:?}", msg); 63 | 64 | let replies = handler.handle_incoming(msg.into_data()).await; 65 | 66 | match replies.0 { 67 | Err(e) => { 68 | debug_println!("Protocol Error: {:?}", e); 69 | // dealing with ProtocolErrors (closing the connection) 70 | break; 71 | } 72 | Ok(r) => { 73 | if tx_mutex 74 | .lock() 75 | .await 76 | .send(Message::binary(r)) 77 | .await 78 | .is_err() 79 | { 80 | //dealing with sending errors (closing the connection) 81 | break; 82 | } 83 | } 84 | } 85 | match replies.1.await { 86 | Some(errcode) => { 87 | if errcode > 0 { 88 | debug_println!("Close due to error code : {:?}", errcode); 89 | //closing connection 90 | break; 91 | } 92 | } 93 | None => {} 94 | } 95 | } 96 | } 97 | let mut sink = tx_mutex.lock().await; 98 | let _ = sink.send(Message::Close(None)).await; 99 | let _ = sink.close().await; 100 | debug_println!("end of sync read+write loop"); 101 | Ok(()) 102 | } 103 | 104 | async fn run_server() -> std::io::Result<()> { 105 | let root = tempfile::Builder::new() 106 | .prefix("node-daemon") 107 | .tempdir() 108 | .unwrap(); 109 | let master_key: [u8; 32] = [0; 32]; 110 | std::fs::create_dir_all(root.path()).unwrap(); 111 | println!("{}", root.path().to_str().unwrap()); 112 | let store = LmdbBrokerStore::open(root.path(), master_key); 113 | 114 | let server: BrokerServer = 115 | BrokerServer::new(store, ConfigMode::Local).expect("starting broker"); 116 | 117 | let socket = TcpListener::bind("127.0.0.1:3012").await?; 118 | let mut connections = socket.incoming(); 119 | let server_arc = Arc::new(server); 120 | while let Some(tcp) = connections.next().await { 121 | let proto_handler = Arc::clone(&server_arc).protocol_handler(); 122 | let _handle = task::spawn(connection_loop(tcp.unwrap(), proto_handler)); 123 | } 124 | Ok(()) 125 | } 126 | 127 | #[async_std::main] 128 | async fn main() -> std::io::Result<()> { 129 | println!("Starting LoFiRe node daemon..."); 130 | 131 | run_server().await 132 | } 133 | -------------------------------------------------------------------------------- /lofire-p2p/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "lofire-p2p" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 7 | 8 | [dependencies] 9 | lofire = { path = "../lofire" } 10 | lofire-net = { path = "../lofire-net" } 11 | blake3 = "1.3.1" 12 | chacha20 = "0.9.0" 13 | serde = { version = "1.0", features = ["derive"] } 14 | serde_bare = "0.5.0" 15 | serde_bytes = "0.11.7" 16 | -------------------------------------------------------------------------------- /lofire-p2p/src/lib.rs: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /lofire-store-lmdb/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "lofire-store-lmdb" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 7 | 8 | [dependencies] 9 | lofire = { path = 
"../lofire" } 10 | debug_print = "1.0.0" 11 | serde = { version = "1.0.142", features = ["derive"] } 12 | serde_bare = "0.5.0" 13 | tempfile = "3" 14 | hex = "0.4.3" 15 | 16 | [dependencies.rkv] 17 | git = "https://github.com/p2pcollab/rkv.git" 18 | rev = "19fc8e0b686a21c591e3a137e623fea40445fdb8" 19 | features = [ "lmdb" ] 20 | -------------------------------------------------------------------------------- /lofire-store-lmdb/src/brokerstore.rs: -------------------------------------------------------------------------------- 1 | use lofire::brokerstore::*; 2 | use lofire::store::*; 3 | use lofire::types::*; 4 | use lofire::utils::*; 5 | 6 | use debug_print::*; 7 | use std::path::Path; 8 | use std::path::PathBuf; 9 | use std::sync::{Arc, RwLock}; 10 | 11 | use rkv::backend::{ 12 | BackendDatabaseFlags, BackendFlags, BackendIter, BackendWriteFlags, DatabaseFlags, Lmdb, 13 | LmdbDatabase, LmdbDatabaseFlags, LmdbEnvironment, LmdbRwTransaction, LmdbWriteFlags, 14 | }; 15 | use rkv::{ 16 | Manager, MultiStore, Rkv, SingleStore, StoreError, StoreOptions, Value, WriteFlags, Writer, 17 | }; 18 | 19 | use serde::{Deserialize, Serialize}; 20 | use serde_bare::error::Error; 21 | 22 | pub struct LmdbBrokerStore { 23 | /// the main store where all the properties of keys are stored 24 | main_store: MultiStore, 25 | /// the opened environment so we can create new transactions 26 | environment: Arc>>, 27 | /// path for the storage backend data 28 | path: String, 29 | } 30 | 31 | impl BrokerStore for LmdbBrokerStore { 32 | /// Load a single value property from the store. 33 | fn get(&self, prefix: u8, key: &Vec, suffix: Option) -> Result, StorageError> { 34 | let property = Self::compute_property(prefix, key, suffix); 35 | let lock = self.environment.read().unwrap(); 36 | let reader = lock.read().unwrap(); 37 | let mut iter = self 38 | .main_store 39 | .get(&reader, property) 40 | .map_err(|e| StorageError::BackendError)?; 41 | match iter.next() { 42 | Some(Ok(val)) => Ok(val.1.to_bytes().unwrap()), 43 | Some(Err(_e)) => Err(StorageError::BackendError), 44 | None => Err(StorageError::NotFound), 45 | } 46 | } 47 | 48 | /// Load all the values of a property from the store. 49 | fn get_all( 50 | &self, 51 | prefix: u8, 52 | key: &Vec, 53 | suffix: Option, 54 | ) -> Result>, StorageError> { 55 | let property = Self::compute_property(prefix, key, suffix); 56 | let lock = self.environment.read().unwrap(); 57 | let reader = lock.read().unwrap(); 58 | let mut iter = self 59 | .main_store 60 | .get(&reader, property) 61 | .map_err(|e| StorageError::BackendError)?; 62 | let mut vector: Vec> = vec![]; 63 | while let res = iter.next() { 64 | vector.push(match res { 65 | Some(Ok(val)) => val.1.to_bytes().unwrap(), 66 | Some(Err(_e)) => return Err(StorageError::BackendError), 67 | None => { 68 | break; 69 | } 70 | }); 71 | } 72 | Ok(vector) 73 | } 74 | 75 | /// Check if a specific value exists for a property from the store. 76 | fn has_property_value( 77 | &self, 78 | prefix: u8, 79 | key: &Vec, 80 | suffix: Option, 81 | value: Vec, 82 | ) -> Result<(), StorageError> { 83 | let property = Self::compute_property(prefix, key, suffix); 84 | let lock = self.environment.read().unwrap(); 85 | let reader = lock.read().unwrap(); 86 | let exists = self 87 | .main_store 88 | .get_key_value(&reader, property, &Value::Blob(value.as_slice())) 89 | .map_err(|e| StorageError::BackendError)?; 90 | if exists { 91 | Ok(()) 92 | } else { 93 | Err(StorageError::NotFound) 94 | } 95 | } 96 | 97 | /// Save a property value to the store. 
98 | fn put( 99 | &self, 100 | prefix: u8, 101 | key: &Vec<u8>, 102 | suffix: Option<u8>, 103 | value: Vec<u8>, 104 | ) -> Result<(), StorageError> { 105 | let property = Self::compute_property(prefix, key, suffix); 106 | let lock = self.environment.read().unwrap(); 107 | let mut writer = lock.write().unwrap(); 108 | self.main_store 109 | .put(&mut writer, property, &Value::Blob(value.as_slice())) 110 | .map_err(|_e| StorageError::BackendError)?; 111 | 112 | writer.commit().unwrap(); 113 | 114 | Ok(()) 115 | } 116 | 117 | /// Replace the property of a key (single value) in the store. 118 | fn replace( 119 | &self, 120 | prefix: u8, 121 | key: &Vec<u8>, 122 | suffix: Option<u8>, 123 | value: Vec<u8>, 124 | ) -> Result<(), StorageError> { 125 | let property = Self::compute_property(prefix, key, suffix); 126 | let lock = self.environment.read().unwrap(); 127 | let mut writer = lock.write().unwrap(); 128 | self.main_store 129 | .delete_all(&mut writer, property.clone()) 130 | .map_err(|_e| StorageError::BackendError)?; 131 | 132 | self.main_store 133 | .put(&mut writer, property, &Value::Blob(value.as_slice())) 134 | .map_err(|_e| StorageError::BackendError)?; 135 | 136 | writer.commit().unwrap(); 137 | 138 | Ok(()) 139 | } 140 | 141 | /// Delete a property from the store. 142 | fn del(&self, prefix: u8, key: &Vec<u8>, suffix: Option<u8>) -> Result<(), StorageError> { 143 | let property = Self::compute_property(prefix, key, suffix); 144 | let lock = self.environment.read().unwrap(); 145 | let mut writer = lock.write().unwrap(); 146 | self.main_store 147 | .delete_all(&mut writer, property) 148 | .map_err(|_e| StorageError::BackendError)?; 149 | 150 | writer.commit().unwrap(); 151 | 152 | Ok(()) 153 | } 154 | 155 | /// Delete a specific value for a property from the store. 156 | fn del_property_value( 157 | &self, 158 | prefix: u8, 159 | key: &Vec<u8>, 160 | suffix: Option<u8>, 161 | value: Vec<u8>, 162 | ) -> Result<(), StorageError> { 163 | let property = Self::compute_property(prefix, key, suffix); 164 | let lock = self.environment.read().unwrap(); 165 | let mut writer = lock.write().unwrap(); 166 | self.main_store 167 | .delete(&mut writer, property, &Value::Blob(value.as_slice())) 168 | .map_err(|_e| StorageError::BackendError)?; 169 | 170 | writer.commit().unwrap(); 171 | 172 | Ok(()) 173 | } 174 | 175 | /// Delete all properties of a key from the store. 176 | fn del_all(&self, prefix: u8, key: &Vec<u8>, all_suffixes: &[u8]) -> Result<(), StorageError> { 177 | for suffix in all_suffixes { 178 | self.del(prefix, key, Some(*suffix))?; 179 | } 180 | if all_suffixes.is_empty() { 181 | self.del(prefix, key, None)?; 182 | } 183 | Ok(()) 184 | } 185 | } 186 | 187 | impl LmdbBrokerStore { 188 | pub fn path(&self) -> PathBuf { 189 | PathBuf::from(&self.path) 190 | } 191 | 192 | fn compute_property(prefix: u8, key: &Vec<u8>, suffix: Option<u8>) -> Vec<u8> { 193 | let mut new: Vec<u8> = Vec::with_capacity(key.len() + 2); 194 | new.push(prefix); 195 | new.extend(key); 196 | if suffix.is_some() { 197 | new.push(suffix.unwrap()) 198 | } 199 | new 200 | } 201 | 202 | /// Opens the store and returns a BrokerStore object that should be kept and used to manipulate Accounts, Overlays, Topics and options 203 | /// The key is the encryption key for the data at rest.
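// Opening sketch, mirroring test_local_connection in lofire-demo (the
// all-zero key is for tests only; production use should generate a random
// key and keep it outside the store):
//
//     let root = tempfile::Builder::new().prefix("broker").tempdir().unwrap();
//     let store = LmdbBrokerStore::open(root.path(), [0u8; 32]);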
204 | pub fn open<'a>(path: &Path, key: [u8; 32]) -> LmdbBrokerStore { 205 | let mut manager = Manager::<LmdbEnvironment>::singleton().write().unwrap(); 206 | let shared_rkv = manager 207 | .get_or_create(path, |path| { 208 | //Rkv::new::<Lmdb>(path) // use this instead to disable encryption 209 | Rkv::with_encryption_key_and_mapsize::<Lmdb>(path, key, 2 * 1024 * 1024 * 1024) 210 | }) 211 | .unwrap(); 212 | let env = shared_rkv.read().unwrap(); 213 | 214 | println!("created env with LMDB Version: {}", env.version()); 215 | 216 | let main_store = env.open_multi("main", StoreOptions::create()).unwrap(); 217 | 218 | LmdbBrokerStore { 219 | environment: shared_rkv.clone(), 220 | main_store, 221 | path: path.to_str().unwrap().to_string(), 222 | } 223 | } 224 | } 225 | -------------------------------------------------------------------------------- /lofire-store-lmdb/src/lib.rs: -------------------------------------------------------------------------------- 1 | pub mod repostore; 2 | 3 | pub mod brokerstore; 4 | -------------------------------------------------------------------------------- /lofire/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "lofire" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | # See more keys and their definitions at 7 | # https://doc.rust-lang.org/cargo/reference/manifest.html 8 | 9 | [dependencies] 10 | blake3 = "1.3.1" 11 | chacha20 = "0.9.0" 12 | ed25519-dalek = "1.0.1" 13 | rand = "0.7" 14 | serde = { version = "1.0.142", features = ["derive"] } 15 | serde_bare = "0.5.0" 16 | serde_bytes = "0.11.7" 17 | fastbloom-rs = "0.3.1" 18 | debug_print = "1.0.0" 19 | hex = "0.4.3" 20 | -------------------------------------------------------------------------------- /lofire/src/block.rs: -------------------------------------------------------------------------------- 1 | //!
Immutable Block 2 | 3 | use crate::types::*; 4 | 5 | impl BlockV0 { 6 | pub fn new( 7 | children: Vec, 8 | deps: ObjectDeps, 9 | expiry: Option, 10 | content: Vec, 11 | key: Option, 12 | ) -> BlockV0 { 13 | let mut b = BlockV0 { 14 | id: None, 15 | key, 16 | children, 17 | deps, 18 | expiry, 19 | content, 20 | }; 21 | let block = Block::V0(b.clone()); 22 | b.id = Some(block.get_id()); 23 | b 24 | } 25 | } 26 | 27 | impl Block { 28 | pub fn new( 29 | children: Vec, 30 | deps: ObjectDeps, 31 | expiry: Option, 32 | content: Vec, 33 | key: Option, 34 | ) -> Block { 35 | Block::V0(BlockV0::new(children, deps, expiry, content, key)) 36 | } 37 | 38 | /// Compute the ID 39 | pub fn get_id(&self) -> BlockId { 40 | let ser = serde_bare::to_vec(self).unwrap(); 41 | let hash = blake3::hash(ser.as_slice()); 42 | Digest::Blake3Digest32(hash.as_bytes().clone()) 43 | } 44 | 45 | /// Get the already computed ID 46 | pub fn id(&self) -> BlockId { 47 | match self { 48 | Block::V0(b) => match b.id { 49 | Some(id) => id, 50 | None => self.get_id(), 51 | }, 52 | } 53 | } 54 | 55 | /// Get the content 56 | pub fn content(&self) -> &Vec { 57 | match self { 58 | Block::V0(b) => &b.content, 59 | } 60 | } 61 | 62 | /// Get the children 63 | pub fn children(&self) -> &Vec { 64 | match self { 65 | Block::V0(b) => &b.children, 66 | } 67 | } 68 | 69 | /// Get the dependencies 70 | pub fn deps(&self) -> &ObjectDeps { 71 | match self { 72 | Block::V0(b) => &b.deps, 73 | } 74 | } 75 | 76 | /// Get the expiry 77 | pub fn expiry(&self) -> Option { 78 | match self { 79 | Block::V0(b) => b.expiry, 80 | } 81 | } 82 | 83 | pub fn set_expiry(&mut self, expiry: Option) { 84 | match self { 85 | Block::V0(b) => { 86 | b.id = None; 87 | b.expiry = expiry 88 | } 89 | } 90 | } 91 | 92 | /// Get the key 93 | pub fn key(&self) -> Option { 94 | match self { 95 | Block::V0(b) => b.key, 96 | } 97 | } 98 | 99 | /// Set the key 100 | pub fn set_key(&mut self, key: Option) { 101 | match self { 102 | Block::V0(b) => b.key = key, 103 | } 104 | } 105 | } 106 | -------------------------------------------------------------------------------- /lofire/src/branch.rs: -------------------------------------------------------------------------------- 1 | //! 
Branch of a Repository 2 | 3 | use debug_print::*; 4 | use std::collections::{HashMap, HashSet}; 5 | 6 | use fastbloom_rs::{BloomFilter as Filter, Membership}; 7 | 8 | use crate::object::*; 9 | use crate::store::*; 10 | use crate::types::*; 11 | 12 | impl MemberV0 { 13 | /// New member 14 | pub fn new(id: PubKey, commit_types: Vec, metadata: Vec) -> MemberV0 { 15 | MemberV0 { 16 | id, 17 | commit_types, 18 | metadata, 19 | } 20 | } 21 | 22 | /// Check whether this member has permission for the given commit type 23 | pub fn has_perm(&self, commit_type: CommitType) -> bool { 24 | self.commit_types.contains(&commit_type) 25 | } 26 | } 27 | 28 | impl Member { 29 | /// New member 30 | pub fn new(id: PubKey, commit_types: Vec, metadata: Vec) -> Member { 31 | Member::V0(MemberV0::new(id, commit_types, metadata)) 32 | } 33 | 34 | /// Check whether this member has permission for the given commit type 35 | pub fn has_perm(&self, commit_type: CommitType) -> bool { 36 | match self { 37 | Member::V0(m) => m.has_perm(commit_type), 38 | } 39 | } 40 | } 41 | 42 | impl BranchV0 { 43 | pub fn new( 44 | id: PubKey, 45 | topic: PubKey, 46 | secret: SymKey, 47 | members: Vec, 48 | quorum: HashMap, 49 | ack_delay: RelTime, 50 | tags: Vec, 51 | metadata: Vec, 52 | ) -> BranchV0 { 53 | BranchV0 { 54 | id, 55 | topic, 56 | secret, 57 | members, 58 | quorum, 59 | ack_delay, 60 | tags, 61 | metadata, 62 | } 63 | } 64 | } 65 | 66 | impl Branch { 67 | pub fn new( 68 | id: PubKey, 69 | topic: PubKey, 70 | secret: SymKey, 71 | members: Vec, 72 | quorum: HashMap, 73 | ack_delay: RelTime, 74 | tags: Vec, 75 | metadata: Vec, 76 | ) -> Branch { 77 | Branch::V0(BranchV0::new( 78 | id, topic, secret, members, quorum, ack_delay, tags, metadata, 79 | )) 80 | } 81 | 82 | /// Get member by ID 83 | pub fn get_member(&self, id: &PubKey) -> Option<&MemberV0> { 84 | match self { 85 | Branch::V0(b) => { 86 | for m in b.members.iter() { 87 | if m.id == *id { 88 | return Some(m); 89 | } 90 | } 91 | } 92 | } 93 | None 94 | } 95 | 96 | /// Branch sync request from another peer 97 | /// 98 | /// Return ObjectIds to send 99 | pub fn sync_req( 100 | our_heads: &[ObjectId], 101 | their_heads: &[ObjectId], 102 | their_filter: &BloomFilter, 103 | store: &impl RepoStore, 104 | ) -> Result, ObjectParseError> { 105 | //debug_println!(">> sync_req"); 106 | //debug_println!(" our_heads: {:?}", our_heads); 107 | //debug_println!(" their_heads: {:?}", their_heads); 108 | 109 | /// Load `Commit` `Object`s of a `Branch` from the `RepoStore` starting from the given `Object`, 110 | /// and collect `ObjectId`s starting from `our_heads` towards `their_heads` 111 | fn load_branch( 112 | cobj: &Object, 113 | store: &impl RepoStore, 114 | their_heads: &[ObjectId], 115 | visited: &mut HashSet, 116 | missing: &mut HashSet, 117 | ) -> Result { 118 | //debug_println!(">>> load_branch: {}", cobj.id()); 119 | let id = cobj.id(); 120 | 121 | // root has no deps 122 | let is_root = cobj.deps().len() == 0; 123 | //debug_println!(" deps: {:?}", cobj.deps()); 124 | 125 | // check if this commit object is present in their_heads 126 | let mut their_head_found = their_heads.contains(&id); 127 | 128 | // load deps, stop at the root or if this is a commit object from their_heads 129 | if !is_root && !their_head_found { 130 | visited.insert(id); 131 | for id in cobj.deps() { 132 | match Object::load(*id, None, store) { 133 | Ok(o) => { 134 | if !visited.contains(id) { 135 | if load_branch(&o, store, their_heads, visited, missing)? 
{ 136 | their_head_found = true; 137 | } 138 | } 139 | } 140 | Err(ObjectParseError::MissingBlocks(m)) => { 141 | missing.extend(m); 142 | } 143 | Err(e) => return Err(e), 144 | } 145 | } 146 | } 147 | Ok(their_head_found) 148 | } 149 | 150 | // missing commits from our branch 151 | let mut missing = HashSet::new(); 152 | // our commits 153 | let mut ours = HashSet::new(); 154 | // their commits 155 | let mut theirs = HashSet::new(); 156 | 157 | // collect all commits reachable from our_heads 158 | // up to the root or until encountering a commit from their_heads 159 | for id in our_heads { 160 | let cobj = Object::load(*id, None, store)?; 161 | let mut visited = HashSet::new(); 162 | let their_head_found = 163 | load_branch(&cobj, store, their_heads, &mut visited, &mut missing)?; 164 | //debug_println!("<<< load_branch: {}", their_head_found); 165 | ours.extend(visited); // add if one of their_heads found 166 | } 167 | 168 | // collect all commits reachable from their_heads 169 | for id in their_heads { 170 | let cobj = Object::load(*id, None, store)?; 171 | let mut visited = HashSet::new(); 172 | let their_head_found = load_branch(&cobj, store, &[], &mut visited, &mut missing)?; 173 | //debug_println!("<<< load_branch: {}", their_head_found); 174 | theirs.extend(visited); // add if one of their_heads found 175 | } 176 | 177 | let mut result = &ours - &theirs; 178 | 179 | //debug_println!("!! ours: {:?}", ours); 180 | //debug_println!("!! theirs: {:?}", theirs); 181 | //debug_println!("!! result: {:?}", result); 182 | 183 | // remove their_commits from result 184 | let filter = Filter::from_u8_array(their_filter.f.as_slice(), their_filter.k.into()); 185 | for id in result.clone() { 186 | match id { 187 | Digest::Blake3Digest32(d) => { 188 | if filter.contains(&d) { 189 | result.remove(&id); 190 | } 191 | } 192 | } 193 | } 194 | //debug_println!("!! 
result filtered: {:?}", result); 195 | Ok(Vec::from_iter(result)) 196 | } 197 | } 198 | 199 | mod test { 200 | use std::collections::HashMap; 201 | 202 | use ed25519_dalek::*; 203 | use fastbloom_rs::{BloomFilter as Filter, FilterBuilder, Membership}; 204 | use rand::rngs::OsRng; 205 | 206 | use crate::branch::*; 207 | use crate::commit::*; 208 | use crate::object::*; 209 | use crate::repo; 210 | use crate::store::*; 211 | 212 | #[test] 213 | pub fn test_branch() { 214 | fn add_obj( 215 | content: ObjectContent, 216 | deps: Vec, 217 | expiry: Option, 218 | repo_pubkey: PubKey, 219 | repo_secret: SymKey, 220 | store: &mut impl RepoStore, 221 | ) -> ObjectRef { 222 | let max_object_size = 4000; 223 | let obj = Object::new( 224 | content, 225 | deps, 226 | expiry, 227 | max_object_size, 228 | repo_pubkey, 229 | repo_secret, 230 | ); 231 | println!(">>> add_obj"); 232 | println!(" id: {:?}", obj.id()); 233 | println!(" deps: {:?}", obj.deps()); 234 | obj.save(store).unwrap(); 235 | obj.reference().unwrap() 236 | } 237 | 238 | fn add_commit( 239 | branch: ObjectRef, 240 | author_privkey: PrivKey, 241 | author_pubkey: PubKey, 242 | seq: u32, 243 | deps: Vec, 244 | acks: Vec, 245 | body_ref: ObjectRef, 246 | repo_pubkey: PubKey, 247 | repo_secret: SymKey, 248 | store: &mut impl RepoStore, 249 | ) -> ObjectRef { 250 | let mut obj_deps: Vec = vec![]; 251 | obj_deps.extend(deps.iter().map(|r| r.id)); 252 | obj_deps.extend(acks.iter().map(|r| r.id)); 253 | 254 | let obj_ref = ObjectRef { 255 | id: ObjectId::Blake3Digest32([1; 32]), 256 | key: SymKey::ChaCha20Key([2; 32]), 257 | }; 258 | let refs = vec![obj_ref]; 259 | let metadata = vec![5u8; 55]; 260 | let expiry = None; 261 | 262 | let commit = Commit::new( 263 | author_privkey, 264 | author_pubkey, 265 | seq, 266 | branch, 267 | deps, 268 | acks, 269 | refs, 270 | metadata, 271 | body_ref, 272 | expiry, 273 | ) 274 | .unwrap(); 275 | //println!("commit: {:?}", commit); 276 | add_obj( 277 | ObjectContent::Commit(commit), 278 | obj_deps, 279 | expiry, 280 | repo_pubkey, 281 | repo_secret, 282 | store, 283 | ) 284 | } 285 | 286 | fn add_body_branch( 287 | branch: Branch, 288 | repo_pubkey: PubKey, 289 | repo_secret: SymKey, 290 | store: &mut impl RepoStore, 291 | ) -> ObjectRef { 292 | let deps = vec![]; 293 | let expiry = None; 294 | let body = CommitBody::Branch(branch); 295 | //println!("body: {:?}", body); 296 | add_obj( 297 | ObjectContent::CommitBody(body), 298 | deps, 299 | expiry, 300 | repo_pubkey, 301 | repo_secret, 302 | store, 303 | ) 304 | } 305 | 306 | fn add_body_trans( 307 | deps: Vec, 308 | repo_pubkey: PubKey, 309 | repo_secret: SymKey, 310 | store: &mut impl RepoStore, 311 | ) -> ObjectRef { 312 | let expiry = None; 313 | let content = [7u8; 777].to_vec(); 314 | let body = CommitBody::Transaction(Transaction::V0(content)); 315 | //println!("body: {:?}", body); 316 | add_obj( 317 | ObjectContent::CommitBody(body), 318 | deps, 319 | expiry, 320 | repo_pubkey, 321 | repo_secret, 322 | store, 323 | ) 324 | } 325 | 326 | fn add_body_ack( 327 | deps: Vec, 328 | repo_pubkey: PubKey, 329 | repo_secret: SymKey, 330 | store: &mut impl RepoStore, 331 | ) -> ObjectRef { 332 | let expiry = None; 333 | let body = CommitBody::Ack(Ack::V0()); 334 | //println!("body: {:?}", body); 335 | add_obj( 336 | ObjectContent::CommitBody(body), 337 | deps, 338 | expiry, 339 | repo_pubkey, 340 | repo_secret, 341 | store, 342 | ) 343 | } 344 | 345 | let mut store = HashMapRepoStore::new(); 346 | let mut rng = OsRng {}; 347 | 348 | // repo 349 | 350 | let 
repo_keypair: Keypair = Keypair::generate(&mut rng); 351 | println!( 352 | "repo private key: ({}) {:?}", 353 | repo_keypair.secret.as_bytes().len(), 354 | repo_keypair.secret.as_bytes() 355 | ); 356 | println!( 357 | "repo public key: ({}) {:?}", 358 | repo_keypair.public.as_bytes().len(), 359 | repo_keypair.public.as_bytes() 360 | ); 361 | let _repo_privkey = PrivKey::Ed25519PrivKey(repo_keypair.secret.to_bytes()); 362 | let repo_pubkey = PubKey::Ed25519PubKey(repo_keypair.public.to_bytes()); 363 | let repo_secret = SymKey::ChaCha20Key([9; 32]); 364 | 365 | // branch 366 | 367 | let branch_keypair: Keypair = Keypair::generate(&mut rng); 368 | println!("branch public key: {:?}", branch_keypair.public.as_bytes()); 369 | let branch_pubkey = PubKey::Ed25519PubKey(branch_keypair.public.to_bytes()); 370 | 371 | let member_keypair: Keypair = Keypair::generate(&mut rng); 372 | println!("member public key: {:?}", member_keypair.public.as_bytes()); 373 | let member_privkey = PrivKey::Ed25519PrivKey(member_keypair.secret.to_bytes()); 374 | let member_pubkey = PubKey::Ed25519PubKey(member_keypair.public.to_bytes()); 375 | 376 | let metadata = [66u8; 64].to_vec(); 377 | let commit_types = vec![CommitType::Ack, CommitType::Transaction]; 378 | let secret = SymKey::ChaCha20Key([0; 32]); 379 | 380 | let member = MemberV0::new(member_pubkey, commit_types, metadata.clone()); 381 | let members = vec![member]; 382 | let mut quorum = HashMap::new(); 383 | quorum.insert(CommitType::Transaction, 3); 384 | let ack_delay = RelTime::Minutes(3); 385 | let tags = [99u8; 32].to_vec(); 386 | let branch = Branch::new( 387 | branch_pubkey, 388 | branch_pubkey, 389 | secret, 390 | members, 391 | quorum, 392 | ack_delay, 393 | tags, 394 | metadata, 395 | ); 396 | //println!("branch: {:?}", branch); 397 | 398 | fn print_branch() { 399 | println!("branch deps/acks:"); 400 | println!(""); 401 | println!(" br"); 402 | println!(" / \\"); 403 | println!(" t1 t2"); 404 | println!(" / \\ / \\"); 405 | println!(" a3 t4<--t5-->(t1)"); 406 | println!(" / \\"); 407 | println!(" a6 a7"); 408 | println!(""); 409 | } 410 | 411 | print_branch(); 412 | 413 | // commit bodies 414 | 415 | let branch_body = add_body_branch( 416 | branch.clone(), 417 | repo_pubkey.clone(), 418 | repo_secret.clone(), 419 | &mut store, 420 | ); 421 | let ack_body = add_body_ack(vec![], repo_pubkey, repo_secret, &mut store); 422 | let trans_body = add_body_trans(vec![], repo_pubkey, repo_secret, &mut store); 423 | 424 | // create & add commits to store 425 | 426 | println!(">> br"); 427 | let br = add_commit( 428 | branch_body, 429 | member_privkey, 430 | member_pubkey, 431 | 0, 432 | vec![], 433 | vec![], 434 | branch_body, 435 | repo_pubkey, 436 | repo_secret, 437 | &mut store, 438 | ); 439 | 440 | println!(">> t1"); 441 | let t1 = add_commit( 442 | branch_body, 443 | member_privkey, 444 | member_pubkey, 445 | 1, 446 | vec![br], 447 | vec![], 448 | trans_body, 449 | repo_pubkey, 450 | repo_secret, 451 | &mut store, 452 | ); 453 | 454 | println!(">> t2"); 455 | let t2 = add_commit( 456 | branch_body, 457 | member_privkey, 458 | member_pubkey, 459 | 2, 460 | vec![br], 461 | vec![], 462 | trans_body, 463 | repo_pubkey, 464 | repo_secret, 465 | &mut store, 466 | ); 467 | 468 | println!(">> a3"); 469 | let a3 = add_commit( 470 | branch_body, 471 | member_privkey, 472 | member_pubkey, 473 | 3, 474 | vec![t1], 475 | vec![], 476 | ack_body, 477 | repo_pubkey, 478 | repo_secret, 479 | &mut store, 480 | ); 481 | 482 | println!(">> t4"); 483 | let t4 = add_commit( 484 | 
branch_body, 485 | member_privkey, 486 | member_pubkey, 487 | 4, 488 | vec![t2], 489 | vec![t1], 490 | trans_body, 491 | repo_pubkey, 492 | repo_secret, 493 | &mut store, 494 | ); 495 | 496 | println!(">> t5"); 497 | let t5 = add_commit( 498 | branch_body, 499 | member_privkey, 500 | member_pubkey, 501 | 5, 502 | vec![t1, t2], 503 | vec![t4], 504 | trans_body, 505 | repo_pubkey, 506 | repo_secret, 507 | &mut store, 508 | ); 509 | 510 | println!(">> a6"); 511 | let a6 = add_commit( 512 | branch_body, 513 | member_privkey, 514 | member_pubkey, 515 | 6, 516 | vec![t4], 517 | vec![], 518 | ack_body, 519 | repo_pubkey, 520 | repo_secret, 521 | &mut store, 522 | ); 523 | 524 | println!(">> a7"); 525 | let a7 = add_commit( 526 | branch_body, 527 | member_privkey, 528 | member_pubkey, 529 | 7, 530 | vec![t4], 531 | vec![], 532 | ack_body, 533 | repo_pubkey, 534 | repo_secret, 535 | &mut store, 536 | ); 537 | 538 | let c7 = Commit::load(a7, &store).unwrap(); 539 | c7.verify(&branch, &store).unwrap(); 540 | 541 | let mut filter = Filter::new(FilterBuilder::new(10, 0.01)); 542 | for commit_ref in [br, t1, t2, a3, t5, a6] { 543 | match commit_ref.id { 544 | ObjectId::Blake3Digest32(d) => filter.add(&d), 545 | } 546 | } 547 | let cfg = filter.config(); 548 | let their_commits = BloomFilter { 549 | k: cfg.hashes, 550 | f: filter.get_u8_array().to_vec(), 551 | }; 552 | 553 | print_branch(); 554 | println!(">> sync_req"); 555 | println!(" our_heads: [a3, t5, a6, a7]"); 556 | println!(" their_heads: [a3, t5]"); 557 | println!(" their_commits: [br, t1, t2, a3, t5, a6]"); 558 | 559 | let ids = Branch::sync_req( 560 | &[a3.id, t5.id, a6.id, a7.id], 561 | &[a3.id, t5.id], 562 | &their_commits, 563 | &store, 564 | ) 565 | .unwrap(); 566 | 567 | assert_eq!(ids.len(), 1); 568 | assert!(ids.contains(&a7.id)); 569 | } 570 | } 571 | -------------------------------------------------------------------------------- /lofire/src/brokerstore.rs: -------------------------------------------------------------------------------- 1 | use crate::store::{StorageError}; 2 | 3 | pub trait BrokerStore { 4 | /// Load a property from the store. 5 | fn get(&self, prefix: u8, key: &Vec, suffix: Option) -> Result, StorageError>; 6 | 7 | /// Load all the values of a property from the store. 8 | fn get_all( 9 | &self, 10 | prefix: u8, 11 | key: &Vec, 12 | suffix: Option, 13 | ) -> Result>, StorageError>; 14 | 15 | /// Check if a specific value exists for a property from the store. 16 | fn has_property_value( 17 | &self, 18 | prefix: u8, 19 | key: &Vec, 20 | suffix: Option, 21 | value: Vec, 22 | ) -> Result<(), StorageError>; 23 | 24 | /// Save a property value to the store. 25 | fn put( 26 | &self, 27 | prefix: u8, 28 | key: &Vec, 29 | suffix: Option, 30 | value: Vec, 31 | ) -> Result<(), StorageError>; 32 | 33 | /// Replace the property of a key (single value) to the store. 34 | fn replace( 35 | &self, 36 | prefix: u8, 37 | key: &Vec, 38 | suffix: Option, 39 | value: Vec, 40 | ) -> Result<(), StorageError>; 41 | 42 | /// Delete a property from the store. 43 | fn del(&self, prefix: u8, key: &Vec, suffix: Option) -> Result<(), StorageError>; 44 | 45 | /// Delete all properties of a key from the store. 46 | fn del_all(&self, prefix: u8, key: &Vec, all_suffixes: &[u8]) -> Result<(), StorageError>; 47 | 48 | /// Delete a specific value for a property from the store. 
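// Note: implementations flatten the (prefix, key, suffix) triple into a
// single storage key; see LmdbBrokerStore::compute_property, which
// concatenates the prefix byte (entity type), the entity key bytes, and the
// optional one-byte property suffix.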
49 | fn del_property_value( 50 | &self, 51 | prefix: u8, 52 | key: &Vec, 53 | suffix: Option, 54 | value: Vec, 55 | ) -> Result<(), StorageError>; 56 | } 57 | -------------------------------------------------------------------------------- /lofire/src/commit.rs: -------------------------------------------------------------------------------- 1 | //! Commit 2 | 3 | use debug_print::*; 4 | use ed25519_dalek::*; 5 | 6 | use std::collections::HashSet; 7 | use std::iter::FromIterator; 8 | 9 | use crate::object::*; 10 | use crate::store::*; 11 | use crate::types::*; 12 | 13 | #[derive(Debug)] 14 | pub enum CommitLoadError { 15 | MissingBlocks(Vec), 16 | ObjectParseError, 17 | DeserializeError, 18 | } 19 | 20 | #[derive(Debug)] 21 | pub enum CommitVerifyError { 22 | InvalidSignature, 23 | PermissionDenied, 24 | BodyLoadError(CommitLoadError), 25 | DepLoadError(CommitLoadError), 26 | } 27 | impl CommitBody { 28 | /// Get CommitType corresponding to CommitBody 29 | pub fn to_type(&self) -> CommitType { 30 | match self { 31 | CommitBody::Ack(_) => CommitType::Ack, 32 | CommitBody::AddBranch(_) => CommitType::AddBranch, 33 | CommitBody::AddMembers(_) => CommitType::AddMembers, 34 | CommitBody::Branch(_) => CommitType::Branch, 35 | CommitBody::EndOfBranch(_) => CommitType::EndOfBranch, 36 | CommitBody::RemoveBranch(_) => CommitType::RemoveBranch, 37 | CommitBody::Repository(_) => CommitType::Repository, 38 | CommitBody::Snapshot(_) => CommitType::Snapshot, 39 | CommitBody::Transaction(_) => CommitType::Transaction, 40 | } 41 | } 42 | } 43 | 44 | impl CommitV0 { 45 | /// New commit 46 | pub fn new( 47 | author_privkey: PrivKey, 48 | author_pubkey: PubKey, 49 | seq: u32, 50 | branch: ObjectRef, 51 | deps: Vec, 52 | acks: Vec, 53 | refs: Vec, 54 | metadata: Vec, 55 | body: ObjectRef, 56 | expiry: Option, 57 | ) -> Result { 58 | let content = CommitContentV0 { 59 | author: author_pubkey, 60 | seq, 61 | branch, 62 | deps, 63 | acks, 64 | refs, 65 | metadata, 66 | body, 67 | expiry, 68 | }; 69 | let content_ser = serde_bare::to_vec(&content).unwrap(); 70 | 71 | // sign commit 72 | let kp = match (author_privkey, author_pubkey) { 73 | (PrivKey::Ed25519PrivKey(sk), PubKey::Ed25519PubKey(pk)) => [sk, pk].concat(), 74 | }; 75 | let keypair = Keypair::from_bytes(kp.as_slice())?; 76 | let sig_bytes = keypair.sign(content_ser.as_slice()).to_bytes(); 77 | let mut it = sig_bytes.chunks_exact(32); 78 | let mut ss: Ed25519Sig = [[0; 32], [0; 32]]; 79 | ss[0].copy_from_slice(it.next().unwrap()); 80 | ss[1].copy_from_slice(it.next().unwrap()); 81 | let sig = Sig::Ed25519Sig(ss); 82 | Ok(CommitV0 { 83 | content, 84 | sig, 85 | id: None, 86 | key: None, 87 | }) 88 | } 89 | } 90 | 91 | impl Commit { 92 | /// New commit 93 | pub fn new( 94 | author_privkey: PrivKey, 95 | author_pubkey: PubKey, 96 | seq: u32, 97 | branch: ObjectRef, 98 | deps: Vec, 99 | acks: Vec, 100 | refs: Vec, 101 | metadata: Vec, 102 | body: ObjectRef, 103 | expiry: Option, 104 | ) -> Result { 105 | CommitV0::new( 106 | author_privkey, 107 | author_pubkey, 108 | seq, 109 | branch, 110 | deps, 111 | acks, 112 | refs, 113 | metadata, 114 | body, 115 | expiry, 116 | ) 117 | .map(|c| Commit::V0(c)) 118 | } 119 | 120 | /// Load commit from store 121 | pub fn load(commit_ref: ObjectRef, store: &impl RepoStore) -> Result { 122 | let (id, key) = (commit_ref.id, commit_ref.key); 123 | match Object::load(id, Some(key), store) { 124 | Ok(obj) => { 125 | let content = obj 126 | .content() 127 | .map_err(|_e| CommitLoadError::ObjectParseError)?; 128 | let mut commit 
= match content { 129 | ObjectContent::Commit(c) => c, 130 | _ => return Err(CommitLoadError::DeserializeError), 131 | }; 132 | commit.set_id(id); 133 | commit.set_key(key); 134 | Ok(commit) 135 | } 136 | Err(ObjectParseError::MissingBlocks(missing)) => { 137 | Err(CommitLoadError::MissingBlocks(missing)) 138 | } 139 | Err(_) => Err(CommitLoadError::ObjectParseError), 140 | } 141 | } 142 | 143 | /// Load commit body from store 144 | pub fn load_body(&self, store: &impl RepoStore) -> Result { 145 | let content = self.content(); 146 | let (id, key) = (content.body.id, content.body.key); 147 | let obj = Object::load(id.clone(), Some(key.clone()), store).map_err(|e| match e { 148 | ObjectParseError::MissingBlocks(missing) => CommitLoadError::MissingBlocks(missing), 149 | _ => CommitLoadError::ObjectParseError, 150 | })?; 151 | let content = obj 152 | .content() 153 | .map_err(|_e| CommitLoadError::ObjectParseError)?; 154 | match content { 155 | ObjectContent::CommitBody(body) => Ok(body), 156 | _ => Err(CommitLoadError::DeserializeError), 157 | } 158 | } 159 | 160 | /// Get ID of parent `Object` 161 | pub fn id(&self) -> Option { 162 | match self { 163 | Commit::V0(c) => c.id, 164 | } 165 | } 166 | 167 | /// Set ID of parent `Object` 168 | pub fn set_id(&mut self, id: ObjectId) { 169 | match self { 170 | Commit::V0(c) => c.id = Some(id), 171 | } 172 | } 173 | 174 | /// Get key of parent `Object` 175 | pub fn key(&self) -> Option { 176 | match self { 177 | Commit::V0(c) => c.key, 178 | } 179 | } 180 | 181 | /// Set key of parent `Object` 182 | pub fn set_key(&mut self, key: SymKey) { 183 | match self { 184 | Commit::V0(c) => c.key = Some(key), 185 | } 186 | } 187 | 188 | /// Get commit signature 189 | pub fn sig(&self) -> &Sig { 190 | match self { 191 | Commit::V0(c) => &c.sig, 192 | } 193 | } 194 | 195 | /// Get commit content 196 | pub fn content(&self) -> &CommitContentV0 { 197 | match self { 198 | Commit::V0(c) => &c.content, 199 | } 200 | } 201 | 202 | /// Get acks 203 | pub fn acks(&self) -> Vec { 204 | match self { 205 | Commit::V0(c) => c.content.acks.clone(), 206 | } 207 | } 208 | 209 | /// Get deps 210 | pub fn deps(&self) -> Vec { 211 | match self { 212 | Commit::V0(c) => c.content.deps.clone(), 213 | } 214 | } 215 | 216 | /// Get all direct commit dependencies of the commit (`deps`, `acks`) 217 | pub fn deps_acks(&self) -> Vec { 218 | match self { 219 | Commit::V0(c) => [c.content.acks.clone(), c.content.deps.clone()].concat(), 220 | } 221 | } 222 | 223 | /// Get seq 224 | pub fn seq(&self) -> u32 { 225 | match self { 226 | Commit::V0(c) => c.content.seq, 227 | } 228 | } 229 | 230 | /// Verify commit signature 231 | pub fn verify_sig(&self) -> Result<(), SignatureError> { 232 | let c = match self { 233 | Commit::V0(c) => c, 234 | }; 235 | let content_ser = serde_bare::to_vec(&c.content).unwrap(); 236 | let pubkey = match c.content.author { 237 | PubKey::Ed25519PubKey(pk) => pk, 238 | }; 239 | let pk = PublicKey::from_bytes(&pubkey)?; 240 | let sig_bytes = match c.sig { 241 | Sig::Ed25519Sig(ss) => [ss[0], ss[1]].concat(), 242 | }; 243 | let sig = Signature::from_bytes(&sig_bytes)?; 244 | pk.verify_strict(&content_ser, &sig) 245 | } 246 | 247 | /// Verify commit permissions 248 | pub fn verify_perm(&self, body: &CommitBody, branch: &Branch) -> Result<(), CommitVerifyError> { 249 | let content = self.content(); 250 | match branch.get_member(&content.author) { 251 | Some(m) => { 252 | if m.has_perm(body.to_type()) { 253 | return Ok(()); 254 | } 255 | } 256 | None => (), 257 | } 258 | 
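// fall through: the author is either not a member of the branch, or a
// member without permission for this commit type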
Err(CommitVerifyError::PermissionDenied) 259 | } 260 | 261 | /// Verify if the commit's `body` and dependencies (`deps` & `acks`) are available in the `store` 262 | pub fn verify_deps(&self, store: &impl RepoStore) -> Result, CommitLoadError> { 263 | //debug_println!(">> verify_deps: #{}", self.seq()); 264 | /// Load `Commit`s of a `Branch` from the `RepoStore` starting from the given `Commit`, 265 | /// and collect missing `ObjectId`s 266 | fn load_branch( 267 | commit: &Commit, 268 | store: &impl RepoStore, 269 | visited: &mut HashSet, 270 | missing: &mut HashSet, 271 | ) -> Result<(), CommitLoadError> { 272 | //debug_println!(">>> load_branch: #{}", commit.seq()); 273 | // the commit verify_deps() was called on may not have an ID set, 274 | // but the commits loaded from store should have it 275 | match commit.id() { 276 | Some(id) => { 277 | if visited.contains(&id) { 278 | return Ok(()); 279 | } 280 | visited.insert(id); 281 | } 282 | None => (), 283 | } 284 | 285 | // load body & check if it's the Branch commit at the root 286 | let is_root = match commit.load_body(store) { 287 | Ok(body) => body.to_type() == CommitType::Branch, 288 | Err(CommitLoadError::MissingBlocks(m)) => { 289 | missing.extend(m); 290 | false 291 | } 292 | Err(e) => return Err(e), 293 | }; 294 | debug_println!("!!! is_root: {}", is_root); 295 | 296 | // load deps 297 | if !is_root { 298 | for dep in commit.deps_acks() { 299 | match Commit::load(dep, store) { 300 | Ok(c) => { 301 | load_branch(&c, store, visited, missing)?; 302 | } 303 | Err(CommitLoadError::MissingBlocks(m)) => { 304 | missing.extend(m); 305 | } 306 | Err(e) => return Err(e), 307 | } 308 | } 309 | } 310 | Ok(()) 311 | } 312 | 313 | let mut visited = HashSet::new(); 314 | let mut missing = HashSet::new(); 315 | load_branch(self, store, &mut visited, &mut missing)?; 316 | 317 | if !missing.is_empty() { 318 | return Err(CommitLoadError::MissingBlocks(Vec::from_iter(missing))); 319 | } 320 | Ok(Vec::from_iter(visited)) 321 | } 322 | 323 | /// Verify signature, permissions, and dependencies 324 | pub fn verify(&self, branch: &Branch, store: &impl RepoStore) -> Result<(), CommitVerifyError> { 325 | self.verify_sig() 326 | .map_err(|_e| CommitVerifyError::InvalidSignature)?; 327 | let body = self 328 | .load_body(store) 329 | .map_err(|e| CommitVerifyError::BodyLoadError(e))?; 330 | self.verify_perm(&body, branch)?; 331 | self.verify_deps(store) 332 | .map_err(|e| CommitVerifyError::DepLoadError(e))?; 333 | Ok(()) 334 | } 335 | } 336 | 337 | mod test { 338 | use std::collections::HashMap; 339 | 340 | use ed25519_dalek::*; 341 | use rand::rngs::OsRng; 342 | 343 | use crate::branch::*; 344 | use crate::commit::*; 345 | use crate::store::*; 346 | use crate::types::*; 347 | 348 | #[test] 349 | pub fn test_commit() { 350 | let mut csprng = OsRng {}; 351 | let keypair: Keypair = Keypair::generate(&mut csprng); 352 | println!( 353 | "private key: ({}) {:?}", 354 | keypair.secret.as_bytes().len(), 355 | keypair.secret.as_bytes() 356 | ); 357 | println!( 358 | "public key: ({}) {:?}", 359 | keypair.public.as_bytes().len(), 360 | keypair.public.as_bytes() 361 | ); 362 | let ed_priv_key = keypair.secret.to_bytes(); 363 | let ed_pub_key = keypair.public.to_bytes(); 364 | let priv_key = PrivKey::Ed25519PrivKey(ed_priv_key); 365 | let pub_key = PubKey::Ed25519PubKey(ed_pub_key); 366 | let seq = 3; 367 | let obj_ref = ObjectRef { 368 | id: ObjectId::Blake3Digest32([1; 32]), 369 | key: SymKey::ChaCha20Key([2; 32]), 370 | }; 371 | let obj_refs = vec![obj_ref]; 372 | 
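// reuse the same dummy reference for branch, deps, acks, refs and body; nothing is saved to the store yet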
let branch = obj_ref.clone(); 373 | let deps = obj_refs.clone(); 374 | let acks = obj_refs.clone(); 375 | let refs = obj_refs.clone(); 376 | let metadata = vec![1, 2, 3]; 377 | let body_ref = obj_ref.clone(); 378 | let expiry = Some(2342); 379 | 380 | let commit = Commit::new( 381 | priv_key, pub_key, seq, branch, deps, acks, refs, metadata, body_ref, expiry, 382 | ) 383 | .unwrap(); 384 | println!("commit: {:?}", commit); 385 | 386 | let store = HashMapRepoStore::new(); 387 | let metadata = [66u8; 64].to_vec(); 388 | let commit_types = vec![CommitType::Ack, CommitType::Transaction]; 389 | let key: [u8; 32] = [0; 32]; 390 | let secret = SymKey::ChaCha20Key(key); 391 | let member = MemberV0::new(pub_key, commit_types, metadata.clone()); 392 | let members = vec![member]; 393 | let mut quorum = HashMap::new(); 394 | quorum.insert(CommitType::Transaction, 3); 395 | let ack_delay = RelTime::Minutes(3); 396 | let tags = [99u8; 32].to_vec(); 397 | let branch = Branch::new( 398 | pub_key.clone(), 399 | pub_key.clone(), 400 | secret, 401 | members, 402 | quorum, 403 | ack_delay, 404 | tags, 405 | metadata, 406 | ); 407 | //println!("branch: {:?}", branch); 408 | let body = CommitBody::Ack(Ack::V0()); 409 | //println!("body: {:?}", body); 410 | 411 | match commit.load_body(&store) { 412 | Ok(_b) => panic!("Body should not exist"), 413 | Err(CommitLoadError::MissingBlocks(missing)) => { 414 | assert_eq!(missing.len(), 1); 415 | } 416 | Err(e) => panic!("Commit verify error: {:?}", e), 417 | } 418 | 419 | let content = commit.content(); 420 | println!("content: {:?}", content); 421 | 422 | commit.verify_sig().expect("Invalid signature"); 423 | commit 424 | .verify_perm(&body, &branch) 425 | .expect("Permission denied"); 426 | 427 | match commit.verify_deps(&store) { 428 | Ok(_) => panic!("Commit should not be Ok"), 429 | Err(CommitLoadError::MissingBlocks(missing)) => { 430 | assert_eq!(missing.len(), 1); 431 | } 432 | Err(e) => panic!("Commit verify error: {:?}", e), 433 | } 434 | 435 | match commit.verify(&branch, &store) { 436 | Ok(_) => panic!("Commit should not be Ok"), 437 | Err(CommitVerifyError::BodyLoadError(CommitLoadError::MissingBlocks(missing))) => { 438 | assert_eq!(missing.len(), 1); 439 | } 440 | Err(e) => panic!("Commit verify error: {:?}", e), 441 | } 442 | } 443 | } 444 | -------------------------------------------------------------------------------- /lofire/src/errors.rs: -------------------------------------------------------------------------------- 1 | //! Errors 2 | 3 | pub enum LofireError { 4 | InvalidSignature, 5 | SerializationError, 6 | } 7 | 8 | impl From for LofireError { 9 | fn from(e: serde_bare::error::Error) -> Self { 10 | LofireError::SerializationError 11 | } 12 | } 13 | 14 | impl From for LofireError { 15 | fn from(e: ed25519_dalek::ed25519::Error) -> Self { 16 | LofireError::InvalidSignature 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /lofire/src/lib.rs: -------------------------------------------------------------------------------- 1 | pub mod types; 2 | 3 | pub mod store; 4 | 5 | pub mod block; 6 | 7 | pub mod object; 8 | 9 | pub mod commit; 10 | 11 | pub mod branch; 12 | 13 | pub mod repo; 14 | 15 | pub mod utils; 16 | 17 | pub mod errors; 18 | 19 | pub mod brokerstore; 20 | -------------------------------------------------------------------------------- /lofire/src/object.rs: -------------------------------------------------------------------------------- 1 | //! 
Merkle hash tree of Objects 2 | 3 | use std::collections::{HashMap, HashSet}; 4 | 5 | use debug_print::*; 6 | 7 | use chacha20::cipher::{KeyIvInit, StreamCipher}; 8 | use chacha20::ChaCha20; 9 | 10 | use crate::store::*; 11 | use crate::types::*; 12 | 13 | /// Size of a serialized empty Block 14 | const EMPTY_BLOCK_SIZE: usize = 12; 15 | /// Size of a serialized BlockId 16 | const BLOCK_ID_SIZE: usize = 33; 17 | /// Size of serialized SymKey 18 | const BLOCK_KEY_SIZE: usize = 33; 19 | /// Size of serialized Object with deps reference. 20 | const EMPTY_ROOT_SIZE_DEPSREF: usize = 77; 21 | /// Extra size needed if depsRef used instead of deps list. 22 | const DEPSREF_OVERLOAD: usize = EMPTY_ROOT_SIZE_DEPSREF - EMPTY_BLOCK_SIZE; 23 | /// Varint extra bytes when reaching the maximum value we will ever use 24 | const BIG_VARINT_EXTRA: usize = 3; 25 | /// Varint extra bytes when reaching the maximum size of data byte arrays. 26 | const DATA_VARINT_EXTRA: usize = 4; 27 | /// Max extra space used by the deps list 28 | const MAX_DEPS_SIZE: usize = 8 * BLOCK_ID_SIZE; 29 | 30 | #[derive(Debug)] 31 | pub struct Object { 32 | /// Blocks of the Object (nodes of the tree) 33 | blocks: Vec, 34 | 35 | /// Dependencies 36 | deps: Vec, 37 | } 38 | 39 | /// Object parsing errors 40 | #[derive(Debug)] 41 | pub enum ObjectParseError { 42 | /// Missing blocks 43 | MissingBlocks(Vec), 44 | /// Missing root key 45 | MissingRootKey, 46 | /// Invalid BlockId encountered in the tree 47 | InvalidBlockId, 48 | /// Too many or too few children of a block 49 | InvalidChildren, 50 | /// Number of keys does not match number of children of a block 51 | InvalidKeys, 52 | /// Invalid DepList object content 53 | InvalidDeps, 54 | /// Error deserializing content of a block 55 | BlockDeserializeError, 56 | /// Error deserializing content of the object 57 | ObjectDeserializeError, 58 | } 59 | 60 | /// Object copy error 61 | #[derive(Debug)] 62 | pub enum ObjectCopyError { 63 | NotFound, 64 | ParseError, 65 | } 66 | 67 | impl Object { 68 | fn convergence_key(repo_pubkey: PubKey, repo_secret: SymKey) -> [u8; blake3::OUT_LEN] { 69 | let key_material = match (repo_pubkey, repo_secret) { 70 | (PubKey::Ed25519PubKey(pubkey), SymKey::ChaCha20Key(secret)) => { 71 | [pubkey, secret].concat() 72 | } 73 | }; 74 | blake3::derive_key("LoFiRe Data BLAKE3 key", key_material.as_slice()) 75 | } 76 | 77 | fn make_block( 78 | content: &[u8], 79 | conv_key: &[u8; blake3::OUT_LEN], 80 | children: Vec, 81 | deps: ObjectDeps, 82 | expiry: Option, 83 | ) -> Block { 84 | let key_hash = blake3::keyed_hash(conv_key, content); 85 | let nonce = [0u8; 12]; 86 | let key = key_hash.as_bytes(); 87 | let mut cipher = ChaCha20::new(key.into(), &nonce.into()); 88 | let mut content_enc = Vec::from(content); 89 | let mut content_enc_slice = &mut content_enc.as_mut_slice(); 90 | cipher.apply_keystream(&mut content_enc_slice); 91 | let key = SymKey::ChaCha20Key(key.clone()); 92 | let block = Block::new(children, deps, expiry, content_enc, Some(key)); 93 | //debug_println!(">>> make_block:"); 94 | //debug_println!("!! id: {:?}", obj.id()); 95 | //debug_println!("!! 
children: ({}) {:?}", children.len(), children); 96 | block 97 | } 98 | 99 | fn make_deps( 100 | deps_vec: Vec, 101 | object_size: usize, 102 | repo_pubkey: PubKey, 103 | repo_secret: SymKey, 104 | ) -> ObjectDeps { 105 | if deps_vec.len() <= 8 { 106 | ObjectDeps::ObjectIdList(deps_vec) 107 | } else { 108 | let dep_list = DepList::V0(deps_vec); 109 | let dep_obj = Object::new( 110 | ObjectContent::DepList(dep_list), 111 | vec![], 112 | None, 113 | object_size, 114 | repo_pubkey, 115 | repo_secret, 116 | ); 117 | let dep_ref = ObjectRef { 118 | id: dep_obj.id(), 119 | key: dep_obj.key().unwrap(), 120 | }; 121 | ObjectDeps::DepListRef(dep_ref) 122 | } 123 | } 124 | 125 | /// Build tree from leaves, returns parent nodes 126 | fn make_tree( 127 | leaves: &[Block], 128 | conv_key: &ChaCha20Key, 129 | root_deps: &ObjectDeps, 130 | expiry: Option, 131 | arity: usize, 132 | ) -> Vec { 133 | let mut parents = vec![]; 134 | let chunks = leaves.chunks(arity); 135 | let mut it = chunks.peekable(); 136 | while let Some(nodes) = it.next() { 137 | let keys = nodes.iter().map(|block| block.key().unwrap()).collect(); 138 | let children = nodes.iter().map(|block| block.id()).collect(); 139 | let content = BlockContentV0::InternalNode(keys); 140 | let content_ser = serde_bare::to_vec(&content).unwrap(); 141 | let child_deps = ObjectDeps::ObjectIdList(vec![]); 142 | let deps = if parents.is_empty() && it.peek().is_none() { 143 | root_deps.clone() 144 | } else { 145 | child_deps 146 | }; 147 | parents.push(Self::make_block( 148 | content_ser.as_slice(), 149 | conv_key, 150 | children, 151 | deps, 152 | expiry, 153 | )); 154 | } 155 | //debug_println!("parents += {}", parents.len()); 156 | 157 | if 1 < parents.len() { 158 | let mut great_parents = 159 | Self::make_tree(parents.as_slice(), conv_key, root_deps, expiry, arity); 160 | parents.append(&mut great_parents); 161 | } 162 | parents 163 | } 164 | 165 | /// Create new Object from given content 166 | /// 167 | /// The Object is chunked and stored in a Merkle tree 168 | /// The arity of the Merkle tree is the maximum that fits in the given `max_object_size` 169 | /// 170 | /// Arguments: 171 | /// * `content`: Object content 172 | /// * `deps`: Dependencies of the object 173 | /// * `block_size`: Desired block size for chunking content, rounded up to nearest valid block size 174 | /// * `repo_pubkey`: Repository public key 175 | /// * `repo_secret`: Repository secret 176 | pub fn new( 177 | content: ObjectContent, 178 | deps: Vec, 179 | expiry: Option, 180 | block_size: usize, 181 | repo_pubkey: PubKey, 182 | repo_secret: SymKey, 183 | ) -> Object { 184 | // create blocks by chunking + encrypting content 185 | let valid_block_size = store_valid_value_size(block_size); 186 | let data_chunk_size = valid_block_size - EMPTY_BLOCK_SIZE - DATA_VARINT_EXTRA; 187 | 188 | let mut blocks: Vec = vec![]; 189 | let conv_key = Self::convergence_key(repo_pubkey, repo_secret); 190 | 191 | let obj_deps = Self::make_deps(deps.clone(), valid_block_size, repo_pubkey, repo_secret); 192 | 193 | let content_ser = serde_bare::to_vec(&content).unwrap(); 194 | 195 | if EMPTY_BLOCK_SIZE + DATA_VARINT_EXTRA + BLOCK_ID_SIZE * deps.len() + content_ser.len() 196 | <= valid_block_size 197 | { 198 | // content fits in root node 199 | let data_chunk = BlockContentV0::DataChunk(content_ser.clone()); 200 | let content_ser = serde_bare::to_vec(&data_chunk).unwrap(); 201 | blocks.push(Self::make_block( 202 | content_ser.as_slice(), 203 | &conv_key, 204 | vec![], 205 | obj_deps, 206 | expiry, 207 
| )); 208 | } else { 209 | // chunk content and create leaf nodes 210 | for chunk in content_ser.chunks(data_chunk_size) { 211 | let data_chunk = BlockContentV0::DataChunk(chunk.to_vec()); 212 | let content_ser = serde_bare::to_vec(&data_chunk).unwrap(); 213 | blocks.push(Self::make_block( 214 | content_ser.as_slice(), 215 | &conv_key, 216 | vec![], 217 | ObjectDeps::ObjectIdList(vec![]), 218 | expiry, 219 | )); 220 | } 221 | 222 | // internal nodes 223 | // arity: max number of ObjectRefs that fit inside an InternalNode Object within the object_size limit 224 | let arity: usize = 225 | (valid_block_size - EMPTY_BLOCK_SIZE - BIG_VARINT_EXTRA * 2 - MAX_DEPS_SIZE) 226 | / (BLOCK_ID_SIZE + BLOCK_KEY_SIZE); 227 | let mut parents = 228 | Self::make_tree(blocks.as_slice(), &conv_key, &obj_deps, expiry, arity); 229 | blocks.append(&mut parents); 230 | } 231 | 232 | Object { blocks, deps } 233 | } 234 | 235 | pub fn copy( 236 | &self, 237 | expiry: Option, 238 | repo_pubkey: PubKey, 239 | repo_secret: SymKey, 240 | ) -> Result { 241 | // getting the old object from store 242 | let leaves: Vec = self.leaves().map_err(|_e| ObjectCopyError::ParseError)?; 243 | 244 | let conv_key = Self::convergence_key(repo_pubkey, repo_secret); 245 | let block_size = leaves.first().unwrap().content().len(); 246 | let valid_block_size = store_valid_value_size(block_size); 247 | 248 | let mut blocks: Vec = vec![]; 249 | for block in leaves { 250 | let mut copy = block.clone(); 251 | copy.set_expiry(expiry); 252 | blocks.push(copy); 253 | } 254 | 255 | // internal nodes 256 | // arity: max number of ObjectRefs that fit inside an InternalNode Object within the object_size limit 257 | let arity: usize = 258 | (valid_block_size - EMPTY_BLOCK_SIZE - BIG_VARINT_EXTRA * 2 - MAX_DEPS_SIZE) 259 | / (BLOCK_ID_SIZE + BLOCK_KEY_SIZE); 260 | let mut parents = Self::make_tree( 261 | blocks.as_slice(), 262 | &conv_key, 263 | self.root().deps(), 264 | expiry, 265 | arity, 266 | ); 267 | blocks.append(&mut parents); 268 | 269 | Ok(Object { 270 | blocks, 271 | deps: self.deps().clone(), 272 | }) 273 | } 274 | 275 | /// Load an Object from RepoStore 276 | /// 277 | /// Returns Ok(Object) or an Err(Vec) of missing BlockIds 278 | pub fn load( 279 | id: ObjectId, 280 | key: Option, 281 | store: &impl RepoStore, 282 | ) -> Result { 283 | fn load_tree( 284 | parents: Vec, 285 | store: &impl RepoStore, 286 | blocks: &mut Vec, 287 | missing: &mut Vec, 288 | ) { 289 | let mut children: Vec = vec![]; 290 | for id in parents { 291 | match store.get(&id) { 292 | Ok(block) => { 293 | blocks.insert(0, block.clone()); 294 | match block { 295 | Block::V0(o) => { 296 | children.extend(o.children.iter().rev()); 297 | } 298 | } 299 | } 300 | Err(_) => missing.push(id.clone()), 301 | } 302 | } 303 | if !children.is_empty() { 304 | load_tree(children, store, blocks, missing); 305 | } 306 | } 307 | 308 | let mut blocks: Vec = vec![]; 309 | let mut missing: Vec = vec![]; 310 | 311 | load_tree(vec![id], store, &mut blocks, &mut missing); 312 | 313 | if !missing.is_empty() { 314 | return Err(ObjectParseError::MissingBlocks(missing)); 315 | } 316 | 317 | let root = blocks.last_mut().unwrap(); 318 | if key.is_some() { 319 | root.set_key(key); 320 | } 321 | 322 | let deps = match root.deps().clone() { 323 | ObjectDeps::ObjectIdList(deps_vec) => deps_vec, 324 | ObjectDeps::DepListRef(deps_ref) => { 325 | let obj = Object::load(deps_ref.id, Some(deps_ref.key), store)?; 326 | match obj.content()? 
{ 327 | ObjectContent::DepList(DepList::V0(deps_vec)) => deps_vec, 328 | _ => return Err(ObjectParseError::InvalidDeps), 329 | } 330 | } 331 | }; 332 | 333 | Ok(Object { blocks, deps }) 334 | } 335 | 336 | /// Save blocks of the object in the store 337 | pub fn save(&self, store: &mut impl RepoStore) -> Result<(), StorageError> { 338 | let mut deduplicated: HashSet = HashSet::new(); 339 | for block in &self.blocks { 340 | let id = block.id(); 341 | if deduplicated.get(&id).is_none() { 342 | store.put(block)?; 343 | deduplicated.insert(id); 344 | } 345 | } 346 | Ok(()) 347 | } 348 | 349 | /// Get the ID of the Object 350 | pub fn id(&self) -> ObjectId { 351 | self.blocks.last().unwrap().id() 352 | } 353 | 354 | /// Get the key for the Object 355 | pub fn key(&self) -> Option { 356 | self.blocks.last().unwrap().key() 357 | } 358 | 359 | /// Get an `ObjectRef` for the root object 360 | pub fn reference(&self) -> Option { 361 | if self.key().is_some() { 362 | Some(ObjectRef { 363 | id: self.id(), 364 | key: self.key().unwrap(), 365 | }) 366 | } else { 367 | None 368 | } 369 | } 370 | 371 | pub fn root(&self) -> &Block { 372 | self.blocks.last().unwrap() 373 | } 374 | 375 | pub fn expiry(&self) -> Option { 376 | self.blocks.last().unwrap().expiry() 377 | } 378 | 379 | pub fn deps(&self) -> &Vec { 380 | &self.deps 381 | } 382 | 383 | pub fn blocks(&self) -> &Vec { 384 | &self.blocks 385 | } 386 | 387 | pub fn to_hashmap(&self) -> HashMap { 388 | let mut map: HashMap = HashMap::new(); 389 | for block in &self.blocks { 390 | map.insert(block.id(), block.clone()); 391 | } 392 | map 393 | } 394 | 395 | /// Collect leaves from the tree 396 | fn collect_leaves( 397 | blocks: &Vec, 398 | parents: &Vec<(ObjectId, SymKey)>, 399 | parent_index: usize, 400 | leaves: &mut Option<&mut Vec>, 401 | obj_content: &mut Option<&mut Vec>, 402 | ) -> Result<(), ObjectParseError> { 403 | /*debug_println!( 404 | ">>> collect_leaves: #{}..{}", 405 | parent_index, 406 | parent_index + parents.len() - 1 407 | );*/ 408 | let mut children: Vec<(ObjectId, SymKey)> = vec![]; 409 | let mut i = parent_index; 410 | 411 | for (id, key) in parents { 412 | //debug_println!("!!! parent: #{}", i); 413 | let block = &blocks[i]; 414 | i += 1; 415 | 416 | // verify object ID 417 | if *id != block.id() { 418 | debug_println!("Invalid ObjectId.\nExp: {:?}\nGot: {:?}", *id, block.id()); 419 | return Err(ObjectParseError::InvalidBlockId); 420 | } 421 | 422 | match block { 423 | Block::V0(b) => { 424 | // decrypt content 425 | let mut content_dec = b.content.clone(); 426 | match key { 427 | SymKey::ChaCha20Key(key) => { 428 | let nonce = [0u8; 12]; 429 | let mut cipher = ChaCha20::new(key.into(), &nonce.into()); 430 | let mut content_dec_slice = &mut content_dec.as_mut_slice(); 431 | cipher.apply_keystream(&mut content_dec_slice); 432 | } 433 | } 434 | 435 | // deserialize content 436 | let content: BlockContentV0; 437 | match serde_bare::from_slice(content_dec.as_slice()) { 438 | Ok(c) => content = c, 439 | Err(e) => { 440 | debug_println!("Block deserialize error: {}", e); 441 | return Err(ObjectParseError::BlockDeserializeError); 442 | } 443 | } 444 | 445 | // parse content 446 | match content { 447 | BlockContentV0::InternalNode(keys) => { 448 | if keys.len() != b.children.len() { 449 | debug_println!( 450 | "Invalid keys length: got {}, expected {}", 451 | keys.len(), 452 | b.children.len() 453 | ); 454 | debug_println!("!!! children: {:?}", b.children); 455 | debug_println!("!!! 
keys: {:?}", keys); 456 | return Err(ObjectParseError::InvalidKeys); 457 | } 458 | 459 | for (id, key) in b.children.iter().zip(keys.iter()) { 460 | children.push((id.clone(), key.clone())); 461 | } 462 | } 463 | BlockContentV0::DataChunk(chunk) => { 464 | if leaves.is_some() { 465 | let mut leaf = block.clone(); 466 | leaf.set_key(Some(*key)); 467 | let l = &mut **leaves.as_mut().unwrap(); 468 | l.push(leaf); 469 | } 470 | if obj_content.is_some() { 471 | let c = &mut **obj_content.as_mut().unwrap(); 472 | c.extend_from_slice(chunk.as_slice()); 473 | } 474 | } 475 | } 476 | } 477 | } 478 | } 479 | if !children.is_empty() { 480 | if parent_index < children.len() { 481 | return Err(ObjectParseError::InvalidChildren); 482 | } 483 | match Self::collect_leaves( 484 | blocks, 485 | &children, 486 | parent_index - children.len(), 487 | leaves, 488 | obj_content, 489 | ) { 490 | Ok(_) => (), 491 | Err(e) => return Err(e), 492 | } 493 | } 494 | Ok(()) 495 | } 496 | 497 | /// Parse the Object and return the leaf Blocks with decryption key set 498 | pub fn leaves(&self) -> Result, ObjectParseError> { 499 | let mut leaves: Vec = vec![]; 500 | let parents = vec![(self.id(), self.key().unwrap())]; 501 | match Self::collect_leaves( 502 | &self.blocks, 503 | &parents, 504 | self.blocks.len() - 1, 505 | &mut Some(&mut leaves), 506 | &mut None, 507 | ) { 508 | Ok(_) => Ok(leaves), 509 | Err(e) => Err(e), 510 | } 511 | } 512 | 513 | /// Parse the Object and return the decrypted content assembled from Blocks 514 | pub fn content(&self) -> Result { 515 | if self.key().is_none() { 516 | return Err(ObjectParseError::MissingRootKey); 517 | } 518 | let mut obj_content: Vec = vec![]; 519 | let parents = vec![(self.id(), self.key().unwrap())]; 520 | match Self::collect_leaves( 521 | &self.blocks, 522 | &parents, 523 | self.blocks.len() - 1, 524 | &mut None, 525 | &mut Some(&mut obj_content), 526 | ) { 527 | Ok(_) => { 528 | let content: ObjectContent; 529 | match serde_bare::from_slice(obj_content.as_slice()) { 530 | Ok(c) => Ok(c), 531 | Err(e) => { 532 | debug_println!("Object deserialize error: {}", e); 533 | Err(ObjectParseError::ObjectDeserializeError) 534 | } 535 | } 536 | } 537 | Err(e) => Err(e), 538 | } 539 | } 540 | } 541 | 542 | #[cfg(test)] 543 | mod test { 544 | 545 | use crate::object::*; 546 | use crate::store::*; 547 | use crate::types::*; 548 | 549 | // Those constants are calculated with RepoStore::get_max_value_size 550 | 551 | /// Maximum arity of branch containing max number of leaves 552 | const MAX_ARITY_LEAVES: usize = 31774; 553 | /// Maximum arity of root branch 554 | const MAX_ARITY_ROOT: usize = 31770; 555 | /// Maximum data that can fit in object.content 556 | const MAX_DATA_PAYLOAD_SIZE: usize = 2097112; 557 | 558 | /// Test tree API 559 | #[test] 560 | pub fn test_object() { 561 | let file = File::V0(FileV0 { 562 | content_type: Vec::from("file/test"), 563 | metadata: Vec::from("some meta data here"), 564 | content: [(0..255).collect::>().as_slice(); 320].concat(), 565 | }); 566 | let content = ObjectContent::File(file); 567 | 568 | let deps: Vec = vec![Digest::Blake3Digest32([9; 32])]; 569 | let exp = Some(2u32.pow(31)); 570 | let max_object_size = 0; 571 | 572 | let repo_secret = SymKey::ChaCha20Key([0; 32]); 573 | let repo_pubkey = PubKey::Ed25519PubKey([1; 32]); 574 | 575 | let obj = Object::new( 576 | content.clone(), 577 | deps.clone(), 578 | exp, 579 | max_object_size, 580 | repo_pubkey, 581 | repo_secret, 582 | ); 583 | 584 | println!("obj.id: {:?}", obj.id()); 585 | 
println!("obj.key: {:?}", obj.key()); 586 | println!("obj.deps: {:?}", obj.deps()); 587 | println!("obj.blocks.len: {:?}", obj.blocks().len()); 588 | 589 | let mut i = 0; 590 | for node in obj.blocks() { 591 | println!("#{}: {:?}", i, node.id()); 592 | i += 1; 593 | } 594 | 595 | assert_eq!(*obj.deps(), deps); 596 | 597 | match obj.content() { 598 | Ok(cnt) => { 599 | assert_eq!(content, cnt); 600 | } 601 | Err(e) => panic!("Object parse error: {:?}", e), 602 | } 603 | let mut store = HashMapRepoStore::new(); 604 | 605 | obj.save(&mut store).expect("Object save error"); 606 | 607 | let obj2 = Object::load(obj.id(), obj.key(), &store).unwrap(); 608 | 609 | println!("obj2.id: {:?}", obj2.id()); 610 | println!("obj2.key: {:?}", obj2.key()); 611 | println!("obj2.deps: {:?}", obj2.deps()); 612 | println!("obj2.blocks.len: {:?}", obj2.blocks().len()); 613 | let mut i = 0; 614 | for node in obj2.blocks() { 615 | println!("#{}: {:?}", i, node.id()); 616 | i += 1; 617 | } 618 | 619 | assert_eq!(*obj2.deps(), deps); 620 | assert_eq!(*obj2.deps(), deps); 621 | 622 | match obj2.content() { 623 | Ok(cnt) => { 624 | assert_eq!(content, cnt); 625 | } 626 | Err(e) => panic!("Object2 parse error: {:?}", e), 627 | } 628 | 629 | let obj3 = Object::load(obj.id(), None, &store).unwrap(); 630 | 631 | println!("obj3.id: {:?}", obj3.id()); 632 | println!("obj3.key: {:?}", obj3.key()); 633 | println!("obj3.deps: {:?}", obj3.deps()); 634 | println!("obj3.blocks.len: {:?}", obj3.blocks().len()); 635 | let mut i = 0; 636 | for node in obj3.blocks() { 637 | println!("#{}: {:?}", i, node.id()); 638 | i += 1; 639 | } 640 | 641 | assert_eq!(*obj3.deps(), deps); 642 | 643 | match obj3.content() { 644 | Err(ObjectParseError::MissingRootKey) => (), 645 | Err(e) => panic!("Object3 parse error: {:?}", e), 646 | Ok(_) => panic!("Object3 should not return content"), 647 | } 648 | 649 | let exp4 = Some(2342); 650 | let obj4 = obj.copy(exp4, repo_pubkey, repo_secret).unwrap(); 651 | obj4.save(&mut store).unwrap(); 652 | 653 | assert_eq!(obj4.expiry(), exp4); 654 | assert_eq!(*obj.deps(), deps); 655 | 656 | match obj4.content() { 657 | Ok(cnt) => { 658 | assert_eq!(content, cnt); 659 | } 660 | Err(e) => panic!("Object3 parse error: {:?}", e), 661 | } 662 | } 663 | 664 | /// Checks that a content that fits the root node, will not be chunked into children nodes 665 | #[test] 666 | pub fn test_depth_1() { 667 | let deps: Vec = vec![Digest::Blake3Digest32([9; 32])]; 668 | 669 | let empty_file = ObjectContent::File(File::V0(FileV0 { 670 | content_type: vec![], 671 | metadata: vec![], 672 | content: vec![], 673 | })); 674 | let empty_file_ser = serde_bare::to_vec(&empty_file).unwrap(); 675 | println!("empty file size: {}", empty_file_ser.len()); 676 | 677 | let size = store_max_value_size() 678 | - EMPTY_BLOCK_SIZE 679 | - DATA_VARINT_EXTRA 680 | - BLOCK_ID_SIZE * deps.len() 681 | - empty_file_ser.len() 682 | - DATA_VARINT_EXTRA; 683 | println!("file size: {}", size); 684 | 685 | let content = ObjectContent::File(File::V0(FileV0 { 686 | content_type: vec![], 687 | metadata: vec![], 688 | content: vec![99; size], 689 | })); 690 | let content_ser = serde_bare::to_vec(&content).unwrap(); 691 | println!("content len: {}", content_ser.len()); 692 | 693 | let expiry = Some(2u32.pow(31)); 694 | let max_object_size = store_max_value_size(); 695 | 696 | let repo_secret = SymKey::ChaCha20Key([0; 32]); 697 | let repo_pubkey = PubKey::Ed25519PubKey([1; 32]); 698 | 699 | let object = Object::new( 700 | content, 701 | deps, 702 | expiry, 703 | 
max_object_size, 704 | repo_pubkey, 705 | repo_secret, 706 | ); 707 | 708 | println!("root_id: {:?}", object.id()); 709 | println!("root_key: {:?}", object.key().unwrap()); 710 | println!("nodes.len: {:?}", object.blocks().len()); 711 | //println!("root: {:?}", tree.root()); 712 | //println!("nodes: {:?}", object.blocks); 713 | assert_eq!(object.blocks.len(), 1); 714 | } 715 | 716 | #[test] 717 | pub fn test_block_size() { 718 | let max_block_size = store_max_value_size(); 719 | println!("max_object_size: {}", max_block_size); 720 | 721 | let id = Digest::Blake3Digest32([0u8; 32]); 722 | let key = SymKey::ChaCha20Key([0u8; 32]); 723 | 724 | let one_key = BlockContentV0::InternalNode(vec![key]); 725 | let one_key_ser = serde_bare::to_vec(&one_key).unwrap(); 726 | 727 | let two_keys = BlockContentV0::InternalNode(vec![key, key]); 728 | let two_keys_ser = serde_bare::to_vec(&two_keys).unwrap(); 729 | 730 | let max_keys = BlockContentV0::InternalNode(vec![key; MAX_ARITY_LEAVES]); 731 | let max_keys_ser = serde_bare::to_vec(&max_keys).unwrap(); 732 | 733 | let data = BlockContentV0::DataChunk(vec![]); 734 | let data_ser = serde_bare::to_vec(&data).unwrap(); 735 | 736 | let data_full = BlockContentV0::DataChunk(vec![0; MAX_DATA_PAYLOAD_SIZE]); 737 | let data_full_ser = serde_bare::to_vec(&data_full).unwrap(); 738 | 739 | let leaf_empty = Block::new( 740 | vec![], 741 | ObjectDeps::ObjectIdList(vec![]), 742 | Some(2342), 743 | data_ser.clone(), 744 | None, 745 | ); 746 | let leaf_empty_ser = serde_bare::to_vec(&leaf_empty).unwrap(); 747 | 748 | let leaf_full_data = Block::new( 749 | vec![], 750 | ObjectDeps::ObjectIdList(vec![]), 751 | Some(2342), 752 | data_full_ser.clone(), 753 | None, 754 | ); 755 | let leaf_full_data_ser = serde_bare::to_vec(&leaf_full_data).unwrap(); 756 | 757 | let root_depsref = Block::new( 758 | vec![], 759 | ObjectDeps::DepListRef(ObjectRef { id: id, key: key }), 760 | Some(2342), 761 | data_ser.clone(), 762 | None, 763 | ); 764 | 765 | let root_depsref_ser = serde_bare::to_vec(&root_depsref).unwrap(); 766 | 767 | let internal_max = Block::new( 768 | vec![id; MAX_ARITY_LEAVES], 769 | ObjectDeps::ObjectIdList(vec![]), 770 | Some(2342), 771 | max_keys_ser.clone(), 772 | None, 773 | ); 774 | let internal_max_ser = serde_bare::to_vec(&internal_max).unwrap(); 775 | 776 | let internal_one = Block::new( 777 | vec![id; 1], 778 | ObjectDeps::ObjectIdList(vec![]), 779 | Some(2342), 780 | one_key_ser.clone(), 781 | None, 782 | ); 783 | let internal_one_ser = serde_bare::to_vec(&internal_one).unwrap(); 784 | 785 | let internal_two = Block::new( 786 | vec![id; 2], 787 | ObjectDeps::ObjectIdList(vec![]), 788 | Some(2342), 789 | two_keys_ser.clone(), 790 | None, 791 | ); 792 | let internal_two_ser = serde_bare::to_vec(&internal_two).unwrap(); 793 | 794 | let root_one = Block::new( 795 | vec![id; 1], 796 | ObjectDeps::ObjectIdList(vec![id; 8]), 797 | Some(2342), 798 | one_key_ser.clone(), 799 | None, 800 | ); 801 | let root_one_ser = serde_bare::to_vec(&root_one).unwrap(); 802 | 803 | let root_two = Block::new( 804 | vec![id; 2], 805 | ObjectDeps::ObjectIdList(vec![id; 8]), 806 | Some(2342), 807 | two_keys_ser.clone(), 808 | None, 809 | ); 810 | let root_two_ser = serde_bare::to_vec(&root_two).unwrap(); 811 | 812 | println!( 813 | "range of valid value sizes {} {}", 814 | store_valid_value_size(0), 815 | store_max_value_size() 816 | ); 817 | 818 | println!( 819 | "max_data_payload_of_object: {}", 820 | max_block_size - EMPTY_BLOCK_SIZE - DATA_VARINT_EXTRA 821 | ); 822 | 823 | println!( 
824 | "max_data_payload_depth_1: {}", 825 | max_block_size - EMPTY_BLOCK_SIZE - DATA_VARINT_EXTRA - MAX_DEPS_SIZE 826 | ); 827 | 828 | println!( 829 | "max_data_payload_depth_2: {}", 830 | MAX_ARITY_ROOT * MAX_DATA_PAYLOAD_SIZE 831 | ); 832 | 833 | println!( 834 | "max_data_payload_depth_3: {}", 835 | MAX_ARITY_ROOT * MAX_ARITY_LEAVES * MAX_DATA_PAYLOAD_SIZE 836 | ); 837 | 838 | let max_arity_leaves = (max_block_size - EMPTY_BLOCK_SIZE - BIG_VARINT_EXTRA * 2) 839 | / (BLOCK_ID_SIZE + BLOCK_KEY_SIZE); 840 | println!("max_arity_leaves: {}", max_arity_leaves); 841 | assert_eq!(max_arity_leaves, MAX_ARITY_LEAVES); 842 | assert_eq!( 843 | max_block_size - EMPTY_BLOCK_SIZE - DATA_VARINT_EXTRA, 844 | MAX_DATA_PAYLOAD_SIZE 845 | ); 846 | let max_arity_root = 847 | (max_block_size - EMPTY_BLOCK_SIZE - MAX_DEPS_SIZE - BIG_VARINT_EXTRA * 2) 848 | / (BLOCK_ID_SIZE + BLOCK_KEY_SIZE); 849 | println!("max_arity_root: {}", max_arity_root); 850 | assert_eq!(max_arity_root, MAX_ARITY_ROOT); 851 | println!("store_max_value_size: {}", leaf_full_data_ser.len()); 852 | assert_eq!(leaf_full_data_ser.len(), max_block_size); 853 | println!("leaf_empty: {}", leaf_empty_ser.len()); 854 | assert_eq!(leaf_empty_ser.len(), EMPTY_BLOCK_SIZE); 855 | println!("root_depsref: {}", root_depsref_ser.len()); 856 | assert_eq!(root_depsref_ser.len(), EMPTY_ROOT_SIZE_DEPSREF); 857 | println!("internal_max: {}", internal_max_ser.len()); 858 | assert_eq!( 859 | internal_max_ser.len(), 860 | EMPTY_BLOCK_SIZE 861 | + BIG_VARINT_EXTRA * 2 862 | + MAX_ARITY_LEAVES * (BLOCK_ID_SIZE + BLOCK_KEY_SIZE) 863 | ); 864 | assert!(internal_max_ser.len() < max_block_size); 865 | println!("internal_one: {}", internal_one_ser.len()); 866 | assert_eq!( 867 | internal_one_ser.len(), 868 | EMPTY_BLOCK_SIZE + 1 * BLOCK_ID_SIZE + 1 * BLOCK_KEY_SIZE 869 | ); 870 | println!("internal_two: {}", internal_two_ser.len()); 871 | assert_eq!( 872 | internal_two_ser.len(), 873 | EMPTY_BLOCK_SIZE + 2 * BLOCK_ID_SIZE + 2 * BLOCK_KEY_SIZE 874 | ); 875 | println!("root_one: {}", root_one_ser.len()); 876 | assert_eq!( 877 | root_one_ser.len(), 878 | EMPTY_BLOCK_SIZE + 8 * BLOCK_ID_SIZE + 1 * BLOCK_ID_SIZE + 1 * BLOCK_KEY_SIZE 879 | ); 880 | println!("root_two: {}", root_two_ser.len()); 881 | assert_eq!( 882 | root_two_ser.len(), 883 | EMPTY_BLOCK_SIZE + 8 * BLOCK_ID_SIZE + 2 * BLOCK_ID_SIZE + 2 * BLOCK_KEY_SIZE 884 | ); 885 | 886 | // let object_size_1 = 4096 * 1 - VALUE_HEADER_SIZE; 887 | // let object_size_512 = 4096 * MAX_PAGES_PER_VALUE - VALUE_HEADER_SIZE; 888 | // let arity_1: usize = 889 | // (object_size_1 - 8 * OBJECT_ID_SIZE) / (OBJECT_ID_SIZE + OBJECT_KEY_SIZE); 890 | // let arity_512: usize = 891 | // (object_size_512 - 8 * OBJECT_ID_SIZE) / (OBJECT_ID_SIZE + OBJECT_KEY_SIZE); 892 | 893 | // println!("1-page object_size: {}", object_size_1); 894 | // println!("512-page object_size: {}", object_size_512); 895 | // println!("max arity of 1-page object: {}", arity_1); 896 | // println!("max arity of 512-page object: {}", arity_512); 897 | } 898 | } 899 | -------------------------------------------------------------------------------- /lofire/src/repo.rs: -------------------------------------------------------------------------------- 1 | //! 
Repository
2 |
3 | use crate::types::*;
4 |
5 | impl RepositoryV0 {
6 |     pub fn new(
7 |         id: &PubKey,
8 |         branches: &Vec<ObjectRef>,
9 |         allow_ext_requests: bool,
10 |         metadata: &Vec<u8>,
11 |     ) -> RepositoryV0 {
12 |         RepositoryV0 {
13 |             id: id.clone(),
14 |             branches: branches.clone(),
15 |             allow_ext_requests,
16 |             metadata: metadata.clone(),
17 |         }
18 |     }
19 | }
20 |
21 | impl Repository {
22 |     pub fn new(
23 |         id: &PubKey,
24 |         branches: &Vec<ObjectRef>,
25 |         allow_ext_requests: bool,
26 |         metadata: &Vec<u8>,
27 |     ) -> Repository {
28 |         Repository::V0(RepositoryV0::new(
29 |             id,
30 |             branches,
31 |             allow_ext_requests,
32 |             metadata,
33 |         ))
34 |     }
35 | }
36 |
--------------------------------------------------------------------------------
/lofire/src/store.rs:
--------------------------------------------------------------------------------
1 | //! Block store
2 |
3 | use crate::types::*;
4 |
5 | use std::{
6 |     cmp::min,
7 |     collections::{hash_map::Iter, HashMap},
8 |     mem::size_of_val,
9 | };
10 | use std::sync::{Arc, RwLock};
11 |
12 | pub trait RepoStore {
13 |     /// Load a block from the store.
14 |     fn get(&self, id: &BlockId) -> Result<Block, StorageError>;
15 |
16 |     /// Save a block to the store.
17 |     fn put(&self, block: &Block) -> Result<BlockId, StorageError>;
18 |
19 |     /// Delete a block from the store.
20 |     fn del(&self, id: &BlockId) -> Result<(Block, usize), StorageError>;
21 | }
22 |
23 | #[derive(Debug, PartialEq)]
24 | pub enum StorageError {
25 |     NotFound,
26 |     InvalidValue,
27 |     BackendError,
28 |     SerializationError,
29 | }
30 |
31 | impl From<serde_bare::error::Error> for StorageError {
32 |     fn from(_e: serde_bare::error::Error) -> Self {
33 |         StorageError::SerializationError
34 |     }
35 | }
36 |
37 | const MIN_SIZE: usize = 4072;
38 | const PAGE_SIZE: usize = 4096;
39 | const HEADER: usize = PAGE_SIZE - MIN_SIZE;
40 | const MAX_FACTOR: usize = 512;
41 |
42 | /// Returns a valid/optimal value size for the entries of the storage backend.
43 | pub fn store_valid_value_size(size: usize) -> usize {
44 |     min(
45 |         ((size + HEADER) as f32 / PAGE_SIZE as f32).ceil() as usize,
46 |         MAX_FACTOR,
47 |     ) * PAGE_SIZE
48 |         - HEADER
49 | }
50 |
51 | /// Returns the maximum value size for the entries of the storage backend.
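///
/// With PAGE_SIZE = 4096, HEADER = PAGE_SIZE - MIN_SIZE = 24 and MAX_FACTOR = 512
/// (the constants above), this comes to 512 * 4096 - 24 = 2097128 bytes.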
52 | pub const fn store_max_value_size() -> usize {
53 |     MAX_FACTOR * PAGE_SIZE - HEADER
54 | }
55 |
56 | /// Store with a HashMap backend
57 | pub struct HashMapRepoStore {
58 |     blocks: RwLock<HashMap<BlockId, Block>>,
59 | }
60 |
61 | impl HashMapRepoStore {
62 |     pub fn new() -> HashMapRepoStore {
63 |         HashMapRepoStore {
64 |             blocks: RwLock::new(HashMap::new()),
65 |         }
66 |     }
67 |
68 |     pub fn get_len(&self) -> usize {
69 |         self.blocks.read().unwrap().len()
70 |     }
71 |
72 |     pub fn get_all(&self) -> Vec<Block> {
73 |         self.blocks.read().unwrap().values().map(|x| x.clone()).collect()
74 |     }
75 | }
76 |
77 | impl RepoStore for HashMapRepoStore {
78 |     fn get(&self, id: &BlockId) -> Result<Block, StorageError> {
79 |         match self.blocks.read().unwrap().get(id) {
80 |             Some(block) => Ok(block.clone()),
81 |             None => Err(StorageError::NotFound),
82 |         }
83 |     }
84 |
85 |     fn put(&self, block: &Block) -> Result<BlockId, StorageError> {
86 |         let id = block.id();
87 |         let mut b = block.clone();
88 |         b.set_key(None);
89 |         self.blocks.write().unwrap().insert(id, b);
90 |         Ok(id)
91 |     }
92 |
93 |     fn del(&self, id: &BlockId) -> Result<(Block, usize), StorageError> {
94 |         let block = self.blocks.write().unwrap().remove(id).ok_or(StorageError::NotFound)?;
95 |         let size = size_of_val(&block);
96 |         Ok((block, size))
97 |     }
98 | }
99 |
--------------------------------------------------------------------------------
/lofire/src/types.rs:
--------------------------------------------------------------------------------
1 | //! LoFiRe types
2 | //!
3 | //! Corresponds to the BARE schema
4 |
5 | use core::fmt;
6 | use serde::{Deserialize, Serialize};
7 | use std::collections::HashMap;
8 | use std::hash::Hash;
9 |
10 | //
11 | // COMMON TYPES
12 | //
13 |
14 | /// 32-byte BLAKE3 hash digest
15 | pub type Blake3Digest32 = [u8; 32];
16 |
17 | /// Hash digest
18 | #[derive(Clone, Copy, Debug, Serialize, Deserialize, PartialEq, Eq, Hash)]
19 | pub enum Digest {
20 |     Blake3Digest32(Blake3Digest32),
21 | }
22 |
23 | impl fmt::Display for Digest {
24 |     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
25 |         match self {
26 |             Digest::Blake3Digest32(d) => write!(f, "{}", hex::encode(d)),
27 |         }
28 |     }
29 | }
30 |
31 | /// ChaCha20 symmetric key
32 | pub type ChaCha20Key = [u8; 32];
33 |
34 | /// Symmetric cryptographic key
35 | #[derive(Clone, Copy, Debug, Serialize, Deserialize, PartialEq, Eq, Hash)]
36 | pub enum SymKey {
37 |     ChaCha20Key(ChaCha20Key),
38 | }
39 |
40 | impl SymKey {
41 |     pub fn slice(&self) -> &[u8; 32] {
42 |         match self {
43 |             SymKey::ChaCha20Key(o) => o,
44 |         }
45 |     }
46 | }
47 |
48 | /// Ed25519 public key
49 | pub type Ed25519PubKey = [u8; 32];
50 |
51 | /// Ed25519 private key
52 | pub type Ed25519PrivKey = [u8; 32];
53 |
54 | /// Public key
55 | #[derive(Clone, Copy, Debug, Serialize, Deserialize, PartialEq, Eq, Hash)]
56 | pub enum PubKey {
57 |     Ed25519PubKey(Ed25519PubKey),
58 | }
59 |
60 | impl PubKey {
61 |     pub fn slice(&self) -> &[u8; 32] {
62 |         match self {
63 |             PubKey::Ed25519PubKey(o) => o,
64 |         }
65 |     }
66 | }
67 |
68 | impl fmt::Display for PubKey {
69 |     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
70 |         match self {
71 |             PubKey::Ed25519PubKey(d) => write!(f, "{}", hex::encode(d)),
72 |         }
73 |     }
74 | }
75 |
76 | /// Private key
77 | #[derive(Clone, Copy, Debug, Serialize, Deserialize, PartialEq, Eq)]
78 | pub enum PrivKey {
79 |     Ed25519PrivKey(Ed25519PrivKey),
80 | }
81 |
82 | /// Ed25519 signature
83 | pub type Ed25519Sig = [[u8; 32]; 2];
84 |
85 | /// Cryptographic signature
86 | #[derive(Clone, Copy, Debug, Serialize, Deserialize,
PartialEq, Eq)] 87 | pub enum Sig { 88 | Ed25519Sig(Ed25519Sig), 89 | } 90 | 91 | /// Timestamp: absolute time in minutes since 2022-02-22 22:22 UTC 92 | pub type Timestamp = u32; 93 | 94 | pub const EPOCH_AS_UNIX_TIMESTAMP: u64 = 1645568520; 95 | 96 | /// Relative time (e.g. delay from current time) 97 | #[derive(Clone, Copy, Debug, Serialize, Deserialize, PartialEq, Eq)] 98 | pub enum RelTime { 99 | Seconds(u8), 100 | Minutes(u8), 101 | Hours(u8), 102 | Days(u8), 103 | } 104 | 105 | /// Bloom filter (variable size) 106 | #[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)] 107 | pub struct BloomFilter { 108 | /// Number of hash functions 109 | pub k: u32, 110 | 111 | /// Filter 112 | #[serde(with = "serde_bytes")] 113 | pub f: Vec, 114 | } 115 | 116 | /// Bloom filter (128 B) 117 | /// 118 | /// (m=1024; k=7; p=0.01; n=107) 119 | pub type BloomFilter128 = [[u8; 32]; 4]; 120 | 121 | /// Bloom filter (1 KiB) 122 | /// 123 | /// (m=8192; k=7; p=0.01; n=855) 124 | pub type BloomFilter1K = [[u8; 32]; 32]; 125 | 126 | // 127 | // REPOSITORY TYPES 128 | // 129 | 130 | /// Block ID: 131 | /// BLAKE3 hash over the serialized Object with encrypted content 132 | pub type BlockId = Digest; 133 | 134 | /// Block reference 135 | #[derive(Clone, Copy, Debug, Serialize, Deserialize, PartialEq, Eq, Hash)] 136 | pub struct BlockRef { 137 | /// Object ID 138 | pub id: BlockId, 139 | 140 | /// Key for decrypting the Object 141 | pub key: SymKey, 142 | } 143 | 144 | /// Object ID 145 | pub type ObjectId = BlockId; 146 | 147 | /// Object reference 148 | pub type ObjectRef = BlockRef; 149 | 150 | /// Internal node of a Merkle tree 151 | pub type InternalNode = Vec; 152 | 153 | /// Content of BlockV0: a Merkle tree node 154 | #[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)] 155 | pub enum BlockContentV0 { 156 | /// Internal node with references to children 157 | InternalNode(InternalNode), 158 | 159 | #[serde(with = "serde_bytes")] 160 | DataChunk(Vec), 161 | } 162 | 163 | /// List of ObjectId dependencies as encrypted Object content 164 | #[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)] 165 | pub enum DepList { 166 | V0(Vec), 167 | } 168 | 169 | /// Dependencies of an Object 170 | #[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)] 171 | pub enum ObjectDeps { 172 | /// List of Object IDs (max. 8), 173 | ObjectIdList(Vec), 174 | 175 | /// Reference to an Object that contains a DepList 176 | DepListRef(ObjectRef), 177 | } 178 | 179 | /// Immutable block with encrypted content 180 | /// 181 | /// `ObjectContent` is chunked and stored as `Block`s in a Merkle tree. 182 | /// A Block is a Merkle tree node. 183 | #[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)] 184 | pub struct BlockV0 { 185 | /// Block ID 186 | #[serde(skip)] 187 | pub id: Option, 188 | 189 | /// Block Key 190 | #[serde(skip)] 191 | pub key: Option, 192 | 193 | /// Block IDs for child nodes in the Merkle tree 194 | pub children: Vec, 195 | 196 | /// Other objects this object depends on (e.g. 
Commit deps & acks) 197 | /// Only set for the root block 198 | pub deps: ObjectDeps, 199 | 200 | /// Expiry time of this object and all of its children 201 | /// when the object should be deleted by all replicas 202 | /// Only set for the root block 203 | pub expiry: Option, 204 | 205 | /// Encrypted ObjectContentV0 206 | /// 207 | /// Encrypted using convergent encryption with ChaCha20: 208 | /// - convergence_key: BLAKE3 derive_key ("LoFiRe Data BLAKE3 key", 209 | /// repo_pubkey + repo_secret) 210 | /// - key: BLAKE3 keyed hash (convergence_key, plain_object_content) 211 | /// - nonce: 0 212 | #[serde(with = "serde_bytes")] 213 | pub content: Vec, 214 | } 215 | 216 | /// Immutable object with encrypted content 217 | #[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)] 218 | pub enum Block { 219 | V0(BlockV0), 220 | } 221 | 222 | /// Repository definition 223 | /// 224 | /// Published in root branch, where: 225 | /// - branch_pubkey: repo_pubkey 226 | /// - branch_secret: BLAKE3 derive_key ("LoFiRe Root Branch secret", 227 | /// repo_pubkey + repo_secret) 228 | #[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)] 229 | pub struct RepositoryV0 { 230 | /// Repo public key ID 231 | pub id: PubKey, 232 | 233 | /// List of branches 234 | pub branches: Vec, 235 | 236 | /// Whether or not to allow external requests 237 | pub allow_ext_requests: bool, 238 | 239 | /// App-specific metadata 240 | #[serde(with = "serde_bytes")] 241 | pub metadata: Vec, 242 | } 243 | 244 | /// Repository definition 245 | #[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)] 246 | pub enum Repository { 247 | V0(RepositoryV0), 248 | } 249 | 250 | /// Add a branch to the repository 251 | #[derive(Clone, Copy, Debug, Serialize, Deserialize, PartialEq, Eq)] 252 | pub enum AddBranch { 253 | V0(ObjectRef), 254 | } 255 | 256 | /// Remove a branch from the repository 257 | #[derive(Clone, Copy, Debug, Serialize, Deserialize, PartialEq, Eq)] 258 | pub enum RemoveBranch { 259 | V0(ObjectRef), 260 | } 261 | 262 | /// Commit object types 263 | #[derive(Clone, Copy, Debug, Serialize, Deserialize, PartialEq, Eq, Hash)] 264 | pub enum CommitType { 265 | Repository, 266 | AddBranch, 267 | RemoveBranch, 268 | Branch, 269 | AddMembers, 270 | EndOfBranch, 271 | Transaction, 272 | Snapshot, 273 | Ack, 274 | } 275 | 276 | /// Member of a Branch 277 | #[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)] 278 | pub struct MemberV0 { 279 | /// Member public key ID 280 | pub id: PubKey, 281 | 282 | /// Commit types the member is allowed to publish in the branch 283 | pub commit_types: Vec, 284 | 285 | /// App-specific metadata 286 | /// (role, permissions, cryptographic material, etc) 287 | #[serde(with = "serde_bytes")] 288 | pub metadata: Vec, 289 | } 290 | 291 | /// Member of a branch 292 | #[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)] 293 | pub enum Member { 294 | V0(MemberV0), 295 | } 296 | 297 | /// Branch definition 298 | /// 299 | /// First commit in a branch, signed by branch key 300 | /// In case of a fork, the commit deps indicat 301 | /// the previous branch heads. 
302 | #[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)] 303 | pub struct BranchV0 { 304 | /// Branch public key ID 305 | pub id: PubKey, 306 | 307 | /// Pub/sub topic for publishing events 308 | pub topic: PubKey, 309 | 310 | /// Branch secret key 311 | pub secret: SymKey, 312 | 313 | /// Members with permissions 314 | pub members: Vec, 315 | 316 | /// Number of acks required for a commit to be valid 317 | pub quorum: HashMap, 318 | 319 | /// Delay to send explicit acks, 320 | /// if not enough implicit acks arrived by then 321 | pub ack_delay: RelTime, 322 | 323 | /// Tags for organizing branches within the repository 324 | #[serde(with = "serde_bytes")] 325 | pub tags: Vec, 326 | 327 | /// App-specific metadata (validation rules, etc) 328 | #[serde(with = "serde_bytes")] 329 | pub metadata: Vec, 330 | } 331 | 332 | /// Branch definition 333 | #[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)] 334 | pub enum Branch { 335 | V0(BranchV0), 336 | } 337 | 338 | /// Add members to an existing branch 339 | #[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)] 340 | pub struct AddMembersV0 { 341 | /// Members to add, with permissions 342 | pub members: Vec, 343 | 344 | /// New quorum 345 | pub quorum: Option>, 346 | 347 | /// New ackDelay 348 | pub ack_delay: Option, 349 | } 350 | 351 | /// Add members to an existing branch 352 | /// 353 | /// If a member already exists, it overwrites the previous definition, 354 | /// in that case this can only be used for adding new permissions, 355 | /// not to remove existing ones. 356 | /// The quorum and ackDelay can be changed as well 357 | #[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)] 358 | pub enum AddMembers { 359 | V0(AddMembersV0), 360 | } 361 | 362 | /// ObjectRef for EndOfBranch 363 | #[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)] 364 | pub enum PlainOrEncryptedObjectRef { 365 | Plain(ObjectRef), 366 | Encrypted(Vec), 367 | } 368 | 369 | /// End of branch 370 | /// 371 | /// No more commits accepted afterwards, only acks of this commit 372 | /// May reference a fork where the branch continues 373 | /// with possibly different members, permissions, validation rules. 374 | #[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)] 375 | pub struct EndOfBranchV0 { 376 | /// (Encrypted) reference to forked branch (optional) 377 | pub fork: Option, 378 | 379 | /// Expiry time when all commits in the branch should be deleted 380 | pub expiry: Timestamp, 381 | } 382 | 383 | /// End of branch 384 | #[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)] 385 | pub enum EndOfBranch { 386 | V0(EndOfBranchV0), 387 | } 388 | 389 | /// Transaction with CRDT operations 390 | #[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)] 391 | pub enum Transaction { 392 | #[serde(with = "serde_bytes")] 393 | V0(Vec), 394 | } 395 | 396 | /// Snapshot of a Branch 397 | /// 398 | /// Contains a data structure 399 | /// computed from the commits at the specified head. 
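/// (e.g. a materialized view of the branch, so a reader need not replay every Transaction)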
400 | #[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)] 401 | pub struct SnapshotV0 { 402 | /// Branch heads the snapshot was made from 403 | pub heads: Vec, 404 | 405 | /// Snapshot data structure 406 | #[serde(with = "serde_bytes")] 407 | pub content: Vec, 408 | } 409 | 410 | /// Snapshot of a Branch 411 | #[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)] 412 | pub enum Snapshot { 413 | V0(SnapshotV0), 414 | } 415 | 416 | /// Acknowledgement of another Commit 417 | #[derive(Clone, Copy, Debug, Serialize, Deserialize, PartialEq, Eq)] 418 | pub enum Ack { 419 | V0(), 420 | } 421 | 422 | /// Commit body, corresponds to CommitType 423 | #[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)] 424 | pub enum CommitBody { 425 | Repository(Repository), 426 | AddBranch(AddBranch), 427 | RemoveBranch(RemoveBranch), 428 | Branch(Branch), 429 | AddMembers(AddMembers), 430 | EndOfBranch(EndOfBranch), 431 | Transaction(Transaction), 432 | Snapshot(Snapshot), 433 | Ack(Ack), 434 | } 435 | 436 | /// Content of a Commit 437 | #[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)] 438 | pub struct CommitContentV0 { 439 | /// Commit author 440 | pub author: PubKey, 441 | 442 | /// Author's commit sequence number in this branch 443 | pub seq: u32, 444 | 445 | /// Branch the commit belongs to 446 | pub branch: ObjectRef, 447 | 448 | /// Direct dependencies of this commit 449 | pub deps: Vec, 450 | 451 | /// Not directly dependent heads to acknowledge 452 | pub acks: Vec, 453 | 454 | /// Files the commit references 455 | pub refs: Vec, 456 | 457 | /// App-specific metadata (commit message, creation time, etc) 458 | #[serde(with = "serde_bytes")] 459 | pub metadata: Vec, 460 | 461 | /// Object with a CommitBody inside 462 | pub body: ObjectRef, 463 | 464 | /// Expiry time of the body object 465 | pub expiry: Option, 466 | } 467 | 468 | /// Commit object 469 | /// Signed by branch key, or a member key authorized to publish this commit type 470 | #[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)] 471 | pub struct CommitV0 { 472 | /// ID of parent Object 473 | #[serde(skip)] 474 | pub id: Option, 475 | 476 | /// Key of parent Object 477 | #[serde(skip)] 478 | pub key: Option, 479 | 480 | /// Commit content 481 | pub content: CommitContentV0, 482 | 483 | /// Signature over the content by the author 484 | pub sig: Sig, 485 | } 486 | 487 | /// Commit Object 488 | #[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)] 489 | pub enum Commit { 490 | V0(CommitV0), 491 | } 492 | 493 | /// File Object 494 | #[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)] 495 | pub struct FileV0 { 496 | #[serde(with = "serde_bytes")] 497 | pub content_type: Vec, 498 | 499 | #[serde(with = "serde_bytes")] 500 | pub metadata: Vec, 501 | 502 | #[serde(with = "serde_bytes")] 503 | pub content: Vec, 504 | } 505 | 506 | /// A file stored in an Object 507 | #[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)] 508 | pub enum File { 509 | V0(FileV0), 510 | } 511 | 512 | /// Immutable data stored encrypted in a Merkle tree 513 | #[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)] 514 | pub enum ObjectContent { 515 | Commit(Commit), 516 | CommitBody(CommitBody), 517 | File(File), 518 | DepList(DepList), 519 | } 520 | -------------------------------------------------------------------------------- /lofire/src/utils.rs: -------------------------------------------------------------------------------- 1 | use crate::errors::*; 2 | use crate::types::*; 3 | 4 | 
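// Ed25519 signing and verification helpers for raw byte content,
// plus keypair generation and LoFiRe timestamp conversion.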
use ed25519_dalek::*;
5 | use rand::rngs::OsRng;
6 | use std::time::{SystemTime, UNIX_EPOCH};
7 |
8 | pub fn sign(
9 |     author_privkey: PrivKey,
10 |     author_pubkey: PubKey,
11 |     content: &Vec<u8>,
12 | ) -> Result<Sig, LofireError> {
13 |     let kp = match (author_privkey, author_pubkey) {
14 |         (PrivKey::Ed25519PrivKey(sk), PubKey::Ed25519PubKey(pk)) => [sk, pk].concat(),
15 |     };
16 |     let keypair = Keypair::from_bytes(kp.as_slice())?;
17 |     let sig_bytes = keypair.sign(content.as_slice()).to_bytes();
18 |     let mut it = sig_bytes.chunks_exact(32);
19 |     let mut ss: Ed25519Sig = [[0; 32], [0; 32]];
20 |     ss[0].copy_from_slice(it.next().unwrap());
21 |     ss[1].copy_from_slice(it.next().unwrap());
22 |     Ok(Sig::Ed25519Sig(ss))
23 | }
24 |
25 | pub fn verify(content: &Vec<u8>, sig: Sig, pub_key: PubKey) -> Result<(), LofireError> {
26 |     let pubkey = match pub_key {
27 |         PubKey::Ed25519PubKey(pk) => pk,
28 |     };
29 |     let pk = PublicKey::from_bytes(&pubkey)?;
30 |     let sig_bytes = match sig {
31 |         Sig::Ed25519Sig(ss) => [ss[0], ss[1]].concat(),
32 |     };
33 |     let sig = Signature::from_bytes(&sig_bytes)?;
34 |     Ok(pk.verify_strict(content, &sig)?)
35 | }
36 |
37 | pub fn generate_keypair() -> (PrivKey, PubKey) {
38 |     let mut csprng = OsRng {};
39 |     let keypair: Keypair = Keypair::generate(&mut csprng);
40 |     // println!(
41 |     //     "private key: ({}) {:?}",
42 |     //     keypair.secret.as_bytes().len(),
43 |     //     keypair.secret.as_bytes()
44 |     // );
45 |     // println!(
46 |     //     "public key: ({}) {:?}",
47 |     //     keypair.public.as_bytes().len(),
48 |     //     keypair.public.as_bytes()
49 |     // );
50 |     let ed_priv_key = keypair.secret.to_bytes();
51 |     let ed_pub_key = keypair.public.to_bytes();
52 |     let priv_key = PrivKey::Ed25519PrivKey(ed_priv_key);
53 |     let pub_key = PubKey::Ed25519PubKey(ed_pub_key);
54 |     (priv_key, pub_key)
55 | }
56 |
57 | /// Returns the LoFiRe Timestamp for the current time.
58 | pub fn now_timestamp() -> Timestamp {
59 |     ((SystemTime::now()
60 |         .duration_since(UNIX_EPOCH)
61 |         .unwrap()
62 |         .as_secs()
63 |         - EPOCH_AS_UNIX_TIMESTAMP)
64 |         / 60)
65 |     .try_into()
66 |     .unwrap()
67 | }
--------------------------------------------------------------------------------
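Illustrative usage (not part of the repository): a minimal sketch of how the pieces
above fit together — generate a keypair, chunk and encrypt a small File object into
the in-memory HashMapRepoStore, then reload it by root ID and key. All type and
function names come from the files above; the `main` wrapper, the all-zero repo
secret, and the file contents are assumptions made for the sketch.

use lofire::object::*;
use lofire::store::*;
use lofire::types::*;
use lofire::utils::*;

fn main() {
    // assumed demo values: a throwaway keypair standing in for the repo key,
    // and an all-zero repo secret
    let (_priv_key, repo_pubkey) = generate_keypair();
    let repo_secret = SymKey::ChaCha20Key([0; 32]);

    // chunk + convergently encrypt the content into a Merkle tree of Blocks
    let content = ObjectContent::File(File::V0(FileV0 {
        content_type: b"text/plain".to_vec(),
        metadata: vec![],
        content: b"hello world".to_vec(),
    }));
    let obj = Object::new(
        content.clone(),
        vec![],                 // no deps
        None,                   // no expiry
        store_max_value_size(), // desired block size
        repo_pubkey,
        repo_secret,
    );

    // save all blocks, then reload the tree by root id + key and decrypt
    let mut store = HashMapRepoStore::new();
    obj.save(&mut store).unwrap();
    let loaded = Object::load(obj.id(), obj.key(), &store).unwrap();
    assert_eq!(loaded.content().unwrap(), content);
}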