├── .github └── workflows │ └── rust.yml ├── .gitignore ├── Cargo.toml ├── LICENCE ├── README.md ├── examples ├── simple_blocking_chat.rs └── simple_chat.rs ├── src ├── configuration.rs ├── database │ ├── authorisation_service.rs │ ├── authorisation_service_test.rs │ ├── daily_log.rs │ ├── deletion.rs │ ├── edge.rs │ ├── graph_database.rs │ ├── mod.rs │ ├── mutation_query.rs │ ├── node.rs │ ├── query.rs │ ├── query_language │ │ ├── data_model.pest │ │ ├── data_model_parser.rs │ │ ├── data_model_parser_test.rs │ │ ├── deletion.pest │ │ ├── deletion_parser.rs │ │ ├── mod.rs │ │ ├── mutation.pest │ │ ├── mutation_parser.rs │ │ ├── parameter.rs │ │ ├── query.pest │ │ ├── query_parser.rs │ │ └── query_parser_test.rs │ ├── query_test.rs │ ├── room.rs │ ├── room_node.rs │ ├── sqlite_database.rs │ └── system_entities.rs ├── date_utils.rs ├── discret.rs ├── event_service.rs ├── lib.rs ├── network │ ├── beacon.rs │ ├── endpoint.rs │ ├── mod.rs │ ├── multicast.rs │ ├── peer_manager.rs │ └── shared_buffers.rs ├── peer_connection_service.rs ├── security.rs ├── signature_verification_service.rs └── synchronisation │ ├── mod.rs │ ├── peer_inbound_service.rs │ ├── peer_outbound_service.rs │ └── room_locking_service.rs └── tests ├── mutations.rs └── synchronisation.rs /.github/workflows/rust.yml: -------------------------------------------------------------------------------- 1 | name: Rust 2 | 3 | on: 4 | push: 5 | branches: [ "master" ] 6 | pull_request: 7 | branches: [ "master" ] 8 | 9 | env: 10 | CARGO_TERM_COLOR: always 11 | 12 | jobs: 13 | build: 14 | 15 | runs-on: ubuntu-latest 16 | 17 | steps: 18 | - uses: actions/checkout@v4 19 | - name: Build 20 | run: cargo build --verbose 21 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | /Cargo.lock 3 | /test_data 4 | /justfile 5 | /todo.txt 6 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | 2 | [package] 3 | name = "discret" 4 | homepage = "https://discretlib.github.io/doc/" 5 | version = "0.6.2" 6 | license-file = "LICENCE" 7 | description = "A backend to create peer to peer (P2P) applications, using a GraphQL inspired syntax " 8 | authors = ["Adrien Salais "] 9 | keywords = ["p2p", "GraphQl", "QUIC"] 10 | edition = "2021" 11 | readme = "README.md" 12 | 13 | [dependencies] 14 | #database 15 | rusqlite = { version = "0.32.1", features = [ 16 | "bundled-sqlcipher-vendored-openssl", 17 | "backup", 18 | "hooks", 19 | "window", 20 | ] } 21 | 22 | ## Crypto 23 | rustls = { version = "0.23.10", default-features = false, features = [ 24 | "ring", 25 | "std", 26 | ] } 27 | rcgen = "0.13.1" 28 | rust-argon2 = "2.1.0" 29 | blake3 = "1.5.4" 30 | ed25519-dalek = { version = "2.1.1", features = ["batch"] } 31 | x25519-dalek = { version = "2.0.1", features = ["static_secrets", "serde"] } 32 | rand = "0.8.5" 33 | 34 | ## Network 35 | quinn = { version = "0.11.4", default-features = false, features = [ 36 | "rustls", 37 | "ring", 38 | "runtime-tokio", 39 | ] } 40 | socket2 = "0.5.7" 41 | 42 | ## Serialisation 43 | serde = { version = "1.0.209", features = ["derive"] } 44 | bincode = "1.3.3" 45 | serde_json = "1.0.127" 46 | hex = "0.4.3" 47 | base64 = "0.22.1" 48 | pest = "2.7.10" 49 | pest_derive = "2.7.10" 50 | 51 | ## Other 52 | thiserror = "1.0.61" 53 | tokio = { version = "1.40.0", features = 
["full"] } 54 | futures = "0.3.30" 55 | flume = "0.11.0" 56 | lazy_static = "1.5.0" 57 | chrono = "0.4.38" 58 | lru = "0.12.3" 59 | sysinfo = "0.30.12" 60 | log = { version = "0.4.22", optional = true } 61 | 62 | [dev-dependencies] 63 | 64 | discret = { path = ".", features = ["log"] } 65 | 66 | [features] 67 | default = [] 68 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Discret: Create local first, peer to peer application (P2P) using a GraphQL inspired API 2 | 3 | *Discret* hides the complexity of peer to peer networks and reduces it to a data access problem. 4 | 5 | It provides the following features: 6 | - A database layer based on SQLite that is managed using a GraphQL inspired API, 7 | - An authentication and authorization layer to define who can access data, 8 | - A Peer to Peer layer that allows you to invite Peers. 9 | 10 | *Discret* will automatically synchronize your data with other peers, based on the access rights you have defined. 11 | 12 | While not intended to solve Internet scale problem, it should be useful to quickly create application that don't requires a lot of users. 13 | 14 | More details and tutorials are available in the [documentation site](https://discretlib.github.io/doc/) 15 | # Usage 16 | *Discret* is published as a cargo crate. 17 | 18 | ``` 19 | cargo add discret 20 | ``` 21 | 22 | # Example 23 | The following example creates a very basic chat application. If you build and run this program on several different folder or local network devices 24 | you should be able to chat with yourself. 25 | 26 | ```rust 27 | use std::{io, path::PathBuf}; 28 | use discret::{ 29 | derive_pass_phrase, zero_uid, Configuration, Discret, 30 | Parameters, ParametersAdd, ResultParser, 31 | }; 32 | use serde::Deserialize; 33 | //the application unique identifier 34 | const APPLICATION_KEY: &str = "github.com/discretlib/rust_example_simple_chat"; 35 | #[tokio::main] 36 | async fn main() { 37 | //define a datamodel 38 | let model = "chat { 39 | Message{ 40 | content:String 41 | } 42 | }"; 43 | //this struct is used to parse the query result 44 | #[derive(Deserialize)] 45 | struct Chat { 46 | pub id: String, 47 | pub mdate: i64, 48 | pub content: String, 49 | } 50 | let path: PathBuf = "test_data".into(); //where data is stored 51 | //used to derives all necessary secrets 52 | let key_material: [u8; 32] = derive_pass_phrase("my login", "my password"); 53 | //start the discret application 54 | let app: Discret = Discret::new( 55 | model, 56 | APPLICATION_KEY, 57 | &key_material, 58 | path, 59 | Configuration::default(), 60 | ) 61 | .await 62 | .unwrap(); 63 | //listen for events 64 | let mut events = app.subscribe_for_events().await; 65 | let event_app: Discret = app.clone(); 66 | tokio::spawn(async move { 67 | let mut last_date = 0; 68 | let mut last_id = zero_uid(); 69 | let private_room: String = event_app.private_room(); 70 | while let Ok(event) = events.recv().await { 71 | match event { 72 | //triggered when data is modified 73 | discret::Event::DataChanged(_) => { 74 | let mut param = Parameters::new(); 75 | param.add("mdate", last_date).unwrap(); 76 | param.add("id", last_id.clone()).unwrap(); 77 | param.add("room_id", private_room.clone()).unwrap(); 78 | //get the latest data, the result is in the JSON format 79 | let result: String = event_app 80 | .query( 81 | "query { 82 | res: chat.Message( 83 | order_by(mdate asc, id asc), 84 | 
                                    after($mdate, $id),
85 |                                     room_id = $room_id
86 |                                 ) {
87 |                                     id
88 |                                     mdate
89 |                                     content
90 |                                 }
91 |                             }",
92 |                             Some(param),
93 |                         )
94 |                         .await
95 |                         .unwrap();
96 |                     let mut query_result = ResultParser::new(&result).unwrap();
97 |                     let res: Vec<Chat> = query_result.take_array("res").unwrap();
98 |                     for msg in res {
99 |                         last_date = msg.mdate;
100 |                         last_id = msg.id;
101 |                         println!("you said: {}", msg.content);
102 |                     }
103 |                 }
104 |                 _ => {} //ignores other events
105 |             }
106 |         }
107 |     });
108 |     //data is inserted in your private room
109 |     let private_room: String = app.private_room();
110 |     let stdin = io::stdin();
111 |     let mut line = String::new();
112 |     println!("{}", "Write Something!");
113 |     loop {
114 |         stdin.read_line(&mut line).unwrap();
115 |         if line.starts_with("/q") {
116 |             break;
117 |         }
118 |         line.pop();
119 |         let mut params = Parameters::new();
120 |         params.add("message", line.clone()).unwrap();
121 |         params.add("room_id", private_room.clone()).unwrap();
122 |         app.mutate(
123 |             "mutate {
124 |                 chat.Message {
125 |                     room_id:$room_id
126 |                     content: $message
127 |                 }
128 |             }",
129 |             Some(params),
130 |         )
131 |         .await
132 |         .unwrap();
133 |         line.clear();
134 |     }
135 | }
136 | ```
137 | # Features
138 | *Discret* provides a blocking (DiscretBlocking) and a non-blocking (Discret) API.
139 | 
140 | On a local network, peer connections happen without requiring any server.
141 | For peer-to-peer connections over the Internet, a discovery server is needed to allow peers to discover each other.
142 | The discret lib provides an implementation of this discovery server, named Beacon.
143 | 
144 | The library provides strong security features out of the box:
145 | - data is encrypted at rest by using the SQLCipher database
146 | - encrypted communication using the QUIC protocol
147 | - data integrity: each row is signed with the peer's signing key, making it very hard to synchronize bad data
148 | - access control via Rooms
149 | 
150 | # Limitations
151 | As data lives on your devices, Discret should only be used for applications whose data is generated by "real people", with hundreds of peers at most.
152 | It is not suited for large-scale applications and communities with thousands of people.
153 | 
154 | It currently only supports text data, but support for file synchronization is planned.
155 | 
156 | Connection over the internet is not 100% guaranteed to work, because certain types of enterprise firewalls will block the connection attempts.
157 | 
158 | Please be warned that P2P connections leak your IP address and should only be used with trusted peers.
159 | This leak exposes you to the following threats:
160 | - Distributed denial of service (DDoS)
161 | - Leak of your "Real World" location via geolocation services.
162 | - State-sponsored surveillance: a state watching the network could determine which peers connect to which, revealing a lot about your social network.
163 | 
164 | # Platform Support
165 | - Linux: Tested
166 | - Windows: Tested
167 | - macOS: not tested, should work
168 | - Android: works on the aarch64 architecture. The i686 and x86_64 architectures have some low-level linker issues when working with Flutter.
169 | - iOS: not tested 170 | -------------------------------------------------------------------------------- /examples/simple_blocking_chat.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | io, 3 | path::PathBuf, 4 | thread::{self}, 5 | }; 6 | 7 | use discret::{ 8 | derive_pass_phrase, zero_uid, Configuration, DiscretBlocking, Parameters, ParametersAdd, 9 | ResultParser, 10 | }; 11 | use serde::Deserialize; 12 | 13 | //application unique identifier 14 | const APPLICATION_KEY: &str = "github.com/discretlib/rust_example_simple_chat"; 15 | 16 | /// 17 | /// a basic chat that uses the Blocking API 18 | /// 19 | fn main() { 20 | //define a datamodel 21 | let model = "chat { 22 | Message{ 23 | content:String 24 | } 25 | }"; 26 | //this struct is used to parse the query result 27 | #[derive(Deserialize)] 28 | struct Chat { 29 | pub id: String, 30 | pub mdate: i64, 31 | pub content: String, 32 | } 33 | 34 | let path: PathBuf = "test_data".into(); //where data is stored 35 | 36 | //used to derives all necessary secrets 37 | let key_material: [u8; 32] = derive_pass_phrase("my login", "my password"); 38 | 39 | //start the discret application 40 | let app = DiscretBlocking::new( 41 | model, 42 | APPLICATION_KEY, 43 | &key_material, 44 | path, 45 | Configuration::default(), 46 | ) 47 | .unwrap(); 48 | 49 | //listen for events 50 | let mut events = app.subscribe_for_events(); 51 | let event_app = app.clone(); 52 | thread::spawn(move || { 53 | let mut last_date = 0; 54 | let mut last_id = zero_uid(); 55 | 56 | let private_room: String = event_app.private_room(); 57 | while let Ok(event) = events.blocking_recv() { 58 | match event { 59 | //triggered when data is modified 60 | discret::Event::DataChanged(_) => { 61 | let mut param = Parameters::new(); 62 | param.add("mdate", last_date).unwrap(); 63 | param.add("id", last_id.clone()).unwrap(); 64 | param.add("room_id", private_room.clone()).unwrap(); 65 | 66 | //get the latest data, the result is in the JSON format 67 | let result: String = event_app 68 | .query( 69 | "query { 70 | res: chat.Message( 71 | order_by(mdate asc, id asc), 72 | after($mdate, $id), 73 | room_id = $room_id 74 | ) { 75 | id 76 | mdate 77 | content 78 | } 79 | }", 80 | Some(param), 81 | ) 82 | .unwrap(); 83 | let mut query_result = ResultParser::new(&result).unwrap(); 84 | let res: Vec = query_result.take_array("res").unwrap(); 85 | for msg in res { 86 | last_date = msg.mdate; 87 | last_id = msg.id; 88 | println!("you said: {}", msg.content); 89 | } 90 | } 91 | _ => {} //ignores other events 92 | } 93 | } 94 | }); 95 | 96 | //data is inserted in your private room 97 | let private_room: String = app.private_room(); 98 | let stdin = io::stdin(); 99 | let mut line = String::new(); 100 | println!("{}", "Write Something!"); 101 | loop { 102 | stdin.read_line(&mut line).unwrap(); 103 | if line.starts_with("/q") { 104 | break; 105 | } 106 | line.pop(); 107 | let mut params = Parameters::new(); 108 | params.add("message", line.clone()).unwrap(); 109 | params.add("room_id", private_room.clone()).unwrap(); 110 | app.mutate( 111 | "mutate { 112 | chat.Message { 113 | room_id:$room_id 114 | content: $message 115 | } 116 | }", 117 | Some(params), 118 | ) 119 | .unwrap(); 120 | line.clear(); 121 | } 122 | } 123 | -------------------------------------------------------------------------------- /examples/simple_chat.rs: -------------------------------------------------------------------------------- 1 | use discret::{ 2 | derive_pass_phrase, 
zero_uid, Configuration, Discret, Parameters, ParametersAdd, ResultParser, 3 | }; 4 | use serde::Deserialize; 5 | use std::{io, path::PathBuf}; 6 | 7 | //the application unique identifier 8 | const APPLICATION_KEY: &str = "github.com/discretlib/rust_example_simple_chat"; 9 | 10 | #[tokio::main] 11 | async fn main() { 12 | //define a datamodel 13 | let model = "chat { 14 | Message{ 15 | content:String 16 | } 17 | }"; 18 | //this struct is used to parse the query result 19 | #[derive(Deserialize)] 20 | struct Chat { 21 | pub id: String, 22 | pub mdate: i64, 23 | pub content: String, 24 | } 25 | 26 | let path: PathBuf = "test_data".into(); //where data is stored 27 | 28 | //used to derives all necessary secrets 29 | let key_material: [u8; 32] = derive_pass_phrase("my login", "my password"); 30 | 31 | //start the discret application 32 | let app: Discret = Discret::new( 33 | model, 34 | APPLICATION_KEY, 35 | &key_material, 36 | path, 37 | Configuration::default(), 38 | ) 39 | .await 40 | .unwrap(); 41 | 42 | //listen for events 43 | let mut events = app.subscribe_for_events().await; 44 | let event_app: Discret = app.clone(); 45 | tokio::spawn(async move { 46 | let mut last_date = 0; 47 | let mut last_id = zero_uid(); 48 | 49 | let private_room: String = event_app.private_room(); 50 | while let Ok(event) = events.recv().await { 51 | match event { 52 | //triggered when data is modified 53 | discret::Event::DataChanged(_) => { 54 | let mut param = Parameters::new(); 55 | param.add("mdate", last_date).unwrap(); 56 | param.add("id", last_id.clone()).unwrap(); 57 | param.add("room_id", private_room.clone()).unwrap(); 58 | 59 | //get the latest data, the result is in the JSON format 60 | let result: String = event_app 61 | .query( 62 | "query { 63 | res: chat.Message( 64 | order_by(mdate asc, id asc), 65 | after($mdate, $id), 66 | room_id = $room_id 67 | ) { 68 | id 69 | mdate 70 | content 71 | } 72 | }", 73 | Some(param), 74 | ) 75 | .await 76 | .unwrap(); 77 | let mut query_result = ResultParser::new(&result).unwrap(); 78 | let res: Vec = query_result.take_array("res").unwrap(); 79 | for msg in res { 80 | last_date = msg.mdate; 81 | last_id = msg.id; 82 | println!("you said: {}", msg.content); 83 | } 84 | } 85 | _ => {} //ignores other events 86 | } 87 | } 88 | }); 89 | 90 | //data is inserted in your private room 91 | let private_room: String = app.private_room(); 92 | let stdin = io::stdin(); 93 | let mut line = String::new(); 94 | println!("{}", "Write Something!"); 95 | loop { 96 | stdin.read_line(&mut line).unwrap(); 97 | if line.starts_with("/q") { 98 | break; 99 | } 100 | line.pop(); 101 | let mut params = Parameters::new(); 102 | params.add("message", line.clone()).unwrap(); 103 | params.add("room_id", private_room.clone()).unwrap(); 104 | app.mutate( 105 | "mutate { 106 | chat.Message { 107 | room_id:$room_id 108 | content: $message 109 | } 110 | }", 111 | Some(params), 112 | ) 113 | .await 114 | .unwrap(); 115 | line.clear(); 116 | } 117 | } 118 | -------------------------------------------------------------------------------- /src/configuration.rs: -------------------------------------------------------------------------------- 1 | use serde::{Deserialize, Serialize}; 2 | 3 | /// 4 | /// Global configuration for the discret lib 5 | /// 6 | /// Default configuration is defined to try to limit the RAM memory usage to about 1 Gb at worst 7 | /// 8 | #[derive(Debug, Serialize, Deserialize, Clone)] 9 | pub struct Configuration { 10 | /// 11 | /// default: 4 12 | /// 13 | /// defines the 
global parallelism capabilities.
14 |     ///
15 |     /// this number impacts:
16 |     ///- the maximum number of rooms that can be synchronized in parallel,
17 |     ///- the number of database reading threads
18 |     ///- the number of signature verification threads
19 |     ///- the number of shared buffers used for reading and writing data on the network
20 |     ///- the depth of the channels that are used to transmit messages across services
21 |     ///
22 |     /// larger numbers will provide better performance at the cost of more memory usage.
23 |     /// having a number larger than the number of CPUs might not provide increased performance
24 |     /// TODO: can it be changed at runtime? For example to accommodate device status changes (on a metered network or wifi, on battery or recharging)
25 |     ///
26 |     pub parallelism: usize,
27 | 
28 |     ///
29 |     /// default: true, (enabled)
30 |     ///
31 |     /// When connecting with the same key_material on different devices,
32 |     /// those devices exchange their hardware fingerprints to check whether they are allowed to connect.
33 |     /// This adds an extra layer of security for the unlucky case where your secret material is shared by another person on the internet
34 |     /// (which could be relatively frequent, as users tend to use weak passwords).
35 |     ///
36 |     /// When connecting over the internet, new hardware is always silently rejected.
37 |     ///
38 |     /// However, on a local network we trust new hardware by default. This behavior can be disabled by setting 'auto_accept_local_device' to false.
39 |     /// In this case, when a new device is detected on the local network:
40 |     /// - a sys.AllowedHardware will be created with the status:'pending'
41 |     /// - a PendingHardware Event will be triggered
42 |     /// - the current connection attempt will be rejected
43 |     ///
44 |     /// It is up to the developer to intercept the event and decide what to do by updating the status to 'enabled' or 'disabled'
45 |     pub auto_accept_local_device: bool,
46 | 
47 |     ///
48 |     /// default: false, (disabled)
49 |     /// Defines the behavior of the system when it discovers a new peer while synchronizing a room.
50 |     ///
51 |     /// auto_allow_new_peers=true:
52 |     /// - I implicitly trust friends of my friends. It is easy to set up, but could cause problems.
53 |     ///
54 |     /// auto_allow_new_peers=false:
55 |     /// - Trust is given on a case-by-case basis; this is the recommended configuration.
56 |     ///
57 |     /// Let's imagine that you have manually invited Bob to chat with you. Bob wants you to meet Alice and creates a group chat with both of you.
58 |     /// During the synchronisation, your device detects a new peer (Alice) and adds it to the sys.Peer list.
59 |     ///
60 |     /// If auto_allow_new_peers is set to 'true', your device will allow Alice to directly connect with you.
61 |     /// It makes the network stronger, as Alice will be able to see your messages even if Bob is not connected.
62 |     /// But it comes at the cost of some privacy, because you now share your IP address with Alice.
63 |     /// In the case of large communities, this setup will make your allowed peer list very large, increasing the number of network connections and resource usage.
64 |     ///
65 |     /// If auto_allow_new_peers is set to 'false',
66 |     /// - a sys.AllowedPeer object is created in the private room, with the status set to 'pending'
67 |     /// - a PendingPeer event is triggered
68 |     ///
69 |     /// It is up to the developer to intercept the event and decide what to do by updating the status to 'enabled' or 'disabled'
70 |     ///
71 |     ///
72 |     pub auto_allow_new_peers: bool,
73 | 
74 |     ///
75 |     /// Default 256kb
76 |     ///
77 |     /// **!!WARNING!!** once your program is in production, decreasing this value will break the system.
78 |     /// No data will be lost but the system will not be able to synchronize objects that are larger than the reduced value.
79 |     /// //TODO: put it in the sys.Configuration and sanity check on startup
80 |     ///
81 |     /// Defines the maximum size of an entity object.
82 |     /// Object size should be kept relatively small to ensure efficient synchronisation.
83 |     ///
84 |     /// This parameter has a direct impact on the size of the buffers used to read and write data on the network.
85 |     /// Increasing this value will increase the RAM usage of the application.
86 |     ///
87 |     ///
88 |     pub max_object_size_in_kb: u64,
89 | 
90 |     ///
91 |     /// Default 2048
92 |     /// Sets the maximum cache size for the database reading threads. Increasing it can improve performance.
93 |     /// Every read thread consumes up to that amount, meaning that increasing the "parallelism" configuration will increase memory usage.
94 |     ///
95 |     pub read_cache_size_in_kb: usize,
96 | 
97 |     ///
98 |     /// Default 2048
99 |     /// Sets the maximum cache size for the database writing thread. Increasing it may improve performance.
100 |     ///
101 |     pub write_cache_size_in_kb: usize,
102 | 
103 |     ///
104 |     /// Default: 1024
105 |     ///
106 |     /// Write queries are buffered while the database thread is working.
107 |     /// When the database thread is ready, the buffer is sent and is processed in a single transaction.
108 |     /// This greatly increases the insertion and update rate compared to autocommit.
109 |     /// To get an idea of the performance difference,
110 |     /// a very simple benchmark on a laptop with 100 000 insertions gives:
111 |     /// Buffer size: 1      Inserts/second: 55    <- this is equivalent to autocommit
112 |     /// Buffer size: 10     Inserts/second: 500
113 |     /// Buffer size: 100    Inserts/second: 3000
114 |     /// Buffer size: 1000   Inserts/second: 32000
115 |     ///
116 |     /// If one buffered query fails, the transaction will be rolled back and every other query in the buffer will fail too.
117 |     /// This should not be an issue as INSERT queries are not expected to fail.
118 |     /// The only reasons for an insertion to fail are a bug or a system failure (like no more space available on disk),
119 |     /// and in both cases it is ok to fail the last insertion batch.
120 |     ///
121 |     /// This parameter can increase RAM usage.
122 |     ///
123 |     pub write_buffer_length: usize,
124 | 
125 |     ///
126 |     /// default 60000ms (60 seconds)
127 |     /// how often announcements are sent over the network
128 |     ///
129 |     pub announce_frequency_in_ms: u64,
130 | 
131 |     ///
132 |     /// enable multicast discovery
133 |     ///
134 |     pub enable_multicast: bool,
135 |     ///
136 |     /// default: 0.0.0.0
137 |     ///
138 |     /// Discret uses the IP multicast feature to discover peers on local networks.
139 |     /// On systems with multiple network interfaces, it might be necessary to provide the right IP address for multicast to work properly.
140 |     /// The default (let the OS choose for you) should work in most cases.
141 |     ///
142 |     pub multicast_ipv4_interface: String,
143 | 
144 |     ///
145 |     /// default: 224.0.0.224:22402
146 |     /// the multicast group that is used to perform peer discovery
147 |     ///
148 |     pub multicast_ipv4_group: String,
149 | 
150 |     ///
151 |     /// default: true
152 |     /// enable beacon peer discovery
153 |     ///
154 |     pub enable_beacons: bool,
155 | 
156 |     ///
157 |     /// list of Beacon servers that are used for peer discovery
158 |     ///
159 |     pub beacons: Vec<BeaconConfig>,
160 | 
161 |     ///
162 |     /// Default: false (disabled)
163 |     ///
164 |     /// Enable_memory_security: prevents memory from being written into swap and zeroises memory after free.
165 |     /// When this feature is disabled, locking/unlocking of the memory address only occurs for the internal SQLCipher
166 |     /// data structures used to store key material, and cryptographic structures.
167 |     /// source:
168 |     ///
169 |     /// Disabled by default because of a huge performance impact (about 50%).
170 |     /// Should only be used if your system requires a "paranoid" level of security.
171 |     ///
172 |     pub enable_database_memory_security: bool,
173 | }
174 | impl Default for Configuration {
175 |     fn default() -> Self {
176 |         Self {
177 |             parallelism: 4,
178 |             auto_accept_local_device: true,
179 |             auto_allow_new_peers: false,
180 |             max_object_size_in_kb: 256,
181 |             read_cache_size_in_kb: 2048,
182 |             write_cache_size_in_kb: 2048,
183 |             write_buffer_length: 1024,
184 |             announce_frequency_in_ms: 60000,
185 |             enable_multicast: true,
186 |             multicast_ipv4_interface: "0.0.0.0".to_string(),
187 |             multicast_ipv4_group: "224.0.0.224:22402".to_string(),
188 |             enable_beacons: true,
189 |             beacons: Vec::new(),
190 |             enable_database_memory_security: false,
191 |         }
192 |     }
193 | }
194 | 
195 | ///
196 | /// A beacon server
197 | ///
198 | /// Beacon servers are used to allow peers to discover each other on the Internet
199 | ///
200 | #[derive(Debug, Serialize, Deserialize, Clone)]
201 | pub struct BeaconConfig {
202 |     /// the server hostname
203 |     pub hostname: String,
204 |     /// the hash of the Beacon config certificate
205 |     pub cert_hash: String,
206 | }
207 | 
--------------------------------------------------------------------------------
/src/database/deletion.rs:
--------------------------------------------------------------------------------
1 | use crate::{
2 |     date_utils::now,
3 |     security::{uid_decode, Uid},
4 | };
5 | use std::sync::Arc;
6 | 
7 | use super::{
8 |     daily_log::DailyMutations,
9 |     edge::{Edge, EdgeDeletionEntry},
10 |     node::{Node, NodeDeletionEntry},
11 |     query_language::{deletion_parser::DeletionParser, parameter::Parameters},
12 |     sqlite_database::Writeable,
13 |     Result,
14 | };
15 | #[derive(Debug)]
16 | pub struct NodeDelete {
17 |     pub node: Node,
18 |     pub name: String,
19 |     // pub short_name: String,
20 |     pub date: i64,
21 | }
22 | #[derive(Debug)]
23 | pub struct EdgeDelete {
24 |     pub edge: Edge,
25 |     pub src_name: String,
26 |     pub room_id: Option<Uid>,
27 |     pub date: i64,
28 | }
29 | #[derive(Debug)]
30 | pub struct DeletionQuery {
31 |     pub nodes: Vec<NodeDelete>,
32 |     pub node_log: Vec<NodeDeletionEntry>,
33 |     pub updated_nodes: Vec<Node>,
34 |     pub edges: Vec<EdgeDelete>,
35 |     pub edge_log: Vec<EdgeDeletionEntry>,
36 | }
37 | impl DeletionQuery {
38 |     pub fn build(
39 |         parameters: &mut Parameters,
40 |         deletion: Arc<DeletionParser>,
41 |         conn: &rusqlite::Connection,
42 |     ) -> Result<Self> {
43 |         let date = now();
44 |         deletion.variables.validate_params(parameters)?;
45 |         let mut deletion_query = Self {
46 |             nodes: Vec::new(),
47 |             node_log: Vec::new(),
48 |             updated_nodes: Vec::new(),
49 |             edges: Vec::new(),
50 |             edge_log: Vec::new(),
51 |         };
52 | 
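        // For each entity deletion in the parsed query: load the node by id and entity,
        // then either schedule the whole node for deletion (no references listed) or only
        // remove the listed edges and refresh the node's modification date.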
for del in &deletion.deletions { 53 | let src = parameters 54 | .params 55 | .get(&del.id_param) 56 | .unwrap() 57 | .as_string() 58 | .unwrap(); 59 | 60 | let src = uid_decode(src)?; 61 | let node = Node::get_with_entity(&src, &del.short_name, conn)?; 62 | if let Some(node) = node { 63 | if del.references.is_empty() { 64 | deletion_query.nodes.push(NodeDelete { 65 | node: *node, 66 | name: del.name.clone(), 67 | // short_name: del.short_name.clone(), 68 | date, 69 | }) 70 | } else { 71 | for edge_deletion in &del.references { 72 | let dest = parameters 73 | .params 74 | .get(&edge_deletion.dest_param) 75 | .unwrap() 76 | .as_string() 77 | .unwrap(); 78 | 79 | let dest = uid_decode(dest)?; 80 | 81 | let edge = Edge::get(&src, &edge_deletion.label, &dest, conn)?; 82 | if let Some(edge) = edge { 83 | deletion_query.edges.push(EdgeDelete { 84 | edge: *edge, 85 | src_name: del.name.clone(), 86 | room_id: node.room_id, 87 | date, 88 | }); 89 | } 90 | } 91 | let mut node = *node; 92 | node.mdate = date; 93 | deletion_query.updated_nodes.push(node); 94 | } 95 | } 96 | } 97 | Ok(deletion_query) 98 | } 99 | 100 | pub fn delete( 101 | &mut self, 102 | conn: &rusqlite::Connection, 103 | ) -> std::result::Result<(), rusqlite::Error> { 104 | for edg in &self.edges { 105 | edg.edge.delete(conn)?; 106 | } 107 | for log in &mut self.edge_log { 108 | log.write(conn)?; 109 | } 110 | for nod in &self.nodes { 111 | Node::delete(&nod.node.id, conn)?; 112 | Edge::delete_src(&nod.node.id, conn)?; 113 | Edge::delete_dest(&nod.node.id, conn)?; 114 | } 115 | for update in &mut self.updated_nodes { 116 | update.write(conn, false, &None, &None)?; 117 | } 118 | for log in &mut self.node_log { 119 | log.write(conn)?; 120 | } 121 | Ok(()) 122 | } 123 | 124 | pub fn update_daily_logs(&self, daily_log: &mut DailyMutations) { 125 | for edg in &self.edge_log { 126 | daily_log.set_need_update(edg.room_id, &edg.src_entity, edg.deletion_date); 127 | } 128 | for log in &self.node_log { 129 | daily_log.set_need_update(log.room_id, &log.entity, log.mdate); 130 | daily_log.set_need_update(log.room_id, &log.entity, log.deletion_date); 131 | } 132 | } 133 | } 134 | 135 | #[cfg(test)] 136 | mod tests { 137 | 138 | use crate::database::{ 139 | mutation_query::MutationQuery, 140 | query::{PreparedQueries, Query}, 141 | query_language::{ 142 | data_model_parser::DataModel, mutation_parser::MutationParser, 143 | parameter::ParametersAdd, query_parser::QueryParser, 144 | }, 145 | sqlite_database::{prepare_connection, Writeable}, 146 | }; 147 | use rusqlite::Connection; 148 | 149 | use super::*; 150 | #[test] 151 | fn delete_node() { 152 | let mut data_model = DataModel::new(); 153 | data_model 154 | .update( 155 | " 156 | { 157 | Person { 158 | name : String, 159 | } 160 | }", 161 | ) 162 | .unwrap(); 163 | 164 | let mutation = MutationParser::parse( 165 | r#" 166 | mutate { 167 | P1: Person { name:"John" } 168 | P2: Person { name:"Alice" } 169 | P3: Person { name:"Bob" } 170 | } "#, 171 | &data_model, 172 | ) 173 | .unwrap(); 174 | let conn = Connection::open_in_memory().unwrap(); 175 | prepare_connection(&conn).unwrap(); 176 | 177 | let mut param = Parameters::new(); 178 | let mutation = Arc::new(mutation); 179 | let mut mutation_query = MutationQuery::execute(&mut param, mutation, &conn).unwrap(); 180 | mutation_query.write(&conn).unwrap(); 181 | 182 | let query_parser = QueryParser::parse( 183 | r#" 184 | query sample{ 185 | Person { 186 | id 187 | name 188 | } 189 | } 190 | "#, 191 | &data_model, 192 | ) 193 | .unwrap(); 194 | 195 | 
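        // Build the prepared SQL from the parsed query and read back all Person rows,
        // so their ids can be passed as parameters to the deletion query.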
let query = PreparedQueries::build(&query_parser).unwrap(); 196 | let param = Parameters::new(); 197 | let mut sql = Query { 198 | parameters: param, 199 | parser: Arc::new(query_parser), 200 | sql_queries: Arc::new(query), 201 | }; 202 | 203 | let result = sql.read(&conn).unwrap(); 204 | let value: serde_json::Value = serde_json::from_str(&result).unwrap(); 205 | let value = value.as_object().unwrap(); 206 | let persons = value.get("Person").unwrap().as_array().unwrap(); 207 | 208 | let mut param = Parameters::new(); 209 | for i in 0..persons.len() { 210 | let pers = persons[i].as_object().unwrap(); 211 | let id = pers.get("id").unwrap().as_str().unwrap(); 212 | param.add(&format!("id{}", i), id.to_string()).unwrap(); 213 | } 214 | 215 | let deletion = DeletionParser::parse( 216 | " 217 | delete delete_person { 218 | Person { $id0 } 219 | Person { $id1 } 220 | Person { $id2 } 221 | } 222 | ", 223 | &data_model, 224 | ) 225 | .unwrap(); 226 | let deletion = Arc::new(deletion); 227 | 228 | let mut delete = DeletionQuery::build(&mut param, deletion, &conn).unwrap(); 229 | delete.delete(&conn).unwrap(); 230 | 231 | let query_parser = QueryParser::parse( 232 | r#" 233 | query sample{ 234 | Person { 235 | id 236 | name 237 | } 238 | } 239 | "#, 240 | &data_model, 241 | ) 242 | .unwrap(); 243 | 244 | let query = PreparedQueries::build(&query_parser).unwrap(); 245 | let param = Parameters::new(); 246 | let mut sql = Query { 247 | parameters: param, 248 | parser: Arc::new(query_parser), 249 | sql_queries: Arc::new(query), 250 | }; 251 | 252 | let result = sql.read(&conn).unwrap(); 253 | let expected = "{\n\"Person\":[]\n}"; 254 | assert_eq!(result, expected); 255 | } 256 | 257 | #[test] 258 | fn delete_edge() { 259 | let mut data_model = DataModel::new(); 260 | data_model 261 | .update( 262 | " 263 | { 264 | Person { 265 | name : String, 266 | parents : [Person] 267 | } 268 | }", 269 | ) 270 | .unwrap(); 271 | 272 | let mutation = MutationParser::parse( 273 | r#" 274 | mutate { 275 | Person { name:"John" parents:[{name:"Alice"},{name:"Bob"}] } 276 | } "#, 277 | &data_model, 278 | ) 279 | .unwrap(); 280 | let conn = Connection::open_in_memory().unwrap(); 281 | prepare_connection(&conn).unwrap(); 282 | 283 | let mut param = Parameters::new(); 284 | let mutation = Arc::new(mutation); 285 | let mut mutation_query = MutationQuery::execute(&mut param, mutation, &conn).unwrap(); 286 | mutation_query.write(&conn).unwrap(); 287 | 288 | let query_parser = QueryParser::parse( 289 | r#" 290 | query sample{ 291 | Person(name="John") { 292 | id 293 | parents{id name} 294 | } 295 | } 296 | "#, 297 | &data_model, 298 | ) 299 | .unwrap(); 300 | 301 | let query = PreparedQueries::build(&query_parser).unwrap(); 302 | let param = Parameters::new(); 303 | let mut sql = Query { 304 | parameters: param, 305 | parser: Arc::new(query_parser), 306 | sql_queries: Arc::new(query), 307 | }; 308 | 309 | let result = sql.read(&conn).unwrap(); 310 | let value: serde_json::Value = serde_json::from_str(&result).unwrap(); 311 | let value = value.as_object().unwrap(); 312 | let persons = value.get("Person").unwrap().as_array().unwrap(); 313 | 314 | let john = persons[0].as_object().unwrap(); 315 | let src = john.get("id").unwrap().as_str().unwrap(); 316 | 317 | let mut param = Parameters::new(); 318 | param.add("src", src.to_string()).unwrap(); 319 | 320 | let parents = john.get("parents").unwrap().as_array().unwrap(); 321 | 322 | for i in 0..parents.len() { 323 | let pers = parents[i].as_object().unwrap(); 324 | let id = 
pers.get("id").unwrap().as_str().unwrap(); 325 | param.add(&format!("p{}", i), id.to_string()).unwrap(); 326 | } 327 | 328 | let deletion = DeletionParser::parse( 329 | " 330 | delete delete_person { 331 | Person { $src parents[$p0,$p1] } 332 | } 333 | ", 334 | &data_model, 335 | ) 336 | .unwrap(); 337 | let deletion = Arc::new(deletion); 338 | 339 | let mut delete = DeletionQuery::build(&mut param, deletion, &conn).unwrap(); 340 | delete.delete(&conn).unwrap(); 341 | 342 | let query_parser = QueryParser::parse( 343 | r#" 344 | query sample{ 345 | Person(name="John") { 346 | name 347 | parents{name} 348 | } 349 | } 350 | "#, 351 | &data_model, 352 | ) 353 | .unwrap(); 354 | 355 | let query = PreparedQueries::build(&query_parser).unwrap(); 356 | let param = Parameters::new(); 357 | let mut sql = Query { 358 | parameters: param, 359 | parser: Arc::new(query_parser), 360 | sql_queries: Arc::new(query), 361 | }; 362 | 363 | let result = sql.read(&conn).unwrap(); 364 | let expected = "{\n\"Person\":[]\n}"; 365 | assert_eq!(result, expected); 366 | } 367 | } 368 | -------------------------------------------------------------------------------- /src/database/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod authorisation_service; 2 | pub mod authorisation_service_test; 3 | pub mod daily_log; 4 | pub mod deletion; 5 | pub mod edge; 6 | pub mod graph_database; 7 | pub mod mutation_query; 8 | pub mod node; 9 | pub mod query; 10 | pub mod query_language; 11 | pub mod query_test; 12 | pub mod room; 13 | pub mod room_node; 14 | 15 | pub mod sqlite_database; 16 | pub mod system_entities; 17 | use std::collections::HashMap; 18 | 19 | use serde::{de::DeserializeOwned, Serialize}; 20 | use serde_json::Value; 21 | use thiserror::Error; 22 | 23 | use crate::security::{base64_encode, Uid}; 24 | pub type Result = std::result::Result; 25 | 26 | pub const VEC_OVERHEAD: u64 = 4; 27 | pub const MESSAGE_OVERHEAD: usize = 16; 28 | 29 | /// 30 | /// Helper structure to parse the JSON results 31 | /// 32 | pub struct ResultParser { 33 | parsed: Value, 34 | } 35 | impl ResultParser { 36 | pub fn new(result: &str) -> std::result::Result { 37 | let parsed: Value = serde_json::from_str(result)?; 38 | Ok(Self { parsed }) 39 | } 40 | /// 41 | /// Consumes the array found for the field and convert it to an array of the generic type T 42 | /// used to parse **query** results 43 | /// 44 | pub fn take_array( 45 | &mut self, 46 | field: &str, 47 | ) -> std::result::Result, crate::Error> { 48 | let mut re = Vec::new(); 49 | let obj = self.parsed.as_object_mut(); 50 | if obj.is_none() { 51 | return Err(crate::Error::from(Error::InvalidJsonObject("".to_string()))); 52 | } 53 | let obj = obj.unwrap(); 54 | let f = obj.remove(field); 55 | if f.is_none() { 56 | return Err(crate::Error::from(Error::MissingJsonField( 57 | field.to_string(), 58 | ))); 59 | } 60 | let f = f.unwrap(); 61 | 62 | if let Value::Array(field_array) = f { 63 | for value in field_array { 64 | let entry: T = serde_json::from_value(value)?; 65 | re.push(entry); 66 | } 67 | } else { 68 | return Err(crate::Error::from(Error::InvalidJSonArray( 69 | field.to_string(), 70 | ))); 71 | } 72 | 73 | Ok(re) 74 | } 75 | 76 | /// 77 | /// Consumes the object found for the field and convert it to an object of the generic type T 78 | /// used to parse **Mutate** query results 79 | /// 80 | pub fn take_object( 81 | &mut self, 82 | field: &str, 83 | ) -> std::result::Result { 84 | let obj = self.parsed.as_object_mut(); 85 | if 
obj.is_none() { 86 | return Err(crate::Error::from(Error::InvalidJsonObject("".to_string()))); 87 | } 88 | let obj = obj.unwrap(); 89 | let f = obj.remove(field); 90 | if f.is_none() { 91 | return Err(crate::Error::from(Error::MissingJsonField( 92 | field.to_string(), 93 | ))); 94 | } 95 | let f = f.unwrap(); 96 | 97 | let obj: T = serde_json::from_value(f)?; 98 | 99 | Ok(obj) 100 | } 101 | } 102 | 103 | #[derive(Serialize)] 104 | /// 105 | /// DataModification is the struct sent by the Event::DataChanged event 106 | /// the room map contains: 107 | /// - key is the identifier of the *Rooms* that have been modified 108 | /// - the data contains the modified Entity name and the mutation days (date without hour:minutes:second). 109 | /// 110 | pub struct DataModification { 111 | pub rooms: HashMap>>, 112 | } 113 | impl DataModification { 114 | pub fn add(&mut self, room: Uid, entity: String, date: i64) { 115 | let room = self.rooms.entry(base64_encode(&room)).or_default(); 116 | let entity = room.entry(entity).or_default(); 117 | entity.push(date); 118 | } 119 | } 120 | 121 | #[derive(Error, Debug)] 122 | pub enum Error { 123 | #[error(transparent)] 124 | Cryptography(#[from] crate::security::Error), 125 | 126 | #[error(transparent)] 127 | Database(#[from] rusqlite::Error), 128 | 129 | #[error(transparent)] 130 | Parsing(#[from] query_language::Error), 131 | 132 | #[error(transparent)] 133 | Json(#[from] serde_json::Error), 134 | 135 | #[error(transparent)] 136 | OneshotAsyncRecv(#[from] tokio::sync::oneshot::error::RecvError), 137 | 138 | #[error(transparent)] 139 | Io(#[from] std::io::Error), 140 | 141 | #[error(transparent)] 142 | Utf8(#[from] std::str::Utf8Error), 143 | 144 | #[error(transparent)] 145 | Bincode(#[from] Box), 146 | 147 | #[error("Node len:{0} is larger than the maximum authorised: {1}")] 148 | NodeTooBig(u64, u64), 149 | 150 | #[error("Edge len:{0} is larger than the maximum authorised: {1}")] 151 | EdgeTooBig(usize, usize), 152 | 153 | #[error("Invalid JSON Object {0}")] 154 | InvalidJsonObject(String), 155 | 156 | #[error("Cannot parse field {0} value into a {1}")] 157 | InvalidJsonFieldValue(String, String), 158 | 159 | #[error("Missing json field {0}")] 160 | MissingJsonField(String), 161 | 162 | #[error("Field is not an array {0}")] 163 | InvalidJSonArray(String), 164 | 165 | #[error("{0}")] 166 | DatabaseWrite(String), 167 | 168 | #[error("{0}")] 169 | InvalidNode(String), 170 | 171 | #[error("{0}")] 172 | ChannelSend(String), 173 | 174 | #[error("Entity cannot be empty")] 175 | EmptyNodeEntity(), 176 | 177 | #[error("Edge label cannot be empty")] 178 | EmptyEdgeLabel(), 179 | 180 | #[error("entity {0} with id {1} could not be found and cannot be updated")] 181 | InvalidMutationId(String, String), 182 | 183 | #[error("could not find id while querying entity {0}")] 184 | InvalidId(String), 185 | 186 | #[error("unknown entity {0} with id {1} and cannot be inserted in field {2}.{3}")] 187 | UnknownFieldEntity(String, String, String, String), 188 | 189 | #[error("unknown entity {0} with id {1}")] 190 | UnknownEntity(String, String), 191 | 192 | #[error("{0}")] 193 | Query(String), 194 | 195 | #[error("Missing parameter: '{0}', Cannot build SQL query parameters")] 196 | MissingParameter(String), 197 | 198 | #[error("Authorisation allready exists for this room")] 199 | AuthorisationExists(), 200 | 201 | #[error("This reference does not belong to this room")] 202 | NotBelongsTo(), 203 | 204 | #[error("Rights allreday exits for entity '{0}'")] 205 | RightsExists(String), 
206 | 207 | #[error("user not found in room {0}")] 208 | InvalidUser(String), 209 | 210 | #[error("A more recent User definition exists")] 211 | InvalidUserDate(), 212 | 213 | #[error("Entity Right validity date is set before an existing credential validity")] 214 | InvalidRightDate(), 215 | 216 | #[error("system entity '{0}' cannot be mutated ouside a Room mutation")] 217 | InvalidAuthorisationMutation(String), 218 | 219 | #[error("not enough right to mutate entity '{0}' in room '{1}' ")] 220 | AuthorisationRejected(String, String), 221 | 222 | #[error("Authorisation model forbids deletion of {0} in entity {1}")] 223 | CannotRemove(String, String), 224 | 225 | #[error("Unknown room id {0} ")] 226 | UnknownRoom(String), 227 | 228 | #[error("{0} Entity cannot have a room_id defined")] 229 | ForbiddenRoomId(String), 230 | 231 | #[error("Updates not allowed, Only inserts can be performed for this entity")] 232 | UpdateNotAllowed(), 233 | 234 | #[error("Deletes not allowed, Only inserts can be performed for this entity")] 235 | DeleteNotAllowed(), 236 | 237 | #[error("Entity right is missing an entity name")] 238 | EntityRightMissingName(), 239 | 240 | #[error("{0}")] 241 | InvalidFullNode(String), 242 | 243 | #[error("The requested node does not belong to the right room")] 244 | InvalidNodeRequest(), 245 | 246 | #[error("{0}")] 247 | InvalidPeerNode(String), 248 | 249 | #[error("Unknown Peer")] 250 | UnknownPeer(), 251 | 252 | #[error("{0}")] 253 | QueryParsing(String), 254 | 255 | #[error("An error occured while computing daily logs: {0}")] 256 | ComputeDailyLog(String), 257 | } 258 | #[cfg(test)] 259 | mod tests { 260 | use crate::database::{node::Node, VEC_OVERHEAD}; 261 | 262 | #[test] 263 | fn test_buffer_size() { 264 | let mut v = Vec::new(); 265 | let node = Node { 266 | ..Default::default() 267 | }; 268 | let node_size = bincode::serialized_size(&node).unwrap(); 269 | println!("node: {}", node_size); 270 | v.push(node); 271 | 272 | let vec_size = bincode::serialized_size(&v).unwrap(); 273 | println!("vec_: {}", vec_size); 274 | 275 | let capa = 2621440; 276 | let datav = vec![0; capa]; 277 | 278 | let mut v = Vec::new(); 279 | let mut size = 0; 280 | for i in 0..10 { 281 | let node = Node { 282 | _binary: Some(datav.clone()), 283 | _entity: i.to_string(), 284 | ..Default::default() 285 | }; 286 | 287 | size += bincode::serialized_size(&node).unwrap(); 288 | size += VEC_OVERHEAD; 289 | v.push(node); 290 | } 291 | println!("comp: {}", size); 292 | println!("repo: {}", bincode::serialized_size(&v).unwrap()); 293 | } 294 | } 295 | -------------------------------------------------------------------------------- /src/database/query_language/data_model.pest: -------------------------------------------------------------------------------- 1 | // add DEFAULT to be used with the ifnull() sqlite function 2 | // partial index every entity id 3 | 4 | WHITESPACE = _{ " " | "\t" | "\r" | "\n" } 5 | COMMENT = _{ "//" ~ (!NEWLINE ~ ANY)* ~ NEWLINE } 6 | 7 | identifier = @{ (LETTER | NUMBER | "_")+ } 8 | 9 | namespace_entity = @{ (LETTER | NUMBER | "_" | ".")+ } 10 | 11 | deprecated = @{ ^"@deprecated" } 12 | comma = { "," } 13 | 14 | deprecable_identifier = { deprecated? ~ identifier } 15 | 16 | datamodel = { SOI ~ namespace* ~ EOI } 17 | entity = { deprecable_identifier ~ entity_param? ~ "{" ~ entry ~ (comma ~ entry)* ~ comma? ~ "}" } 18 | namespace = { identifier? ~ "{" ~ entity* ~ "}" } 19 | 20 | entity_param = { 21 | "(" ~ ")" 22 | | "(" ~ disable_feature ~ (comma ~ disable_feature)* ~ comma? 
~ ")" 23 | } 24 | disable_feature = { no_full_text_index } 25 | 26 | no_full_text_index = { "no_full_text_index" } 27 | 28 | nullable = { ^"nullable" } 29 | default = { ^"default" ~ default_value } 30 | default_value = { float | integer | boolean | string } 31 | scalar_type = { ^"Integer" | ^"Float" | ^"Boolean" | ^"String" | ^"Base64" | ^"Json" } 32 | scalar_field = { scalar_type ~ (nullable | default)? } 33 | entity_array = { "[" ~ namespace_entity ~ "]" ~ (nullable)? } 34 | entity_field = { namespace_entity ~ (nullable)? } 35 | field = { deprecable_identifier ~ ":" ~ (entity_array | scalar_field | entity_field) } 36 | 37 | index = { ^"index" ~ "(" ~ identifier ~ (comma ~ identifier)* ~ comma? ~ ")" } 38 | entry = { index | field } 39 | 40 | string = ${ "\"" ~ inner ~ "\"" } 41 | inner = @{ char* } 42 | char = { 43 | !("\"" | "\\") ~ ANY 44 | | "\\" ~ ("\"" | "\\" | "/" | "b" | "f" | "n" | "r" | "t") 45 | | "\\" ~ ("u" ~ ASCII_HEX_DIGIT{4}) 46 | } 47 | 48 | float = @{ 49 | "-"? ~ ("0" | ASCII_NONZERO_DIGIT ~ ASCII_DIGIT*) ~ ("." ~ ASCII_DIGIT*) ~ (^"e" ~ ("+" | "-")? ~ ASCII_DIGIT+)? 50 | } 51 | 52 | integer = @{ 53 | "-"? ~ ASCII_DIGIT+ 54 | } 55 | boolean = { ^"true" | ^"false" } 56 | -------------------------------------------------------------------------------- /src/database/query_language/deletion.pest: -------------------------------------------------------------------------------- 1 | /* 2 | deletion delete{ 3 | 4 | //remove a references in the parent field 5 | person { 6 | $id, 7 | parent: [$sid2, $id3] 8 | } 9 | 10 | //remove person 11 | person{ 12 | $sid 13 | } 14 | } 15 | */ 16 | WHITESPACE = _{ " " | "\t" | "\r" | "\n" } 17 | COMMENT = _{ "//" ~ (!NEWLINE ~ ANY)* ~ NEWLINE } 18 | 19 | identifier = @{ (LETTER | NUMBER | "_")+ } 20 | namespace_entity = @{ (LETTER | NUMBER | "_" | ".")+ } 21 | 22 | variable = @{ "$" ~ identifier } 23 | entity_name = { namespace_entity ~ (":" ~ namespace_entity)? } 24 | deletion = { SOI ~ deletion_name ~ "{" ~ entity+ ~ "}" ~ EOI } 25 | deletion_name = { "delete" ~ (identifier)? } 26 | entity = { entity_name ~ "{" ~ id_field ~ array_field* ~ "}" } 27 | 28 | id_field = { variable } 29 | 30 | array_field = { identifier ~ "[" ~ variable ~ ("," ~ variable)* ~ ","? 
~ "]" } 31 | -------------------------------------------------------------------------------- /src/database/query_language/deletion_parser.rs: -------------------------------------------------------------------------------- 1 | use super::{data_model_parser::DataModel, parameter::Variables, Error}; 2 | use super::{FieldType, VariableType}; 3 | use pest::iterators::Pair; 4 | use pest::Parser; 5 | use pest_derive::Parser; 6 | 7 | #[derive(Parser)] 8 | #[grammar = "database/query_language/deletion.pest"] 9 | struct PestParser; 10 | 11 | #[derive(Debug)] 12 | pub struct DeletionParser { 13 | pub name: String, 14 | pub variables: Variables, 15 | pub deletions: Vec, 16 | } 17 | 18 | #[derive(Debug)] 19 | pub struct EntityDeletion { 20 | pub name: String, 21 | pub short_name: String, 22 | pub alias: Option, 23 | pub id_param: String, 24 | pub references: Vec, 25 | } 26 | impl Default for EntityDeletion { 27 | fn default() -> Self { 28 | EntityDeletion::new() 29 | } 30 | } 31 | impl EntityDeletion { 32 | pub fn new() -> Self { 33 | Self { 34 | name: "".to_string(), 35 | short_name: "".to_string(), 36 | alias: None, 37 | id_param: "".to_string(), 38 | references: Vec::new(), 39 | } 40 | } 41 | } 42 | 43 | #[derive(Debug)] 44 | pub struct ReferenceDeletion { 45 | // pub entity_name: String, 46 | pub label: String, 47 | pub dest_param: String, 48 | } 49 | impl Default for DeletionParser { 50 | fn default() -> Self { 51 | DeletionParser::new() 52 | } 53 | } 54 | impl DeletionParser { 55 | pub fn new() -> Self { 56 | Self { 57 | name: "".to_string(), 58 | variables: Variables::new(), 59 | deletions: Vec::new(), 60 | } 61 | } 62 | 63 | pub fn parse(query: &str, data_model: &DataModel) -> Result { 64 | let parse = match PestParser::parse(Rule::deletion, query) { 65 | Err(e) => { 66 | let message = format!("{}", e); 67 | return Err(Error::Parser(message)); 68 | } 69 | Ok(f) => f, 70 | } 71 | .next() 72 | .unwrap(); 73 | 74 | let mut deletion = DeletionParser::new(); 75 | 76 | match parse.as_rule() { 77 | Rule::deletion => { 78 | let mut deletion_pairs = parse.into_inner(); 79 | 80 | let deletion_name = deletion_pairs.next().unwrap(); 81 | if let Some(name) = deletion_name.into_inner().next() { 82 | deletion.name = name.as_str().to_string(); 83 | } 84 | 85 | for entity_pair in deletion_pairs { 86 | match entity_pair.as_rule() { 87 | Rule::entity => { 88 | let ent = Self::parse_entity( 89 | data_model, 90 | entity_pair, 91 | &mut deletion.variables, 92 | )?; 93 | deletion.deletions.push(ent); 94 | } 95 | Rule::EOI => {} 96 | _ => unreachable!(), 97 | } 98 | } 99 | } 100 | _ => unreachable!(), 101 | } 102 | 103 | Ok(deletion) 104 | } 105 | 106 | fn parse_entity( 107 | data_model: &DataModel, 108 | pair: Pair<'_, Rule>, 109 | variables: &mut Variables, 110 | ) -> Result { 111 | let mut entity = EntityDeletion::new(); 112 | 113 | let mut entity_pairs = pair.into_inner(); 114 | let mut name_pair = entity_pairs.next().unwrap().into_inner(); 115 | let entity_name = if name_pair.len() == 2 { 116 | let alias = name_pair.next().unwrap().as_str().to_string(); 117 | if alias.starts_with('_') { 118 | return Err(Error::InvalidName(alias)); 119 | } 120 | 121 | entity.alias = Some(alias); 122 | name_pair.next().unwrap().as_str() 123 | } else { 124 | name_pair.next().unwrap().as_str() 125 | }; 126 | let model_entity = data_model.get_entity(entity_name)?; 127 | entity.name = entity_name.to_string(); 128 | entity.short_name = model_entity.short_name.clone(); 129 | 130 | for entity_pair in entity_pairs { 131 | match 
entity_pair.as_rule() { 132 | Rule::id_field => { 133 | let var = &entity_pair.as_str()[1..]; //remove $ 134 | variables.add(var, VariableType::Base64(false))?; 135 | entity.id_param = var.to_string(); 136 | } 137 | Rule::array_field => { 138 | let mut array_field_pairs = entity_pair.into_inner(); 139 | let name = array_field_pairs.next().unwrap().as_str().to_string(); 140 | let model_field = match model_entity.fields.get(&name) { 141 | None => { 142 | return Err(Error::InvalidQuery(format!( 143 | "Unknown field '{}' in entity '{}'", 144 | name, entity_name 145 | ))) 146 | } 147 | Some(e) => match e.field_type { 148 | FieldType::Array(_) => e, 149 | _ => { 150 | return Err(Error::InvalidQuery(format!( 151 | "'{}' in entity '{}' is not defined as an array in the data model", 152 | name, entity_name 153 | ))) 154 | } 155 | }, 156 | }; 157 | 158 | for param_pair in array_field_pairs { 159 | let id_param = param_pair.as_str()[1..].to_string(); //remove $ 160 | 161 | variables.add(&id_param, VariableType::Base64(false))?; 162 | 163 | entity.references.push(ReferenceDeletion { 164 | label: model_field.short_name.clone(), 165 | dest_param: id_param, 166 | // entity_name: entity_name.to_string(), 167 | }); 168 | } 169 | } 170 | 171 | _ => unreachable!(), 172 | } 173 | } 174 | 175 | Ok(entity) 176 | } 177 | } 178 | 179 | #[cfg(test)] 180 | mod tests { 181 | 182 | use super::*; 183 | #[test] 184 | fn parse_valid_deletion() { 185 | let mut data_model = DataModel::new(); 186 | data_model 187 | .update( 188 | " 189 | { 190 | Person { 191 | name : String , 192 | surname : String , 193 | parent : [Person], 194 | pet : [Pet] 195 | } 196 | 197 | Pet { 198 | name : String , 199 | } 200 | }", 201 | ) 202 | .unwrap(); 203 | 204 | let deletion = DeletionParser::parse( 205 | " 206 | //comment 207 | delete delete_person { 208 | //comment 209 | del1: Person { 210 | //comment 211 | $id //comment 212 | parent[$id2, $id3] 213 | pet[$id4] 214 | } 215 | 216 | Pet { 217 | $id3 218 | } 219 | } 220 | 221 | ", 222 | &data_model, 223 | ) 224 | .unwrap(); 225 | 226 | assert_eq!("delete_person", deletion.name); 227 | 228 | assert_eq!(2, deletion.deletions.len()); 229 | 230 | let query = deletion.deletions.get(0).unwrap(); 231 | assert_eq!("Person", query.name); 232 | assert_eq!("0", query.short_name); 233 | let alias = query.alias.as_ref().unwrap(); 234 | assert_eq!("del1", alias); 235 | assert_eq!("id", query.id_param); 236 | assert_eq!(3, query.references.len()); 237 | 238 | let reference = query.references.get(0).unwrap(); 239 | assert_eq!("34", reference.label); 240 | assert_eq!("id2".to_string(), reference.dest_param); 241 | 242 | let reference = query.references.get(1).unwrap(); 243 | assert_eq!("34", reference.label); 244 | assert_eq!("id3".to_string(), reference.dest_param); 245 | 246 | let reference = query.references.get(2).unwrap(); 247 | assert_eq!("35", reference.label); 248 | assert_eq!("id4".to_string(), reference.dest_param); 249 | 250 | let query = deletion.deletions.get(1).unwrap(); 251 | assert_eq!("Pet", query.name); 252 | assert_eq!("1", query.short_name); 253 | assert_eq!(None, query.alias); 254 | assert_eq!("id3", query.id_param); 255 | assert_eq!(0, query.references.len()); 256 | } 257 | 258 | #[test] 259 | fn parse_namespace_datamodel() { 260 | let mut data_model = DataModel::new(); 261 | data_model 262 | .update( 263 | " 264 | ns { 265 | Person { 266 | name : String , 267 | surname : String , 268 | parent : [ns.Person], 269 | pet : [ns.Pet] 270 | } 271 | 272 | Pet { 273 | name : String, 274 | } 275 
| } 276 | ", 277 | ) 278 | .unwrap(); 279 | 280 | let _ = DeletionParser::parse( 281 | " 282 | delete { 283 | ns.Pet { 284 | $id3 285 | } 286 | } 287 | 288 | ", 289 | &data_model, 290 | ) 291 | .expect("'Pet' is corectly defined"); 292 | 293 | let _ = DeletionParser::parse( 294 | " 295 | delete { 296 | pets : ns.Pet { 297 | $id3 298 | } 299 | } 300 | 301 | ", 302 | &data_model, 303 | ) 304 | .expect("'Pet' is corectly defined"); 305 | } 306 | 307 | #[test] 308 | fn parse_invalid_datamodel() { 309 | let mut data_model = DataModel::new(); 310 | data_model 311 | .update( 312 | " 313 | { 314 | Person { 315 | name : String , 316 | surname : String , 317 | parent : [Person], 318 | pet : [Pet] 319 | } 320 | 321 | Pet { 322 | name : String, 323 | } 324 | } 325 | ", 326 | ) 327 | .unwrap(); 328 | 329 | let _ = DeletionParser::parse( 330 | " 331 | delete { 332 | pet { 333 | $id3 334 | } 335 | } 336 | 337 | ", 338 | &data_model, 339 | ) 340 | .expect_err("Entity name is case sensitives. 'pet' is not a valid entity but 'Pet' is"); 341 | 342 | let _ = DeletionParser::parse( 343 | " 344 | delete { 345 | Pet { 346 | $id3 347 | } 348 | } 349 | 350 | ", 351 | &data_model, 352 | ) 353 | .expect("'Pet' is corectly defined"); 354 | 355 | let _ = DeletionParser::parse( 356 | " 357 | delete { 358 | Person { 359 | $id1 360 | Parent[$id2] 361 | } 362 | } 363 | 364 | ", 365 | &data_model, 366 | ) 367 | .expect_err( 368 | "Entity field is case sensitives. 'Parent' is not a valid entity but 'parent' is", 369 | ); 370 | 371 | let _ = DeletionParser::parse( 372 | " 373 | delete { 374 | Person { 375 | $id1 376 | parent[$id2] 377 | } 378 | } 379 | 380 | ", 381 | &data_model, 382 | ) 383 | .expect("'Parent' is defined correctly"); 384 | 385 | let _ = DeletionParser::parse( 386 | " 387 | delete { 388 | Person { 389 | $id1 390 | parent[] 391 | } 392 | } 393 | 394 | ", 395 | &data_model, 396 | ) 397 | .expect_err( 398 | "'parent' cannot be empty to delete it compretely set it to null in a mutation query", 399 | ); 400 | } 401 | 402 | #[test] 403 | fn parse_invalid_field_type() { 404 | let mut data_model = DataModel::new(); 405 | data_model 406 | .update( 407 | " 408 | { 409 | Person { 410 | name : String , 411 | surname : String , 412 | parent : [Person], 413 | pet : [Person] 414 | } 415 | }", 416 | ) 417 | .unwrap(); 418 | 419 | let _ = DeletionParser::parse( 420 | " 421 | delete delete_person{ 422 | Person { 423 | $id1 424 | surname[$id2] 425 | } 426 | }", 427 | &data_model, 428 | ) 429 | .expect_err("'surname' type is not defined as an array"); 430 | 431 | let _ = DeletionParser::parse( 432 | " 433 | delete delete_person{ 434 | Person { 435 | $id1 436 | parent[$id2] 437 | } 438 | }", 439 | &data_model, 440 | ) 441 | .expect("'parent' type is an array"); 442 | } 443 | } 444 | -------------------------------------------------------------------------------- /src/database/query_language/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod data_model_parser; 2 | pub mod data_model_parser_test; 3 | pub mod deletion_parser; 4 | pub mod mutation_parser; 5 | pub mod parameter; 6 | pub mod query_parser; 7 | pub mod query_parser_test; 8 | use std::fmt; 9 | 10 | use serde::{Deserialize, Serialize}; 11 | use serde_json::Number; 12 | use thiserror::Error; 13 | 14 | #[derive(Debug, Clone)] 15 | pub enum FieldValue { 16 | Variable(String), 17 | Value(ParamValue), 18 | } 19 | 20 | #[derive(Debug, Clone, Serialize, Deserialize)] 21 | pub enum ParamValue { 22 | Boolean(bool), 23 | 
Integer(i64), 24 | Float(f64), 25 | String(String), 26 | Binary(String), 27 | Null, 28 | } 29 | impl ParamValue { 30 | pub fn as_boolean(&self) -> Option { 31 | if let Self::Boolean(e) = self { 32 | Some(*e) 33 | } else { 34 | None 35 | } 36 | } 37 | 38 | pub fn as_i64(&self) -> Option { 39 | match self { 40 | Self::Float(e) => Some(*e as i64), 41 | Self::Integer(e) => Some(*e), 42 | _ => None, 43 | } 44 | } 45 | 46 | pub fn as_f64(&self) -> Option { 47 | match self { 48 | Self::Float(e) => Some(*e), 49 | Self::Integer(e) => Some(*e as f64), 50 | _ => None, 51 | } 52 | } 53 | 54 | pub fn as_string(&self) -> Option<&String> { 55 | match self { 56 | ParamValue::String(e) => Some(e), 57 | ParamValue::Binary(e) => Some(e), 58 | _ => None, 59 | } 60 | } 61 | 62 | pub fn as_serde_json_value(&self) -> Result { 63 | match self { 64 | ParamValue::Boolean(v) => Ok(serde_json::Value::Bool(*v)), 65 | ParamValue::Integer(v) => Ok(serde_json::Value::Number(Number::from(*v))), 66 | ParamValue::Float(v) => { 67 | let number = Number::from_f64(*v); 68 | match number { 69 | Some(e) => Ok(serde_json::Value::Number(e)), 70 | None => Err(Error::InvalidFloat(*v)), 71 | } 72 | } 73 | ParamValue::String(v) => Ok(serde_json::Value::String(String::from(v))), 74 | ParamValue::Binary(v) => Ok(serde_json::Value::String(String::from(v))), 75 | ParamValue::Null => Ok(serde_json::Value::Null), 76 | } 77 | } 78 | } 79 | 80 | #[derive(Debug, PartialEq, Eq)] 81 | pub enum VariableType { 82 | Boolean(bool), 83 | Float(bool), 84 | Base64(bool), 85 | Json(bool), 86 | Integer(bool), 87 | String(bool), 88 | Binary(bool), 89 | Invalid, 90 | } 91 | impl fmt::Display for VariableType { 92 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 93 | write!(f, "{:?}", self) 94 | } 95 | } 96 | 97 | #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] 98 | pub enum FieldType { 99 | Array(String), 100 | Entity(String), 101 | Boolean, 102 | Float, 103 | Base64, 104 | Integer, 105 | String, 106 | Json, 107 | } 108 | impl fmt::Display for FieldType { 109 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 110 | write!(f, "{:?}", self) 111 | } 112 | } 113 | 114 | #[derive(Error, Debug)] 115 | pub enum Error { 116 | #[error(transparent)] 117 | Json(#[from] serde_json::Error), 118 | 119 | #[error(transparent)] 120 | BoolParsing(#[from] std::str::ParseBoolError), 121 | 122 | #[error(transparent)] 123 | IntParsing(#[from] std::num::ParseIntError), 124 | 125 | #[error(transparent)] 126 | TryfromInt(#[from] std::num::TryFromIntError), 127 | 128 | #[error("{0}")] 129 | Parser(String), 130 | 131 | #[error("{0}")] 132 | InvalidQuery(String), 133 | 134 | #[error("Namespace: '{0}' does not exists")] 135 | NamespaceNotFound(String), 136 | 137 | #[error("{0}")] 138 | NamespaceUpdate(String), 139 | 140 | #[error("Entity: '{0}' does not exists")] 141 | EntityNotFound(String), 142 | 143 | #[error("Index '{0}' allready exists in entity {1}.{2}")] 144 | IndexAllreadyExists(String, String, String), 145 | 146 | #[error("'{0}' is allready defined as a '{1}' and is conflicting with a field that requires an '{2}' ")] 147 | ConflictingVariableType(String, String, String), 148 | 149 | #[error("Field {0} requires type '{1}' and but is used with type '{2}' ")] 150 | InvalidFieldType(String, String, String), 151 | 152 | #[error("'{0}' is not nullable")] 153 | NotNullable(String), 154 | 155 | #[error("{0}")] 156 | DuplicatedParameters(String), 157 | 158 | #[error("entity {0} is allready defined")] 159 | DuplicatedEntity(String), 160 | 161 | 
#[error("field {0} is allready defined")] 162 | DuplicatedField(String), 163 | 164 | #[error("field {0} is conflicting with a system field, you have to change its name")] 165 | SystemFieldConflict(String), 166 | 167 | #[error("Namespace: '{0}' does not exists")] 168 | MissingNamespace(String), 169 | 170 | #[error("Entity {0} is missing in the new data model")] 171 | MissingEntity(String), 172 | 173 | #[error("Field {0}.{1} is missing in the new data model")] 174 | MissingField(String, String), 175 | 176 | #[error("New field definition {0}.{1} is not nullable and needs a default value to ensure backward compatibility")] 177 | MissingDefaultValue(String, String), 178 | 179 | #[error("New field definition {0}.{1} is is tring to change the field type. old type:{2} new type:{3}")] 180 | CannotUpdateFieldType(String, String, String, String), 181 | 182 | #[error("Field {0}.{1} is in postion {2} and was expected in position '{3}'")] 183 | InvalidFieldOrdering(String, String, usize, usize), 184 | 185 | #[error("Namespace {0} is in postion {1} and was expected in position '{2}'")] 186 | InvalidNamespaceOrdering(String, usize, usize), 187 | 188 | #[error("Entity {0} is in postion {1} and was expected in position '{2}'")] 189 | InvalidEntityOrdering(String, String, String), 190 | 191 | #[error(transparent)] 192 | FloatParsing(#[from] std::num::ParseFloatError), 193 | 194 | #[error("filter on entity {0} can only use operations 'is null' of 'is not null' ")] 195 | InvalidEntityFilter(String), 196 | 197 | #[error("Parameter: '{0}' is missing")] 198 | MissingParameter(String), 199 | 200 | #[error("'{0}' is not a base64 value")] 201 | InvalidBase64(String), 202 | 203 | #[error("'{0}' is not valid JSON value")] 204 | InvalidJson(String), 205 | 206 | #[error("'{0}' is not a {1}. value:{2}")] 207 | ConflictingParameterType(String, String, String), 208 | 209 | #[error("field {0} default value is a '{1}' is not a {2}")] 210 | InvalidDefaultValue(String, String, String), 211 | 212 | #[error("float {0} is not a valid JSON float")] 213 | InvalidFloat(f64), 214 | 215 | #[error("name {0} cannot start with '_'. Names starting with '_' are reserved for the system")] 216 | InvalidName(String), 217 | 218 | #[error(" '{0}' is a reserved keyword")] 219 | ReservedKeyword(String), 220 | 221 | #[error( 222 | "{0}.{1} is required to create the entity. 
It is not nullable and has no default value" 223 | )] 224 | MissingUpdateField(String, String), 225 | 226 | #[error("'after' and 'before' parameter number '{0}' must have the '{1}' type to match the order by fields")] 227 | InvalidPagingValue(usize, String), 228 | 229 | #[error("'after' and 'before' parameters cannot be used on aggregate queries")] 230 | InvalidPagingQuery(), 231 | 232 | #[error("Nullable field '{0}' not found in the query")] 233 | UnknownNullableField(String), 234 | 235 | #[error("Field '{0}' has the type '{1}' and nullable is only valid for types that references an entity ")] 236 | InvalidNullableField(String, String), 237 | 238 | #[error("the provided parameters could not be parsed in a valid JSON object")] 239 | InvalidJsonParamObject(), 240 | 241 | #[error("the provided parameters '{0}' cannot be an object or an array ")] 242 | InvalidJsonParamField(String), 243 | } 244 | -------------------------------------------------------------------------------- /src/database/query_language/mutation.pest: -------------------------------------------------------------------------------- 1 | /* 2 | mutation mutationName{ 3 | 4 | 5 | friend { 6 | id : $id 7 | name: "name" //update name 8 | truc: {id:$idtruc} 9 | atruc: [{id:$id}, {name:"qdqs"}] //add $idtruc and create name then add it 10 | } 11 | } 12 | } 13 | */ 14 | WHITESPACE = _{ " " | "\t" | "\r" | "\n" } 15 | COMMENT = _{ "//" ~ (!NEWLINE ~ ANY)* ~ NEWLINE } 16 | 17 | identifier = @{ (LETTER | NUMBER | "_")+ } 18 | namespace_entity = @{ (LETTER | NUMBER | "_" | ".")+ } 19 | 20 | variable = @{ "$" ~ identifier } 21 | comma = { "," } 22 | 23 | mutation = { SOI ~ mutation_name ~ "{" ~ entity+ ~ "}" ~ EOI } 24 | mutation_name = { "mutate" ~ (identifier)? } 25 | 26 | entity = { entity_name ~ "{" ~ field* ~ "}" } 27 | entity_name = { namespace_entity ~ (":" ~ namespace_entity)? } 28 | 29 | field = { identifier ~ ":" ~ value } 30 | 31 | value = { variable | entity_ref | entity_array | string | float | integer | boolean | null } 32 | 33 | entity_ref = { "{" ~ field+ ~ "}" } 34 | entity_array = { "[" ~ entity_ref ~ (comma ~ entity_ref)* ~ comma? ~ "]" } 35 | 36 | string = ${ "\"" ~ inner ~ "\"" } 37 | inner = @{ char* } 38 | char = { 39 | !("\"" | "\\") ~ ANY 40 | | "\\" ~ ("\"" | "\\" | "/" | "b" | "f" | "n" | "r" | "t") 41 | | "\\" ~ ("u" ~ ASCII_HEX_DIGIT{4}) 42 | } 43 | 44 | float = @{ 45 | "-"? ~ ("0" | ASCII_NONZERO_DIGIT ~ ASCII_DIGIT*) ~ ("." ~ ASCII_DIGIT*) ~ (^"e" ~ ("+" | "-")? ~ ASCII_DIGIT+)? 46 | } 47 | 48 | integer = @{ 49 | "-"? ~ ASCII_DIGIT+ 50 | } 51 | boolean = { ^"true" | ^"false" } 52 | null = { ^"null" } 53 | -------------------------------------------------------------------------------- /src/database/query_language/query.pest: -------------------------------------------------------------------------------- 1 | WHITESPACE = _{ " " | "\t" | "\r" | "\n" } 2 | COMMENT = _{ "//" ~ (!NEWLINE ~ ANY)* ~ NEWLINE } 3 | comma = { "," } 4 | 5 | identifier = @{ (LETTER | NUMBER | "_")+ } 6 | namespace_entity = @{ (LETTER | NUMBER | "_" | ".")+ } 7 | 8 | variable = @{ "$" ~ identifier } 9 | 10 | query = { SOI ~ query_name ~ "{" ~ entity+ ~ "}" ~ EOI } 11 | query_name = { "query" ~ (identifier)? } 12 | 13 | entity = { entity_name ~ entity_param? ~ "{" ~ field+ ~ "}" } 14 | entity_name = { namespace_entity ~ (":" ~ namespace_entity)? } 15 | 16 | named_field = { identifier ~ (":" ~ identifier)? 
} 17 | field = { entity | json_field | function | named_field } 18 | 19 | entity_param = { 20 | "(" ~ ")" 21 | | "(" ~ param ~ (comma ~ param)* ~ comma? ~ ")" 22 | } 23 | 24 | param = { search | order_by | first | skip | before | after | nullable | json_filter | filter } 25 | 26 | search = { "search" ~ "(" ~ search_value ~ ")" } 27 | search_value = { variable | string } 28 | 29 | order_by = { "order_by" ~ "(" ~ order_param ~ (comma ~ order_param)* ~ comma? ~ ")" } 30 | order_param = { identifier ~ order_direction } 31 | order_direction = { ^"asc" | ^"desc" } 32 | 33 | first = { "first " ~ limit_value } 34 | skip = { "skip " ~ limit_value } 35 | limit_value = { unsigned_int | variable } 36 | 37 | before = { "before" ~ "(" ~ before_value ~ ("," ~ before_value)* ~ ","? ~ ")" } 38 | after = { "after" ~ "(" ~ before_value ~ ("," ~ before_value)* ~ ","? ~ ")" } 39 | before_value = { variable | float | string | integer | boolean } 40 | 41 | nullable = { "nullable" ~ "(" ~ identifier ~ ("," ~ identifier)* ~ ","? ~ ")" } 42 | 43 | filter = { 44 | identifier ~ (gt_eq | neq | lt_eq | eq | gt | lt) ~ filter_value 45 | } 46 | 47 | json_filter = { json_selector ~ (gt_eq | neq | lt_eq | eq | gt | lt) ~ filter_value } 48 | 49 | filter_value = { variable | float | string | integer | boolean | null } 50 | 51 | eq = { "=" } 52 | neq = { "!=" } 53 | gt = { ">" } 54 | gt_eq = { ">=" } 55 | lt = { "<" } 56 | lt_eq = { "<=" } 57 | 58 | string = ${ "\"" ~ inner ~ "\"" } 59 | inner = @{ char* } 60 | char = { 61 | !("\"" | "\\") ~ ANY 62 | | "\\" ~ ("\"" | "\\" | "/" | "b" | "f" | "n" | "r" | "t") 63 | | "\\" ~ ("u" ~ ASCII_HEX_DIGIT{4}) 64 | } 65 | 66 | float = @{ 67 | "-"? ~ ("0" | ASCII_NONZERO_DIGIT ~ ASCII_DIGIT*) ~ ("." ~ ASCII_DIGIT*) ~ (^"e" ~ ("+" | "-")? ~ ASCII_DIGIT+)? 68 | } 69 | 70 | integer = @{ 71 | "-"? ~ ASCII_DIGIT+ 72 | } 73 | boolean = { ^"true" | ^"false" } 74 | 75 | unsigned_int = @{ ASCII_DIGIT+ } 76 | 77 | null = { ^"null" } 78 | 79 | function = { identifier ~ ":" ~ function_list } 80 | function_list = { avg_fn | count_fn | max_fn | min_fn | sum_fn } 81 | 82 | avg_fn = { "avg" ~ "(" ~ identifier ~ ")" } 83 | count_fn = { "count" ~ "(" ~ ")" } 84 | max_fn = { "max" ~ "(" ~ identifier ~ ")" } 85 | min_fn = { "min" ~ "(" ~ identifier ~ ")" } 86 | sum_fn = { "sum" ~ "(" ~ identifier ~ ")" } 87 | 88 | json_field = { identifier ~ ":" ~ json_selector } 89 | json_selector = ${ identifier ~ ("->") ~ (json_object_selector | json_array_selector) } 90 | 91 | json_array_selector = @{ unsigned_int } 92 | json_object_selector = @{ "$" ~ json_object_query* } 93 | json_object_query = @{ "." 
~ ((identifier ~ "[" ~ unsigned_int ~ "]") | identifier) } 94 | -------------------------------------------------------------------------------- /src/date_utils.rs: -------------------------------------------------------------------------------- 1 | use chrono::{DateTime, Duration, NaiveDateTime, Utc}; 2 | 3 | /// 4 | /// current time in milliseconds since unix epoch 5 | /// 6 | pub fn now() -> i64 { 7 | let dt = Utc::now(); 8 | dt.timestamp_millis() 9 | } 10 | 11 | //returns the date without time 12 | pub fn date(date_time: i64) -> i64 { 13 | let date = DateTime::from_timestamp_millis(date_time).unwrap(); 14 | let ds: NaiveDateTime = date.date_naive().and_hms_opt(0, 0, 0).unwrap(); 15 | ds.and_utc().timestamp_millis() 16 | } 17 | 18 | //returns the next day without time 19 | pub fn date_next_day(date_time: i64) -> i64 { 20 | let date = DateTime::from_timestamp_millis(date_time).unwrap(); 21 | let date = date + Duration::days(1); 22 | let ds: NaiveDateTime = date.date_naive().and_hms_opt(0, 0, 0).unwrap(); 23 | ds.and_utc().timestamp_millis() 24 | } 25 | -------------------------------------------------------------------------------- /src/discret.rs: -------------------------------------------------------------------------------- 1 | //! Discret: Create local first, peer to peer application (P2P) using a GraphQL inspired API 2 | //! 3 | //! *Discret* hides the complexity of peer to peer networks and reduces it to a data access problem. 4 | //! 5 | //! The API allows you to: 6 | //! - manage your data using a GraphQL syntax, 7 | //! - add access right to your data (in graphQL too), 8 | //! - create and accept invites from other peers. 9 | //! 10 | //! *Discret* will synchronize your data with other peers, depending on the access right you have given to those peers. 11 | //! 12 | //! More details and tutorials are available in the [documentation site](https://discretlib.github.io/doc/) 13 | //! 14 | //! # Example 15 | //! The following example creates a very basic chat application. If you build and run this program on several different folder or local network devices 16 | //! you should be able to chat with yourself. 17 | //! ```ignore 18 | //! use std::{io, path::PathBuf}; 19 | //! use discret::{ 20 | //! derive_pass_phrase, zero_uid, Configuration, Discret, 21 | //! Parameters, ParametersAdd, ResultParser, 22 | //! }; 23 | //! use serde::Deserialize; 24 | //! 25 | //! //the application unique identifier 26 | //! const APPLICATION_KEY: &str = "github.com/discretlib/rust_example_simple_chat"; 27 | //! 28 | //! #[tokio::main] 29 | //! async fn main() { 30 | //! //define a datamodel 31 | //! let model = "chat { 32 | //! Message{ 33 | //! content:String 34 | //! } 35 | //! }"; 36 | //! //this struct is used to parse the query result 37 | //! #[derive(Deserialize)] 38 | //! struct Chat { 39 | //! pub id: String, 40 | //! pub mdate: i64, 41 | //! pub content: String, 42 | //! } 43 | //! 44 | //! let path: PathBuf = "test_data".into(); //where data is stored 45 | //! 46 | //! //used to derives all necessary secrets 47 | //! let key_material: [u8; 32] = derive_pass_phrase("my login", "my password"); 48 | //! 49 | //! //start the discret application 50 | //! let app: Discret = Discret::new( 51 | //! model, 52 | //! APPLICATION_KEY, 53 | //! &key_material, 54 | //! path, 55 | //! Configuration::default(), 56 | //! ) 57 | //! .await 58 | //! .unwrap(); 59 | //! 60 | //! //listen for events 61 | //! let mut events = app.subscribe_for_events().await; 62 | //! 
let event_app: Discret = app.clone(); 63 | //! tokio::spawn(async move { 64 | //! let mut last_date = 0; 65 | //! let mut last_id = zero_uid(); 66 | //! 67 | //! let private_room: String = event_app.private_room(); 68 | //! while let Ok(event) = events.recv().await { 69 | //! match event { 70 | //! //triggered when data is modified 71 | //! discret::Event::DataChanged(_) => { 72 | //! let mut param = Parameters::new(); 73 | //! param.add("mdate", last_date).unwrap(); 74 | //! param.add("id", last_id.clone()).unwrap(); 75 | //! param.add("room_id", private_room.clone()).unwrap(); 76 | //! 77 | //! //get the latest data, the result is in the JSON format 78 | //! let result: String = event_app 79 | //! .query( 80 | //! "query { 81 | //! res: chat.Message( 82 | //! order_by(mdate asc, id asc), 83 | //! after($mdate, $id), 84 | //! room_id = $room_id 85 | //! ) { 86 | //! id 87 | //! mdate 88 | //! content 89 | //! } 90 | //! }", 91 | //! Some(param), 92 | //! ) 93 | //! .await 94 | //! .unwrap(); 95 | //! let mut query_result = ResultParser::new(&result).unwrap(); 96 | //! let res: Vec = query_result.take_array("res").unwrap(); 97 | //! for msg in res { 98 | //! last_date = msg.mdate; 99 | //! last_id = msg.id; 100 | //! println!("you said: {}", msg.content); 101 | //! } 102 | //! } 103 | //! _ => {} //ignores other events 104 | //! } 105 | //! } 106 | //! }); 107 | //! 108 | //! //data is inserted in your private room 109 | //! let private_room: String = app.private_room(); 110 | //! let stdin = io::stdin(); 111 | //! let mut line = String::new(); 112 | //! println!("{}", "Write Something!"); 113 | //! loop { 114 | //! stdin.read_line(&mut line).unwrap(); 115 | //! if line.starts_with("/q") { 116 | //! break; 117 | //! } 118 | //! line.pop(); 119 | //! let mut params = Parameters::new(); 120 | //! params.add("message", line.clone()).unwrap(); 121 | //! params.add("room_id", private_room.clone()).unwrap(); 122 | //! app.mutate( 123 | //! "mutate { 124 | //! chat.Message { 125 | //! room_id:$room_id 126 | //! content: $message 127 | //! } 128 | //! }", 129 | //! Some(params), 130 | //! ) 131 | //! .await 132 | //! .unwrap(); 133 | //! line.clear(); 134 | //! } 135 | //! } 136 | //! ``` 137 | //! 138 | //! # Features 139 | //! *Discret* provides a blocking (DiscretBlocking) and a non blocking (Discret) API. 140 | //! 141 | //! On local network, peer connection happens without requiring any server. 142 | //! For peer to peer connection over the Internet, a discovery server is needed to allow peers to discover each others. 143 | //! The discret lib provides an implementation of the discovery server named Beacon. 144 | //! 145 | //! The library provides strong security features out of the box: 146 | //! - data is encrypted at rest by using the SQLCipher database 147 | //! - encrypted communication using the QUIC protocol 148 | //! - data integrity: each rows is signed with the peer signing key, making it very hard to synchronize bad data 149 | //! - access control via Rooms 150 | //! 151 | //! # Limitations 152 | //! As data lives on your devices, Discret should only be used for applications with data generated by "real person", with hundreds of peers at most. 153 | //! It is not suited for large scale applications and communities with thousands of peoples. 154 | //! 155 | //! It currently only supports text data but supports for file synchronization is planned. 156 | //! 157 | //! 
Connection over the internet is not 100% guaranteed to work, because certain types of enterprise firewalls will block the connection attempts. 158 | //! 159 | //! Please be warned that P2P connections leak your IP address and should only be used with trusted peers. 160 | //! This leak exposes you to the following threats: 161 | //! - Distributed denial of service (DDOS) 162 | //! - Leak of your "Real World" location via geolocation services. 163 | //! - State sponsored surveillance: A state watching the network could determine which peer connects to which, giving a lot of knowledge about your social network. 164 | //! 165 | //! # Platform Support 166 | //! - Linux: Tested 167 | //! - Windows: Tested 168 | //! - macOS: not tested, should work 169 | //! - Android: works on the aarch64 architecture. Architectures i686 and x86_64 have some low level linker issues when working with Flutter. 170 | //! - iOS: not tested 171 | //! 172 | 173 | use std::path::PathBuf; 174 | use std::sync::{Arc, Mutex}; 175 | 176 | use tokio::sync::{mpsc, oneshot}; 177 | use tokio::{runtime::Runtime, sync::broadcast}; 178 | type Result = std::result::Result; 179 | 180 | use crate::{ 181 | configuration::Configuration, 182 | database::{ 183 | graph_database::{GraphDatabaseService, MutateReceiver}, 184 | query_language::parameter::Parameters, 185 | system_entities::DefaultRoom, 186 | }, 187 | event_service::Event, 188 | event_service::EventService, 189 | peer_connection_service::{PeerConnectionMessage, PeerConnectionService}, 190 | security::{ 191 | base64_encode, default_uid, derive_key, uid_encode, HardwareFingerprint, MeetingSecret, Uid, 192 | }, 193 | signature_verification_service::SignatureVerificationService, 194 | Error, 195 | }; 196 | 197 | /// 198 | /// returns the zero-filled uid in base64 199 | /// 200 | /// uids are the unique identifiers used by the Discret internal database 201 | /// 202 | pub fn zero_uid() -> String { 203 | uid_encode(&default_uid()) 204 | } 205 | /// 206 | /// Verify that the Discret database defined by the parameters exists in the folder 207 | /// 208 | pub fn database_exists( 209 | app_key: &str, 210 | key_material: &[u8; 32], 211 | data_folder: &PathBuf, 212 | ) -> std::result::Result { 213 | GraphDatabaseService::database_exists(app_key, key_material, data_folder) 214 | } 215 | 216 | /// 217 | /// All the parameters available after Discret initialisation 218 | /// 219 | #[derive(Clone)] 220 | pub struct DiscretParams { 221 | pub app_key: String, 222 | pub verifying_key: Vec, 223 | pub private_room_id: Uid, 224 | pub hardware_fingerprint: HardwareFingerprint, 225 | pub configuration: Configuration, 226 | } 227 | 228 | /// 229 | /// All the discret services 230 | /// 231 | #[derive(Clone)] 232 | pub struct DiscretServices { 233 | pub events: EventService, 234 | pub database: GraphDatabaseService, 235 | pub signature_verification: SignatureVerificationService, 236 | } 237 | 238 | /// 239 | /// The main entry point for the Discret Library 240 | /// 241 | #[derive(Clone)] 242 | pub struct Discret { 243 | params: DiscretParams, 244 | services: DiscretServices, 245 | peers: PeerConnectionService, 246 | } 247 | impl Discret { 248 | /// Starts the Discret engine with the following parameters: 249 | ///- datamodel: defines the data types that can be used by discret, 250 | ///- app_key: a unique identifier for the application that **cannot** change once the application is in production 251 | ///- key_material: a master secret that will be used with the app_key to derive all the secrets required 
by discret 252 | ///- data_folder: where data is stored 253 | ///- configuration: the configuration stucture 254 | pub async fn new( 255 | datamodel: &str, 256 | app_key: &str, 257 | key_material: &[u8; 32], 258 | data_folder: PathBuf, 259 | configuration: Configuration, 260 | ) -> std::result::Result { 261 | let mut hardware_file = data_folder.clone(); 262 | hardware_file.push("hardware_fingerprint.bin"); 263 | let hardware_fingerprint = HardwareFingerprint::get(&hardware_file).unwrap(); 264 | let meeting_secret_key = 265 | derive_key(&format!("{}{}", "MEETING_SECRET", app_key,), key_material); 266 | let meeting_secret = MeetingSecret::new(meeting_secret_key); 267 | 268 | let pub_key = meeting_secret.public_key(); 269 | let public_key = pub_key.as_bytes(); 270 | 271 | let event_service: EventService = EventService::new(); 272 | let (database_service, verifying_key, private_room_id) = GraphDatabaseService::start( 273 | app_key, 274 | datamodel, 275 | key_material, 276 | public_key, 277 | data_folder.clone(), 278 | &configuration, 279 | event_service.clone(), 280 | ) 281 | .await?; 282 | 283 | let verify_service = SignatureVerificationService::start(configuration.parallelism); 284 | 285 | let params = DiscretParams { 286 | app_key: app_key.to_string(), 287 | verifying_key, 288 | private_room_id, 289 | hardware_fingerprint, 290 | configuration, 291 | }; 292 | 293 | let services = DiscretServices { 294 | events: event_service, 295 | database: database_service, 296 | signature_verification: verify_service, 297 | }; 298 | 299 | let peers = PeerConnectionService::start(¶ms, &services, meeting_secret).await?; 300 | 301 | Ok(Self { 302 | params, 303 | services, 304 | peers, 305 | }) 306 | } 307 | 308 | /// 309 | /// Performs a Deletion query 310 | /// 311 | pub async fn delete(&self, d: &str, p: Option) -> std::result::Result<(), Error> { 312 | match self.services.database.delete(d, p).await { 313 | Ok(_) => Ok(()), 314 | Err(e) => Err(e.into()), 315 | } 316 | } 317 | 318 | /// 319 | /// Performs a mutation query and returns the inserted tuple in a JSON String 320 | /// 321 | pub async fn mutate( 322 | &self, 323 | m: &str, 324 | p: Option, 325 | ) -> std::result::Result { 326 | Ok(self.services.database.mutate(m, p).await?) 327 | } 328 | 329 | /// 330 | /// Allow to send a stream of mutation. 331 | /// 332 | /// Usefull for batch insertion as you do have to wait for the mutation to finished before sending another. 333 | /// 334 | /// The receiver retrieve an internal representation of the mutation query to avoid the performance cost of creating the JSON result, wich is probably unecessary when doing batch insert. 335 | /// To get the JSON, call the MutationQuery.result() method 336 | /// 337 | pub fn mutation_stream(&self) -> (mpsc::Sender<(String, Option)>, MutateReceiver) { 338 | self.services.database.mutation_stream() 339 | } 340 | 341 | /// 342 | /// Perform a query to retrieve results from the database. 343 | /// returns the result in a JSON object 344 | /// 345 | pub async fn query( 346 | &self, 347 | q: &str, 348 | p: Option, 349 | ) -> std::result::Result { 350 | Ok(self.services.database.query(q, p).await?) 351 | } 352 | 353 | /// 354 | /// Create an invitation 355 | /// - default_room: once the inviation is accepted, the new Peer will be granted access to this room. 356 | /// 357 | /// The returned byte array have to be sent manually to another peer. 
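/// A minimal sketch of the exchange (the names `alice` and `bob` are hypothetical `Discret` instances; how the bytes travel between them is up to the application):
/// ```ignore
/// // on the inviting peer
/// let invitation: Vec<u8> = alice.invite(None).await.unwrap();
/// // ...transfer `invitation` out of band (file, QR code, existing chat)...
///
/// // on the invited peer
/// bob.accept_invite(invitation).await.unwrap();
/// ```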
358 | /// 359 | pub async fn invite(&self, default_room: Option) -> Result> { 360 | let (reply, receive) = oneshot::channel::>>(); 361 | let _ = self 362 | .peers 363 | .sender 364 | .send(PeerConnectionMessage::CreateInvite(default_room, reply)) 365 | .await; 366 | receive.await? 367 | } 368 | 369 | /// 370 | /// Accept an invitation 371 | /// Once an invitation is accepted, the two peers will be able to discover themselves and start exchanging data 372 | /// 373 | pub async fn accept_invite(&self, invitation: Vec) -> std::result::Result<(), Error> { 374 | let _ = self 375 | .peers 376 | .sender 377 | .send(PeerConnectionMessage::AcceptInvite(invitation)) 378 | .await; 379 | 380 | Ok(()) 381 | } 382 | 383 | /// 384 | /// This is is your Public identity. 385 | /// 386 | /// It is derived from the provided key_material and app_key. 387 | /// 388 | /// Every data you create will be signed using the associated signing_key, and 389 | /// other peers will use this verifying key to ensure the integrity of the data 390 | /// 391 | pub fn verifying_key(&self) -> String { 392 | base64_encode(&self.params.verifying_key) 393 | } 394 | 395 | /// 396 | /// This special room is used internally to store system data. 397 | /// you are allowed to used it to store any kind of private data that will only be synchronized with your devices. 398 | /// 399 | pub fn private_room(&self) -> String { 400 | base64_encode(&self.params.private_room_id) 401 | } 402 | 403 | /// 404 | /// Subscribe for the event queue 405 | /// 406 | pub async fn subscribe_for_events(&self) -> broadcast::Receiver { 407 | self.services.events.subcribe().await 408 | } 409 | 410 | /// 411 | /// Update the existing data model definition with a new one. 412 | /// 413 | /// returns the JSON representation of the updated datamodel. 414 | /// 415 | /// Can be usefull to create a data model editor. 416 | /// 417 | pub async fn update_data_model(&self, datamodel: &str) -> std::result::Result { 418 | Ok(self.services.database.update_data_model(datamodel).await?) 419 | } 420 | 421 | /// 422 | /// Provide a JSON representation of the datamodel 423 | /// 424 | /// The JSON contains the model plain text along with the internal datamodel representation. 425 | /// 426 | /// Can be usefull to create a data model editor. 427 | /// 428 | pub async fn data_model(&self) -> std::result::Result { 429 | Ok(self.services.database.datamodel().await?) 430 | } 431 | } 432 | 433 | struct BlockingRuntime { 434 | rt: Option, 435 | } 436 | impl BlockingRuntime { 437 | pub fn new() -> Self { 438 | Self { rt: None } 439 | } 440 | pub fn rt(&mut self) -> std::result::Result<&Runtime, Error> { 441 | if self.rt.is_none() { 442 | self.rt = Some( 443 | tokio::runtime::Builder::new_multi_thread() 444 | .enable_all() 445 | .build()?, 446 | ); 447 | } 448 | Ok(self.rt.as_ref().unwrap()) 449 | } 450 | } 451 | 452 | lazy_static::lazy_static! 
{ 453 | static ref TOKIO_BLOCKING: Arc> = 454 | Arc::new(Mutex::new(BlockingRuntime::new())); 455 | } 456 | /// 457 | /// The main entry point for the Discret Library, with a blocking API 458 | /// Provides a blocking API 459 | /// 460 | #[derive(Clone)] 461 | pub struct DiscretBlocking { 462 | discret: Discret, 463 | } 464 | impl DiscretBlocking { 465 | /// Starts the Discret engine with the following parameters: 466 | ///- datamodel: define the data types that can be used by discret, 467 | ///- app_key: a unique identifier for the application that **cannot not** change once the application is in produciton 468 | ///- key_material: a master secret that will be used wit the app_key to derive all the secret required by discret 469 | ///- data_folder: where data is stored 470 | ///- configuration: the configuration stucture 471 | pub fn new( 472 | datamodel: &str, 473 | app_key: &str, 474 | key_material: &[u8; 32], 475 | data_folder: PathBuf, 476 | configuration: Configuration, 477 | ) -> std::result::Result { 478 | let discret = TOKIO_BLOCKING.lock().unwrap().rt()?.block_on(Discret::new( 479 | datamodel, 480 | app_key, 481 | key_material, 482 | data_folder, 483 | configuration, 484 | ))?; 485 | 486 | Ok(Self { discret }) 487 | } 488 | 489 | /// 490 | /// Performs a Deletion query 491 | /// 492 | pub fn delete(&self, d: &str, p: Option) -> std::result::Result<(), Error> { 493 | TOKIO_BLOCKING 494 | .lock() 495 | .unwrap() 496 | .rt()? 497 | .block_on(self.discret.delete(d, p)) 498 | } 499 | 500 | /// 501 | /// Performs a mutation query and returns the inserted tuple in a JSON String 502 | /// 503 | pub fn mutate(&self, m: &str, p: Option) -> std::result::Result { 504 | TOKIO_BLOCKING 505 | .lock() 506 | .unwrap() 507 | .rt()? 508 | .block_on(self.discret.mutate(m, p)) 509 | } 510 | 511 | /// 512 | /// Allow to send a stream of mutation. 513 | /// 514 | /// Usefull for batch insertion as you do have to wait for the mutation to finished before sending another. 515 | /// 516 | /// The receiver retrieve an internal representation of the mutation query to avoid the performance cost of creating the JSON result, wich is probably unecessary when doing batch insert. 517 | /// To get the JSON, call the MutationQuery.result() method 518 | /// 519 | pub fn mutation_stream(&self) -> (mpsc::Sender<(String, Option)>, MutateReceiver) { 520 | self.discret.mutation_stream() 521 | } 522 | 523 | /// 524 | /// Perform a query to retrieve results from the database. 525 | /// returns the result in a JSON object 526 | /// 527 | pub fn query(&self, q: &str, p: Option) -> std::result::Result { 528 | TOKIO_BLOCKING 529 | .lock() 530 | .unwrap() 531 | .rt()? 532 | .block_on(self.discret.query(q, p)) 533 | } 534 | 535 | /// 536 | /// Create an invitation 537 | /// - default_room: once the inviation is accepted, the new Peer will be granted access to this room. 538 | /// 539 | /// The returned byte array have to be sent manually to another peer. 540 | /// 541 | pub async fn invite(&self, default_room: Option) -> Result> { 542 | TOKIO_BLOCKING 543 | .lock() 544 | .unwrap() 545 | .rt()? 546 | .block_on(self.discret.invite(default_room)) 547 | } 548 | 549 | /// 550 | /// Accept an invitation 551 | /// Once an invitation is accepted, the two peers will be able to discover themselves and start exchanging data 552 | /// 553 | pub async fn accept_invite(&self, invitation: Vec) -> std::result::Result<(), Error> { 554 | TOKIO_BLOCKING 555 | .lock() 556 | .unwrap() 557 | .rt()? 
558 | .block_on(self.discret.accept_invite(invitation)) 559 | } 560 | 561 | /// 562 | /// This is your Public identity. 563 | /// 564 | /// It is derived from the provided key_material and app_key. 565 | /// 566 | /// Every data you create will be signed using the associated signing_key, and 567 | /// other peers will use this verifying key to ensure the integrity of the data 568 | /// 569 | pub fn verifying_key(&self) -> String { 570 | self.discret.verifying_key() 571 | } 572 | /// 573 | /// This special room is used internally to store system data. 574 | /// you are allowed to use it to store any kind of private data that will only be synchronized with your devices. 575 | /// 576 | pub fn private_room(&self) -> String { 577 | self.discret.private_room() 578 | } 579 | 580 | /// 581 | /// Subscribe for the event queue 582 | /// 583 | pub fn subscribe_for_events(&self) -> broadcast::Receiver { 584 | TOKIO_BLOCKING 585 | .lock() 586 | .unwrap() 587 | .rt() 588 | .unwrap() 589 | .block_on(self.discret.subscribe_for_events()) 590 | } 591 | 592 | /// 593 | /// Update the existing data model definition with a new one. 594 | /// 595 | /// returns the JSON representation of the updated datamodel. 596 | /// 597 | /// Can be useful to create a data model editor. 598 | /// 599 | pub fn update_data_model(&self, datamodel: &str) -> std::result::Result { 600 | TOKIO_BLOCKING 601 | .lock() 602 | .unwrap() 603 | .rt()? 604 | .block_on(self.discret.update_data_model(datamodel)) 605 | } 606 | 607 | /// 608 | /// Provide a JSON representation of the datamodel 609 | /// 610 | /// The JSON contains the model plain text along with the internal datamodel representation. 611 | /// 612 | /// Can be useful to create a data model editor. 613 | /// 614 | pub fn data_model(&self) -> std::result::Result { 615 | TOKIO_BLOCKING 616 | .lock() 617 | .unwrap() 618 | .rt()? 619 | .block_on(self.discret.data_model()) 620 | } 621 | } 622 | -------------------------------------------------------------------------------- /src/event_service.rs: -------------------------------------------------------------------------------- 1 | use std::sync::Arc; 2 | 3 | use tokio::sync::{broadcast, mpsc, oneshot}; 4 | 5 | use crate::{ 6 | base64_encode, 7 | database::{room::Room, DataModification}, 8 | security::Uid, 9 | }; 10 | 11 | pub enum EventServiceMessage { 12 | Subscribe(oneshot::Sender>), 13 | DataChanged(DataModification), 14 | RoomModified(Room), 15 | PeerConnected(Vec, i64, Uid), 16 | PeerDisconnected(Vec, i64, Uid), 17 | RoomSynchronized(Uid), 18 | PendingPeer(), 19 | PendingHardware(), 20 | } 21 | 22 | /// 23 | /// The list of events that are sent by Discret 24 | /// 25 | #[derive(Clone)] 26 | pub enum Event { 27 | /// 28 | /// This event is triggered whenever data is modified or inserted. Data is inserted/deleted in batches and this event describes each batch. 29 | /// **data_modification** contains a *HashMap*: 30 | /// - the key is the identifier of the *Rooms* that have been modified 31 | /// - the data contains the modified Entity name and the mutation days (date without hour:minutes:second). 32 | DataChanged(Arc), 33 | 34 | /// 35 | /// This event is triggered when a *Room* is modified. 36 | /// 37 | RoomModified(Arc), 38 | 39 | /// This event is triggered when a peer has connected successfully to your device. 
40 | /// - **verifying_key**: the peer verifying key, 41 | /// - **date**: the connection date, 42 | ///- **connection_id**: the unique identifier of the connection 43 | PeerConnected(Vec, i64, String), 44 | 45 | /// This event is triggered when a peer have been disconnected 46 | /// - **verifying_key**: the peer verifying key, 47 | /// - **date**: the connection date, 48 | /// - **connection_id**: the unique identifier of the connection 49 | PeerDisconnected(Vec, i64, String), 50 | 51 | /// This event is triggered when a *Room* has been synchronized. 52 | /// - **room_id**: the *Room* identifier 53 | RoomSynchronized(String), 54 | 55 | /// This event is triggered when a new peer is found when synchronising a **Room**. 56 | PendingPeer(), 57 | 58 | /// This event is triggered when a new device is detected. 59 | PendingHardware(), 60 | } 61 | 62 | #[derive(Clone)] 63 | pub struct EventService { 64 | pub sender: mpsc::Sender, 65 | } 66 | impl EventService { 67 | pub fn new() -> Self { 68 | let (sender, mut receiver) = mpsc::channel(100); 69 | 70 | let (broadcast, _) = broadcast::channel(16); 71 | 72 | tokio::spawn(async move { 73 | while let Some(msg) = receiver.recv().await { 74 | match msg { 75 | EventServiceMessage::Subscribe(reply) => { 76 | let _ = reply.send(broadcast.subscribe()); 77 | } 78 | EventServiceMessage::DataChanged(res) => { 79 | let _ = broadcast.send(Event::DataChanged(Arc::new(res))); 80 | } 81 | EventServiceMessage::RoomModified(room) => { 82 | let _ = broadcast.send(Event::RoomModified(Arc::new(room))); 83 | } 84 | EventServiceMessage::PeerConnected(verifying_key, date, connection_id) => { 85 | let _ = broadcast.send(Event::PeerConnected( 86 | verifying_key, 87 | date, 88 | base64_encode(&connection_id), 89 | )); 90 | } 91 | EventServiceMessage::PeerDisconnected(verifying_key, date, connection_id) => { 92 | let _ = broadcast.send(Event::PeerDisconnected( 93 | verifying_key, 94 | date, 95 | base64_encode(&connection_id), 96 | )); 97 | } 98 | EventServiceMessage::RoomSynchronized(room) => { 99 | let _ = broadcast.send(Event::RoomSynchronized(base64_encode(&room))); 100 | } 101 | EventServiceMessage::PendingPeer() => { 102 | let _ = broadcast.send(Event::PendingPeer()); 103 | } 104 | EventServiceMessage::PendingHardware() => { 105 | let _ = broadcast.send(Event::PendingHardware()); 106 | } 107 | }; 108 | } 109 | }); 110 | 111 | Self { sender } 112 | } 113 | 114 | pub async fn subcribe(&self) -> broadcast::Receiver { 115 | let (sender, receiver) = oneshot::channel::>(); 116 | let _ = self 117 | .sender 118 | .send(EventServiceMessage::Subscribe(sender)) 119 | .await; 120 | 121 | receiver.await.unwrap() 122 | } 123 | 124 | pub async fn notify(&self, msg: EventServiceMessage) { 125 | let _ = self.sender.send(msg).await; 126 | } 127 | } 128 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | //! Discret: Create local first, peer to peer application (P2P) using a GraphQL inspired API 2 | //! 3 | //! *Discret* hides the complexity of peer to peer networks and reduces it to a data access problem. 4 | //! 5 | //! It provides the following features: 6 | //! - A database layer based on SQLite that is managed using a GraphQL inspired API, 7 | //! - An authentication and authorization layer to define who can access data, 8 | //! - A Peer to Peer layer that allows you to invite Peers. 9 | //! 10 | //! 
*Discret* will automatically synchronize your data with other peers, based on the access rights you have defined. 11 | //! 12 | //! More details and tutorials are available in the [documentation site](https://discretlib.github.io/doc/) 13 | //! 14 | //! # Example 15 | //! The following example creates a very basic chat application. If you build and run this program on several different folder or local network devices 16 | //! you should be able to chat with yourself. 17 | //! ```ignore 18 | //! use std::{io, path::PathBuf}; 19 | //! use discret::{ 20 | //! derive_pass_phrase, zero_uid, Configuration, Discret, 21 | //! Parameters, ParametersAdd, ResultParser, 22 | //! }; 23 | //! use serde::Deserialize; 24 | //! 25 | //! //the application unique identifier 26 | //! const APPLICATION_KEY: &str = "github.com/discretlib/rust_example_simple_chat"; 27 | //! 28 | //! #[tokio::main] 29 | //! async fn main() { 30 | //! //define a datamodel 31 | //! let model = "chat { 32 | //! Message{ 33 | //! content:String 34 | //! } 35 | //! }"; 36 | //! //this struct is used to parse the query result 37 | //! #[derive(Deserialize)] 38 | //! struct Chat { 39 | //! pub id: String, 40 | //! pub mdate: i64, 41 | //! pub content: String, 42 | //! } 43 | //! 44 | //! let path: PathBuf = "test_data".into(); //where data is stored 45 | //! 46 | //! //used to derives all necessary secrets 47 | //! let key_material: [u8; 32] = derive_pass_phrase("my login", "my password"); 48 | //! 49 | //! //start the discret application 50 | //! let app: Discret = Discret::new( 51 | //! model, 52 | //! APPLICATION_KEY, 53 | //! &key_material, 54 | //! path, 55 | //! Configuration::default(), 56 | //! ) 57 | //! .await 58 | //! .unwrap(); 59 | //! 60 | //! //listen for events 61 | //! let mut events = app.subscribe_for_events().await; 62 | //! let event_app: Discret = app.clone(); 63 | //! tokio::spawn(async move { 64 | //! let mut last_date = 0; 65 | //! let mut last_id = zero_uid(); 66 | //! 67 | //! let private_room: String = event_app.private_room(); 68 | //! while let Ok(event) = events.recv().await { 69 | //! match event { 70 | //! //triggered when data is modified 71 | //! discret::Event::DataChanged(_) => { 72 | //! let mut param = Parameters::new(); 73 | //! param.add("mdate", last_date).unwrap(); 74 | //! param.add("id", last_id.clone()).unwrap(); 75 | //! param.add("room_id", private_room.clone()).unwrap(); 76 | //! 77 | //! //get the latest data, the result is in the JSON format 78 | //! let result: String = event_app 79 | //! .query( 80 | //! "query { 81 | //! res: chat.Message( 82 | //! order_by(mdate asc, id asc), 83 | //! after($mdate, $id), 84 | //! room_id = $room_id 85 | //! ) { 86 | //! id 87 | //! mdate 88 | //! content 89 | //! } 90 | //! }", 91 | //! Some(param), 92 | //! ) 93 | //! .await 94 | //! .unwrap(); 95 | //! let mut query_result = ResultParser::new(&result).unwrap(); 96 | //! let res: Vec = query_result.take_array("res").unwrap(); 97 | //! for msg in res { 98 | //! last_date = msg.mdate; 99 | //! last_id = msg.id; 100 | //! println!("you said: {}", msg.content); 101 | //! } 102 | //! } 103 | //! _ => {} //ignores other events 104 | //! } 105 | //! } 106 | //! }); 107 | //! 108 | //! //data is inserted in your private room 109 | //! let private_room: String = app.private_room(); 110 | //! let stdin = io::stdin(); 111 | //! let mut line = String::new(); 112 | //! println!("{}", "Write Something!"); 113 | //! loop { 114 | //! stdin.read_line(&mut line).unwrap(); 115 | //! 
if line.starts_with("/q") { 116 | //! break; 117 | //! } 118 | //! line.pop(); 119 | //! let mut params = Parameters::new(); 120 | //! params.add("message", line.clone()).unwrap(); 121 | //! params.add("room_id", private_room.clone()).unwrap(); 122 | //! app.mutate( 123 | //! "mutate { 124 | //! chat.Message { 125 | //! room_id:$room_id 126 | //! content: $message 127 | //! } 128 | //! }", 129 | //! Some(params), 130 | //! ) 131 | //! .await 132 | //! .unwrap(); 133 | //! line.clear(); 134 | //! } 135 | //! } 136 | //! ``` 137 | //! 138 | //! # Features 139 | //! *Discret* provides a blocking (DiscretBlocking) and a non blocking (Discret) API. 140 | //! 141 | //! On local network, peer connection happens without requiring any server. 142 | //! For peer to peer connection over the Internet, a discovery server is needed to allow peers to discover each others. 143 | //! The discret lib provides an implementation of the discovery server named Beacon. 144 | //! 145 | //! The library provides strong security features out of the box: 146 | //! - data is encrypted at rest by using the SQLCipher database 147 | //! - encrypted communication using the QUIC protocol 148 | //! - data integrity: each rows is signed with the peer signing key, making it very hard to synchronize bad data 149 | //! - access control via Rooms 150 | //! 151 | //! # Limitations 152 | //! As data lives on your devices, Discret should only be used for applications with data generated by "real person", with hundreds of peers at most. 153 | //! It is not suited for large scale applications and communities with thousands of peoples. 154 | //! 155 | //! It currently only supports text data but supports for file synchronization is planned. 156 | //! 157 | //! Connection over the internet is not 100% guaranteed to work, because certain types of enterprise firewalls will block the connection attempts. 158 | //! 159 | //! Please, be warned that P2P connections leaks your IP adress and should only be used with trusted peer. 160 | //! This leak exposes you to the following threats: 161 | //! - Distributed denial of service (DDOS) 162 | //! - Leak of your "Real World" location via geolocation services. 163 | //! - State sponsored surveillance: A state watching the network could determine which peer connect to which, giving a lot of knowledge about your social network. 164 | //! 165 | //! # Platform Support 166 | //! - Linux: Tested 167 | //! - Windows: Tested 168 | //! - macOS: not tested, should work 169 | //! - Android: works on arch64 architecture. Architectures i686 and x86_64 have some low level linker issues when working with Flutter. 170 | //! - iOS: not tested 171 | //! 
172 | #![forbid(unsafe_code)] 173 | #[allow(clippy::too_many_arguments)] 174 | //#![allow(dead_code)] 175 | mod configuration; 176 | mod database; 177 | mod date_utils; 178 | mod discret; 179 | mod event_service; 180 | mod network; 181 | mod peer_connection_service; 182 | mod security; 183 | mod signature_verification_service; 184 | mod synchronisation; 185 | 186 | use thiserror::Error; 187 | 188 | type Result = std::result::Result; 189 | 190 | pub use crate::{ 191 | configuration::{BeaconConfig, Configuration}, 192 | database::{ 193 | query_language::parameter::{Parameters, ParametersAdd}, 194 | room::Room, 195 | system_entities::DefaultRoom, 196 | DataModification, ResultParser, 197 | }, 198 | discret::{database_exists, zero_uid, Discret, DiscretBlocking}, 199 | event_service::Event, 200 | network::beacon::Beacon, 201 | security::{ 202 | base64_decode, base64_encode, derive_pass_phrase, generate_x509_certificate, hash, 203 | random_domain_name, 204 | }, 205 | }; 206 | 207 | /// 208 | /// Defines every error that can be triggered by the discret lib 209 | /// 210 | #[derive(Error, Debug)] 211 | pub enum Error { 212 | #[error(transparent)] 213 | Security(#[from] crate::security::Error), 214 | 215 | #[error(transparent)] 216 | Database(#[from] crate::database::Error), 217 | 218 | #[error(transparent)] 219 | Network(#[from] crate::network::Error), 220 | 221 | #[error(transparent)] 222 | Parsing(#[from] crate::database::query_language::Error), 223 | 224 | #[error(transparent)] 225 | JSON(#[from] serde_json::Error), 226 | 227 | #[error(transparent)] 228 | TokioJoin(#[from] tokio::task::JoinError), 229 | 230 | #[error(transparent)] 231 | Timeout(#[from] tokio::time::error::Elapsed), 232 | 233 | #[error(transparent)] 234 | Bincode(#[from] Box), 235 | 236 | #[error(transparent)] 237 | Io(#[from] std::io::Error), 238 | 239 | #[error(transparent)] 240 | OneshotRecv(#[from] tokio::sync::oneshot::error::RecvError), 241 | 242 | #[error(transparent)] 243 | Synch(#[from] crate::synchronisation::Error), 244 | 245 | #[error(transparent)] 246 | InvalidAdress(#[from] std::net::AddrParseError), 247 | 248 | #[error("Invalid account")] 249 | InvalidAccount, 250 | 251 | #[error("An account already exists")] 252 | AccountExists, 253 | 254 | #[error("Provided signer is not allowed to sign the datamodel")] 255 | InvalidSigner(), 256 | 257 | #[error("Application Template cannot be updated with a template with another id")] 258 | InvalidUpdateTemplate(), 259 | 260 | #[error("tokio send error")] 261 | SendError(String), 262 | 263 | #[error("{0}")] 264 | ChannelError(String), 265 | 266 | #[error("Timeout occurred while sending {0}")] 267 | TimeOut(String), 268 | 269 | #[error("Remote Room did not send back a room definition {0}")] 270 | RoomUnknow(String), 271 | 272 | #[error("{0} Edges were rejected during synchronisation of room: {1} at date: {2} ")] 273 | EdgeRejected(usize, String, i64), 274 | 275 | #[error("{0} Nodes were rejected during synchronisation of room: {1} at date: {2}")] 276 | NodeRejected(usize, String, i64), 277 | 278 | #[error("invalid certificate hash: '{0}'")] 279 | InvalidCertificateHash(String), 280 | 281 | #[error("Connection to Beacon {0} failed, reason: {1}")] 282 | BeaconConnectionFailed(String, String), 283 | 284 | #[error("{0}")] 285 | InvalidConnection(String), 286 | 287 | #[error("{0}")] 288 | SecurityViolation(String), 289 | 290 | #[error("{0}")] 291 | InvalidInvite(String), 292 | 293 | #[error("{0}")] 294 | Unsupported(String), 295 | } 296 | 297 | #[cfg(test)] 298 | pub mod test { 
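// Test-only helpers: a minimal `log::Log` implementation that prints records to stdout,
// plus `init_log` to install it for the test suite.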
299 | 300 | use log::{Level, Metadata, Record}; 301 | struct SimpleLogger; 302 | 303 | impl log::Log for SimpleLogger { 304 | fn enabled(&self, metadata: &Metadata) -> bool { 305 | metadata.level() <= Level::Info 306 | } 307 | 308 | fn log(&self, record: &Record) { 309 | if self.enabled(record.metadata()) { 310 | println!("{} - {}", record.level(), record.args()); 311 | } 312 | } 313 | 314 | fn flush(&self) {} 315 | } 316 | 317 | use log::LevelFilter; 318 | 319 | static LOGGER: SimpleLogger = SimpleLogger; 320 | 321 | pub fn init_log() { 322 | log::set_logger(&LOGGER) 323 | .map(|()| log::set_max_level(LevelFilter::Debug)) 324 | .unwrap(); 325 | } 326 | } 327 | -------------------------------------------------------------------------------- /src/network/beacon.rs: -------------------------------------------------------------------------------- 1 | #[cfg(feature = "log")] 2 | use log::error; 3 | use std::{ 4 | collections::{HashMap, HashSet}, 5 | net::SocketAddr, 6 | sync::Arc, 7 | }; 8 | 9 | use quinn::{crypto::rustls::QuicServerConfig, Connection, Endpoint, Incoming, SendStream, VarInt}; 10 | use rustls::pki_types::{CertificateDer, PrivatePkcs8KeyDer}; 11 | use serde::{Deserialize, Serialize}; 12 | use tokio::{ 13 | io::{AsyncReadExt, AsyncWriteExt}, 14 | sync::Mutex, 15 | }; 16 | 17 | use crate::security::MeetingToken; 18 | 19 | use super::{ 20 | peer_manager::MAX_MESSAGE_SIZE, shared_buffers::SharedBuffers, Announce, AnnounceHeader, 21 | ALPN_QUIC_HTTP, 22 | }; 23 | 24 | #[derive(Serialize, Deserialize)] 25 | pub enum BeaconMessage { 26 | InitiateConnection(AnnounceHeader, SocketAddr, MeetingToken), 27 | } 28 | 29 | /// 30 | /// Provides a Beacon service that allow peers to discover each others on the Internet 31 | /// 32 | pub struct Beacon {} 33 | impl Beacon { 34 | /// 35 | /// starts the service 36 | /// 37 | pub fn start( 38 | ipv4_port: u16, 39 | der: Vec, 40 | pks_der: Vec, 41 | allow_same_ip: bool, 42 | ) -> Result { 43 | let shared_buffers = Arc::new(SharedBuffers::new()); 44 | 45 | let ipv4_addr: SocketAddr = format!("0.0.0.0:{}", ipv4_port).parse()?; 46 | let ipv4_endpoint = Self::enpoint(ipv4_addr, der.clone(), pks_der.clone())?; 47 | Self::start_endpoint( 48 | ipv4_endpoint, 49 | shared_buffers.clone(), 50 | MAX_MESSAGE_SIZE, 51 | allow_same_ip, 52 | ); 53 | 54 | Ok(Self {}) 55 | } 56 | 57 | fn enpoint(addr: SocketAddr, der: Vec, pks_der: Vec) -> Result { 58 | let cert_der = CertificateDer::from(der); 59 | let priv_key = PrivatePkcs8KeyDer::from(pks_der); 60 | let mut server_crypto = rustls::ServerConfig::builder() 61 | .with_no_client_auth() 62 | .with_single_cert(vec![cert_der], priv_key.into())?; 63 | 64 | server_crypto.alpn_protocols = ALPN_QUIC_HTTP.iter().map(|&x| x.into()).collect(); 65 | 66 | let mut server_config = 67 | quinn::ServerConfig::with_crypto(Arc::new(QuicServerConfig::try_from(server_crypto)?)); 68 | let transport_config = Arc::get_mut(&mut server_config.transport).unwrap(); 69 | transport_config.max_concurrent_uni_streams(0_u8.into()); 70 | 71 | Ok(Endpoint::server(server_config, addr)?) 
72 | } 73 | 74 | fn start_endpoint( 75 | endpoint: Endpoint, 76 | shared_buffers: Arc, 77 | max_buffer_size: usize, 78 | allow_same_ip: bool, 79 | ) { 80 | tokio::spawn(async move { 81 | let meeting_point: Arc> = Arc::new(Mutex::new(MeetingPoint { 82 | meeting: HashMap::new(), 83 | buffer: Vec::new(), 84 | })); 85 | 86 | while let Some(incoming) = endpoint.accept().await { 87 | let shared_buff = shared_buffers.clone(); 88 | let meeting_point = meeting_point.clone(); 89 | tokio::spawn(async move { 90 | let new_conn = Self::start_accepted( 91 | incoming, 92 | shared_buff, 93 | max_buffer_size, 94 | meeting_point, 95 | allow_same_ip, 96 | ) 97 | .await; 98 | if let Err(_e) = new_conn { 99 | #[cfg(feature = "log")] 100 | error!("Beacon - start_accepted, Error: {_e}"); 101 | } 102 | }); 103 | } 104 | }); 105 | } 106 | 107 | async fn start_accepted( 108 | incoming: Incoming, 109 | shared_buffers: Arc, 110 | max_buffer_size: usize, 111 | meeting_point: Arc>, 112 | allow_same_ip: bool, 113 | ) -> Result<(), super::Error> { 114 | let new_conn = incoming.await?; 115 | let (send, mut recv) = new_conn.accept_bi().await?; 116 | 117 | recv.read_u8().await?; 118 | 119 | let sbuff = shared_buffers.clone(); 120 | tokio::spawn(async move { 121 | let id = new_conn.stable_id(); 122 | let conn_info: Arc> = Arc::new(Mutex::new(ConnectionInfo { 123 | conn: new_conn, 124 | sender: send, 125 | header: None, 126 | })); 127 | let mut header_initialised = false; 128 | let mut last_tokens: HashSet = HashSet::new(); 129 | loop { 130 | let len = recv.read_u32().await; 131 | if len.is_err() { 132 | break; 133 | } 134 | let len: usize = len.unwrap().try_into().unwrap(); 135 | if len > max_buffer_size { 136 | break; 137 | } 138 | 139 | let mut buffer = sbuff.take(); 140 | 141 | if buffer.len() < len { 142 | buffer.resize(len, 0); 143 | } 144 | 145 | let answer_bytes = recv.read_exact(&mut buffer[0..len]).await; 146 | if answer_bytes.is_err() { 147 | sbuff.release(buffer); 148 | break; 149 | } 150 | let announce: Result> = 151 | bincode::deserialize(&buffer[0..len]); 152 | sbuff.release(buffer); 153 | 154 | if announce.is_err() { 155 | break; 156 | } 157 | 158 | let announce = announce.unwrap(); 159 | if !header_initialised { 160 | let header = announce.header; 161 | 162 | let mut info_lock = conn_info.lock().await; 163 | info_lock.header = Some(header); 164 | drop(info_lock); 165 | 166 | header_initialised = true; 167 | } 168 | 169 | let new_tokens: HashSet = 170 | HashSet::from_iter(announce.tokens.into_iter()); 171 | 172 | let to_remove: HashSet<&MeetingToken> = 173 | last_tokens.difference(&new_tokens).collect(); 174 | 175 | let to_add: HashSet<&MeetingToken> = new_tokens.difference(&last_tokens).collect(); 176 | 177 | let mut meeting = meeting_point.lock().await; 178 | 179 | meeting.remove_tokens(id, &to_remove).await; 180 | meeting 181 | .add_tokens(id, &to_add, &conn_info, allow_same_ip) 182 | .await; 183 | 184 | last_tokens = new_tokens; 185 | } 186 | let mut to_remove: HashSet<&MeetingToken> = HashSet::with_capacity(last_tokens.len()); 187 | for s in &last_tokens { 188 | to_remove.insert(s); 189 | } 190 | let mut meeting = meeting_point.lock().await; 191 | meeting.remove_tokens(id, &to_remove).await; 192 | }); 193 | 194 | Ok(()) 195 | } 196 | } 197 | 198 | struct MeetingPoint { 199 | meeting: HashMap>>>, 200 | buffer: Vec, 201 | } 202 | impl MeetingPoint { 203 | pub async fn add_tokens( 204 | &mut self, 205 | id: usize, 206 | tokens: &HashSet<&MeetingToken>, 207 | conn: &Arc>, 208 | allow_same_ip: bool, 209 | ) { 
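        // For each token announced by this connection:
        // - look up the peers already registered under the same token,
        // - skip our own connection (same stable_id),
        // - unless both peers share the same public IP and allow_same_ip is false,
        //   send an InitiateConnection message to both sides so that they can try
        //   to reach each other directly (a peer whose send stream fails is closed),
        // - finally, register this connection under the token if it was not already present.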
210 | for token in tokens { 211 | let entry = self.meeting.entry(**token).or_default(); 212 | let mut insert = true; 213 | for other_conn in entry.iter() { 214 | let mut other_peer = other_conn.lock().await; 215 | if other_peer.conn.stable_id() == id { 216 | insert = false; 217 | } else { 218 | let mut this_peer = conn.lock().await; 219 | if allow_same_ip 220 | || !other_peer 221 | .conn 222 | .remote_address() 223 | .ip() 224 | .eq(&this_peer.conn.remote_address().ip()) 225 | { 226 | let this_msg = BeaconMessage::InitiateConnection( 227 | other_peer.header.clone().unwrap(), 228 | other_peer.conn.remote_address(), 229 | **token, 230 | ); 231 | 232 | self.buffer.clear(); 233 | bincode::serialize_into::<&mut Vec, _>(&mut self.buffer, &this_msg) 234 | .unwrap(); 235 | 236 | if this_peer 237 | .sender 238 | .write_u32(self.buffer.len() as u32) 239 | .await 240 | .is_err() 241 | { 242 | this_peer.conn.close(VarInt::from_u32(1), "".as_bytes()); 243 | break; 244 | } 245 | if this_peer.sender.write_all(&self.buffer).await.is_err() { 246 | this_peer.conn.close(VarInt::from_u32(1), "".as_bytes()); 247 | break; 248 | } 249 | 250 | let other_msg = BeaconMessage::InitiateConnection( 251 | this_peer.header.clone().unwrap(), 252 | this_peer.conn.remote_address(), 253 | **token, 254 | ); 255 | self.buffer.clear(); 256 | bincode::serialize_into::<&mut Vec, _>(&mut self.buffer, &other_msg) 257 | .unwrap(); 258 | 259 | if other_peer 260 | .sender 261 | .write_u32(self.buffer.len() as u32) 262 | .await 263 | .is_err() 264 | { 265 | other_peer.conn.close(VarInt::from_u32(1), "".as_bytes()); 266 | } 267 | 268 | if other_peer.sender.write_all(&self.buffer).await.is_err() { 269 | other_peer.conn.close(VarInt::from_u32(1), "".as_bytes()); 270 | } 271 | } 272 | } 273 | } 274 | if insert { 275 | entry.push(conn.clone()) 276 | } 277 | } 278 | } 279 | 280 | pub async fn remove_tokens(&mut self, id: usize, tokens: &HashSet<&MeetingToken>) { 281 | for token in tokens { 282 | if let Some(entry) = self.meeting.get_mut(*token) { 283 | let mut index = -1; 284 | for (i, peer) in entry.iter().enumerate() { 285 | let peer = peer.lock().await; 286 | if peer.conn.stable_id() == id { 287 | index = i as i32; 288 | break; 289 | } 290 | } 291 | if index >= 0 { 292 | entry.remove(index as usize); 293 | } 294 | } 295 | } 296 | } 297 | } 298 | 299 | struct ConnectionInfo { 300 | conn: Connection, 301 | sender: SendStream, 302 | header: Option, 303 | } 304 | -------------------------------------------------------------------------------- /src/network/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod beacon; 2 | pub mod endpoint; 3 | pub mod multicast; 4 | pub mod peer_manager; 5 | pub mod shared_buffers; 6 | use serde::{Deserialize, Serialize}; 7 | 8 | use std::io; 9 | use thiserror::Error; 10 | 11 | use crate::security::{MeetingToken, Uid}; 12 | 13 | //Application-Layer Protocol Negotiation (ALPN). 
Use the tag used for HTTP/3 over QUIC v1 14 | pub const ALPN_QUIC_HTTP: &[&[u8]] = &[b"h3"]; 15 | 16 | #[derive(Serialize, Deserialize, Clone)] 17 | pub struct ConnectionInfo { 18 | pub endpoint_id: Uid, 19 | pub remote_id: Uid, 20 | pub conn_id: Uid, 21 | pub meeting_token: MeetingToken, 22 | pub peer_verifying_key: Vec, 23 | } 24 | 25 | #[derive(Serialize, Deserialize, Clone)] 26 | pub struct AnnounceHeader { 27 | endpoint_id: Uid, 28 | certificate_hash: [u8; 32], 29 | signature: Vec, 30 | } 31 | impl AnnounceHeader { 32 | pub fn hash(&self) -> [u8; 32] { 33 | let mut hasher = blake3::Hasher::new(); 34 | hasher.update(&self.endpoint_id); 35 | hasher.update(&self.certificate_hash); 36 | *hasher.finalize().as_bytes() 37 | } 38 | } 39 | 40 | #[derive(Serialize, Deserialize)] 41 | pub struct Announce { 42 | pub header: AnnounceHeader, 43 | pub tokens: Vec, 44 | } 45 | 46 | #[derive(Error, Debug)] 47 | pub enum Error { 48 | #[error(transparent)] 49 | Io(#[from] io::Error), 50 | 51 | #[error(transparent)] 52 | Rustls(#[from] rustls::Error), 53 | 54 | #[error(transparent)] 55 | AddrParse(#[from] std::net::AddrParseError), 56 | 57 | #[error(transparent)] 58 | QuinnConfig(#[from] quinn::crypto::rustls::NoInitialCipherSuite), 59 | 60 | #[error(transparent)] 61 | QuinnConnect(#[from] quinn::ConnectError), 62 | 63 | #[error(transparent)] 64 | QuinnConnection(#[from] quinn::ConnectionError), 65 | 66 | #[error(transparent)] 67 | Serialisation(#[from] Box), 68 | 69 | #[error(transparent)] 70 | SocketWrite(#[from] quinn::WriteError), 71 | 72 | #[error(transparent)] 73 | SocketRead(#[from] quinn::ReadExactError), 74 | 75 | #[error(transparent)] 76 | Security(#[from] crate::security::Error), 77 | 78 | #[error(transparent)] 79 | Database(#[from] crate::database::Error), 80 | 81 | #[error("Message size {0} is to long and is ignored. Maximum allowed: {1}")] 82 | MsgSerialisationToLong(usize, usize), 83 | 84 | #[error("Message size {0} is to long and is ignored. 
Maximum allowed: {1}")] 85 | MsgDeserialisationToLong(usize, usize), 86 | 87 | #[error("IPV6 is not supported on this device")] 88 | IPV6NotSuported(), 89 | 90 | #[error("Failed to connect to {0} after {1} try, reason: {2}")] 91 | ConnectionFailed(String, usize, String), 92 | 93 | #[error("Invalid Stream flag: {0}")] 94 | InvalidStream(u8), 95 | 96 | #[error("One or several Streams are missing")] 97 | MissingStream(), 98 | 99 | #[error("{0}")] 100 | UnacceptableBehavior(String), 101 | 102 | #[error("{0}")] 103 | Unknown(String), 104 | } 105 | -------------------------------------------------------------------------------- /src/network/multicast.rs: -------------------------------------------------------------------------------- 1 | #[cfg(feature = "log")] 2 | use log::error; 3 | 4 | use super::{Announce, AnnounceHeader, Error}; 5 | use crate::peer_connection_service::{PeerConnectionMessage, PeerConnectionService}; 6 | use crate::security::MeetingToken; 7 | use bincode; 8 | 9 | use serde::{Deserialize, Serialize}; 10 | use socket2::{Domain, Protocol, SockAddr, Socket, Type}; 11 | use std::io; 12 | use std::net::{IpAddr, Ipv4Addr, SocketAddr}; 13 | use tokio::net::UdpSocket; 14 | use tokio::sync::mpsc; 15 | use tokio::sync::mpsc::Sender; 16 | 17 | //maximum message size 18 | const MULTICAST_MTU: usize = 4096; 19 | 20 | #[derive(Serialize, Deserialize)] 21 | pub enum MulticastMessage { 22 | Annouce(Announce, u16), 23 | InitiateConnection(AnnounceHeader, MeetingToken, u16), 24 | } 25 | 26 | //#[allow(clippy::unnecessary_unwrap)] 27 | pub async fn start_multicast_discovery( 28 | multicast_adress: SocketAddr, 29 | multicast_ipv4_interface: Ipv4Addr, 30 | peer_service: PeerConnectionService, 31 | ) -> Result, Error> { 32 | let socket_sender = new_sender(&multicast_ipv4_interface)?; 33 | let socket_listener = new_listener(multicast_adress, &multicast_ipv4_interface)?; 34 | let (sender, mut receiv) = mpsc::channel::(1); 35 | 36 | tokio::spawn(async move { 37 | let mut buffer: Vec = Vec::new(); 38 | while let Some(msg) = receiv.recv().await { 39 | buffer.clear(); 40 | let b = bincode::serialize_into(&mut buffer, &msg); 41 | match b { 42 | Ok(_) => { 43 | let error = socket_sender.send_to(&buffer, multicast_adress).await; 44 | if let Err(_e) = error { 45 | #[cfg(feature = "log")] 46 | error!("multicast send, error: {}", _e); 47 | } 48 | } 49 | Err(_e) => { 50 | #[cfg(feature = "log")] 51 | error!("multicast send, error: {}", _e) 52 | } 53 | } 54 | } 55 | }); 56 | 57 | tokio::spawn(async move { 58 | let mut buffer: [u8; MULTICAST_MTU] = [0; MULTICAST_MTU]; 59 | loop { 60 | let rec = receive(&socket_listener, &mut buffer).await; 61 | 62 | match rec { 63 | Ok((msg, adress)) => { 64 | let _ = peer_service 65 | .sender 66 | .send(PeerConnectionMessage::MulticastMessage(msg, adress)) 67 | .await; 68 | } 69 | Err(_e) => { 70 | #[cfg(feature = "log")] 71 | error!("multicast receive: {}", _e); 72 | } 73 | } 74 | } 75 | }); 76 | 77 | Ok(sender) 78 | } 79 | 80 | async fn receive( 81 | socket_listener: &UdpSocket, 82 | buffer: &mut [u8; MULTICAST_MTU], 83 | ) -> Result<(MulticastMessage, SocketAddr), Error> { 84 | let (len, remote_addr) = socket_listener 85 | .recv_from(buffer) 86 | .await 87 | .map_err(Error::from)?; 88 | 89 | let message: MulticastMessage = bincode::deserialize(&buffer[0..len])?; 90 | 91 | Ok((message, remote_addr)) 92 | } 93 | 94 | fn new_listener( 95 | multicast_adress: SocketAddr, 96 | multicast_ipv4_interface: &Ipv4Addr, 97 | ) -> io::Result { 98 | let ip_addr = multicast_adress.ip(); 99 | 
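// Create a non-blocking UDP socket, join the IPv4 multicast group on the
// selected interface, then bind it. Binding is platform specific (see
// bind_multicast below): Unix binds to the multicast address itself, while
// Windows binds to the interface address with the multicast port.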
let socket = new_socket()?; 100 | 101 | match ip_addr { 102 | IpAddr::V4(ref v4) => { 103 | // join to the multicast address, with all interfaces 104 | socket.join_multicast_v4(v4, multicast_ipv4_interface)?; 105 | } 106 | IpAddr::V6(ref _v6) => {} //don't know how to make it work well windows 107 | }; 108 | bind_multicast(&socket, &multicast_adress, multicast_ipv4_interface)?; 109 | let socket: std::net::UdpSocket = socket.into(); 110 | Ok(UdpSocket::from_std(socket).expect("could not convert to tokio socket")) 111 | } 112 | 113 | fn new_sender(multicast_ipv4_interface: &Ipv4Addr) -> io::Result { 114 | let socket = new_socket()?; 115 | socket.set_multicast_if_v4(multicast_ipv4_interface)?; 116 | socket.bind(&SockAddr::from(SocketAddr::new( 117 | (*multicast_ipv4_interface).into(), 118 | 0, 119 | )))?; 120 | let socket: std::net::UdpSocket = socket.into(); 121 | Ok(UdpSocket::from_std(socket).expect("could not convert to tokio socket")) 122 | } 123 | 124 | // this will be common for all our sockets 125 | fn new_socket() -> io::Result { 126 | let socket = Socket::new(Domain::IPV4, Type::DGRAM, Some(Protocol::UDP))?; 127 | // socket.set_read_timeout(Some(Duration::from_millis(100)))?; 128 | socket 129 | .set_nonblocking(true) 130 | .expect("could not set socket to non blocking"); 131 | Ok(socket) 132 | } 133 | 134 | /// On Windows, unlike all Unix variants, it is improper to bind to the multicast address 135 | /// 136 | /// see https://msdn.microsoft.com/en-us/library/windows/desktop/ms737550(v=vs.85).aspx 137 | #[cfg(windows)] 138 | fn bind_multicast( 139 | socket: &Socket, 140 | addr: &SocketAddr, 141 | multicast_ipv4_interface: &Ipv4Addr, 142 | ) -> io::Result<()> { 143 | let addr = SocketAddr::new(multicast_ipv4_interface.clone().into(), addr.port()); 144 | socket.set_reuse_address(true)?; 145 | socket.bind(&socket2::SockAddr::from(addr)) 146 | } 147 | 148 | /// On unixes we bind to the multicast address 149 | #[cfg(unix)] 150 | fn bind_multicast(socket: &Socket, addr: &SocketAddr, _: &Ipv4Addr) -> io::Result<()> { 151 | socket.set_reuse_address(true)?; 152 | socket.bind(&socket2::SockAddr::from(*addr)) 153 | } 154 | 155 | #[cfg(test)] 156 | mod test { 157 | 158 | use super::*; 159 | 160 | #[tokio::test(flavor = "multi_thread")] 161 | async fn multicast_test() { 162 | let multicast_adress = SocketAddr::new(Ipv4Addr::new(224, 0, 0, 224).into(), 22401); 163 | let multicast_ipv4: Ipv4Addr = Ipv4Addr::new(0, 0, 0, 0); 164 | let socket_sender = new_sender(&multicast_ipv4).unwrap(); 165 | let socket_listener = new_listener(multicast_adress, &multicast_ipv4).unwrap(); 166 | let socket_listener2 = new_listener(multicast_adress, &multicast_ipv4).unwrap(); 167 | 168 | let first = tokio::spawn(async move { 169 | let mut buffer: [u8; 4096] = [0; 4096]; 170 | let (len, remote_addr) = socket_listener 171 | .recv_from(&mut buffer) 172 | .await 173 | .map_err(Error::from) 174 | .unwrap(); 175 | 176 | ( 177 | String::from_utf8(buffer[0..len].to_vec()).unwrap(), 178 | remote_addr, 179 | ) 180 | }); 181 | 182 | let second = tokio::spawn(async move { 183 | let mut buffer: [u8; 4096] = [0; 4096]; 184 | let (len, remote_addr) = socket_listener2 185 | .recv_from(&mut buffer) 186 | .await 187 | .map_err(Error::from) 188 | .unwrap(); 189 | 190 | ( 191 | String::from_utf8(buffer[0..len].to_vec()).unwrap(), 192 | remote_addr, 193 | ) 194 | }); 195 | 196 | let message = "Hello World".to_string(); 197 | let _ = socket_sender 198 | .send_to(message.as_bytes(), multicast_adress) 199 | .await 200 | .unwrap(); 201 | 202 
| let (msg, _) = first.await.unwrap(); 203 | assert_eq!(msg, message); 204 | let (msg, _) = second.await.unwrap(); 205 | assert_eq!(msg, message); 206 | } 207 | } 208 | -------------------------------------------------------------------------------- /src/network/shared_buffers.rs: -------------------------------------------------------------------------------- 1 | use std::sync::Mutex; 2 | 3 | pub struct SharedBuffers { 4 | buffers: Mutex>>, 5 | } 6 | impl SharedBuffers { 7 | pub fn new() -> Self { 8 | Self { 9 | buffers: Mutex::new(Vec::new()), 10 | } 11 | } 12 | 13 | pub fn take(&self) -> Vec { 14 | let mut buff = self.buffers.lock().unwrap(); 15 | match buff.pop() { 16 | Some(v) => v, 17 | None => Vec::new(), 18 | } 19 | 20 | //todo 21 | } 22 | pub fn release(&self, buffer: Vec) { 23 | let mut buff = self.buffers.lock().unwrap(); 24 | buff.push(buffer) 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /src/peer_connection_service.rs: -------------------------------------------------------------------------------- 1 | #[cfg(feature = "log")] 2 | use log::error; 3 | 4 | use std::{ 5 | collections::HashSet, 6 | net::{Ipv4Addr, SocketAddr}, 7 | sync::{atomic::AtomicBool, Arc}, 8 | time::Duration, 9 | }; 10 | 11 | use quinn::Connection; 12 | 13 | use crate::{ 14 | database::node::Node, 15 | date_utils::now, 16 | discret::{DiscretParams, DiscretServices}, 17 | event_service::{Event, EventServiceMessage}, 18 | network::{ 19 | endpoint::DiscretEndpoint, 20 | multicast::{self, MulticastMessage}, 21 | peer_manager::{self, PeerManager, TokenType}, 22 | Announce, AnnounceHeader, ConnectionInfo, 23 | }, 24 | security::{uid_decode, HardwareFingerprint, MeetingSecret, MeetingToken, Uid}, 25 | synchronisation::{ 26 | peer_inbound_service::{LocalPeerService, QueryService}, 27 | peer_outbound_service::{InboundQueryService, RemotePeerHandle}, 28 | room_locking_service::RoomLockService, 29 | Answer, LocalEvent, QueryProtocol, RemoteEvent, 30 | }, 31 | DefaultRoom, Result, 32 | }; 33 | use tokio::{ 34 | sync::{broadcast, mpsc, oneshot, Mutex}, 35 | time, 36 | }; 37 | 38 | pub enum PeerConnectionMessage { 39 | NewConnection( 40 | Option, 41 | ConnectionInfo, 42 | mpsc::Sender, 43 | mpsc::Receiver, 44 | mpsc::Sender, 45 | mpsc::Receiver, 46 | mpsc::Sender, 47 | mpsc::Receiver, 48 | ), 49 | PeerConnectionFailed(Uid, Uid), 50 | PeerConnected(Vec, Uid), 51 | PeerDisconnected(Vec, [u8; 32], Uid), 52 | ValidateHardware([u8; 32], HardwareFingerprint, oneshot::Sender>), 53 | InviteAccepted(TokenType, Node), 54 | NewPeer(Vec), 55 | SendAnnounce(), 56 | MulticastMessage(MulticastMessage, SocketAddr), 57 | CreateInvite(Option, oneshot::Sender>>), 58 | AcceptInvite(Vec), 59 | BeaconConnectionFailed(SocketAddr, String), 60 | BeaconConnected(SocketAddr, mpsc::Sender), 61 | BeaconDisconnected(SocketAddr), 62 | BeaconInitiateConnection(SocketAddr, AnnounceHeader, MeetingToken), 63 | } 64 | 65 | static PEER_CHANNEL_SIZE: usize = 32; 66 | 67 | /// 68 | /// Handle the creation and removal of peers 69 | /// 70 | #[derive(Clone)] 71 | pub struct PeerConnectionService { 72 | pub sender: mpsc::Sender, 73 | } 74 | impl PeerConnectionService { 75 | pub async fn start( 76 | params: &DiscretParams, 77 | services: &DiscretServices, 78 | meeting_secret: MeetingSecret, 79 | ) -> Result { 80 | let (sender, mut connection_receiver) = 81 | mpsc::channel::(PEER_CHANNEL_SIZE); 82 | let (local_event_broadcast, _) = broadcast::channel::(16); 83 | let lock_service = 
RoomLockService::start(params.configuration.parallelism); 84 | let peer_service = Self { sender }; 85 | let ret = peer_service.clone(); 86 | 87 | let max_buffer_size = params.configuration.max_object_size_in_kb * 1024 * 2; 88 | 89 | let endpoint = DiscretEndpoint::start( 90 | peer_service.clone(), 91 | max_buffer_size as usize, 92 | ¶ms.verifying_key, 93 | ) 94 | .await?; 95 | 96 | let multicast_discovery = if params.configuration.enable_multicast { 97 | let multicast_adress: SocketAddr = params.configuration.multicast_ipv4_group.parse()?; // SocketAddr::new(Ipv4Addr::new(224, 0, 0, 224).into(), 22402); 98 | let multicast_ipv4_interface: Ipv4Addr = 99 | params.configuration.multicast_ipv4_interface.parse()?; 100 | let multicast_discovery = multicast::start_multicast_discovery( 101 | multicast_adress, 102 | multicast_ipv4_interface, 103 | peer_service.clone(), 104 | ) 105 | .await?; 106 | Some(multicast_discovery) 107 | } else { 108 | None 109 | }; 110 | 111 | let mut peer_manager = PeerManager::new( 112 | params, 113 | services, 114 | endpoint, 115 | multicast_discovery, 116 | meeting_secret, 117 | ) 118 | .await?; 119 | 120 | peer_manager 121 | .init_hardware(params.hardware_fingerprint.clone()) 122 | .await?; 123 | 124 | if params.configuration.enable_beacons { 125 | for beacon in ¶ms.configuration.beacons { 126 | peer_manager 127 | .add_beacon(&beacon.hostname, &beacon.cert_hash) 128 | .await?; 129 | } 130 | } 131 | 132 | let service = peer_service.clone(); 133 | let frequency = params.configuration.announce_frequency_in_ms; 134 | 135 | tokio::spawn(async move { 136 | let mut interval = time::interval(Duration::from_millis(frequency)); 137 | 138 | loop { 139 | interval.tick().await; 140 | let _ = service 141 | .sender 142 | .send(PeerConnectionMessage::SendAnnounce()) 143 | .await; 144 | } 145 | }); 146 | 147 | let discret_params = params.clone(); 148 | let discret_service = services.clone(); 149 | tokio::spawn(async move { 150 | let mut event_receiver = discret_service.events.subcribe().await; 151 | loop { 152 | tokio::select! 
{ 153 | msg = connection_receiver.recv() =>{ 154 | match msg{ 155 | Some(msg) =>{ 156 | let err = Self::process_peer_message( 157 | msg, 158 | &mut peer_manager, 159 | &discret_params, 160 | &discret_service, 161 | &peer_service, 162 | &lock_service, 163 | local_event_broadcast.subscribe(), 164 | ).await; 165 | if let Err(_e) = err{ 166 | #[cfg(feature = "log")] 167 | error!("Process_peer_message error: {_e}"); 168 | 169 | } 170 | }, 171 | None => break, 172 | } 173 | } 174 | msg = event_receiver.recv() =>{ 175 | match msg{ 176 | Ok(event) => { 177 | Self::process_event(event, &local_event_broadcast).await; 178 | }, 179 | Err(e) => match e { 180 | broadcast::error::RecvError::Closed => break, 181 | broadcast::error::RecvError::Lagged(_) => {}, 182 | }, 183 | } 184 | } 185 | } 186 | } 187 | }); 188 | Ok(ret) 189 | } 190 | 191 | pub async fn disconnect( 192 | &self, 193 | verifying_key: Vec, 194 | circuit_id: [u8; 32], 195 | connection_id: Uid, 196 | ) { 197 | let _ = self 198 | .sender 199 | .send(PeerConnectionMessage::PeerDisconnected( 200 | verifying_key, 201 | circuit_id, 202 | connection_id, 203 | )) 204 | .await; 205 | } 206 | 207 | pub async fn connected(&self, verifying_key: Vec, connection_id: Uid) { 208 | let _ = self 209 | .sender 210 | .send(PeerConnectionMessage::PeerConnected( 211 | verifying_key, 212 | connection_id, 213 | )) 214 | .await; 215 | } 216 | 217 | pub async fn invite_accepted(&self, token: TokenType, peer: Node) { 218 | let _ = self 219 | .sender 220 | .send(PeerConnectionMessage::InviteAccepted(token, peer)) 221 | .await; 222 | } 223 | 224 | async fn process_peer_message( 225 | msg: PeerConnectionMessage, 226 | peer_manager: &mut PeerManager, 227 | discret_params: &DiscretParams, 228 | discret_services: &DiscretServices, 229 | peer_service: &PeerConnectionService, 230 | lock_service: &RoomLockService, 231 | local_event_broadcast: broadcast::Receiver, 232 | ) -> Result<()> { 233 | match msg { 234 | PeerConnectionMessage::NewConnection( 235 | connection, 236 | connection_info, 237 | answer_sender, 238 | answer_receiver, 239 | query_sender, 240 | query_receiver, 241 | event_sender, 242 | event_receiver, 243 | ) => { 244 | let circuit_id = 245 | PeerManager::circuit_id(connection_info.endpoint_id, connection_info.remote_id); 246 | 247 | let token_type = peer_manager.get_token_type( 248 | &connection_info.meeting_token, 249 | &connection_info.peer_verifying_key, 250 | )?; 251 | 252 | if let Some(conn) = connection { 253 | peer_manager.add_connection( 254 | circuit_id, 255 | conn, 256 | connection_info.conn_id, 257 | connection_info.meeting_token, 258 | ) 259 | }; 260 | 261 | let remote_verifying_key: Arc>> = Arc::new(Mutex::new(Vec::new())); 262 | let conn_ready = Arc::new(AtomicBool::new(true)); 263 | 264 | let inbound_query_service = InboundQueryService::start( 265 | discret_params.hardware_fingerprint.clone(), 266 | circuit_id, 267 | connection_info.conn_id, 268 | RemotePeerHandle { 269 | db: discret_services.database.clone(), 270 | allowed_room: HashSet::new(), 271 | verifying_key: discret_params.verifying_key.clone(), 272 | reply: answer_sender, 273 | }, 274 | query_receiver, 275 | peer_service.clone(), 276 | remote_verifying_key.clone(), 277 | conn_ready.clone(), 278 | ); 279 | 280 | let query_service = QueryService::start(query_sender, answer_receiver); 281 | 282 | LocalPeerService::start( 283 | event_receiver, 284 | local_event_broadcast, 285 | circuit_id, 286 | connection_info.clone(), 287 | discret_params.verifying_key.clone(), 288 | token_type, 289 | 
remote_verifying_key.clone(), 290 | conn_ready, 291 | lock_service.clone(), 292 | query_service, 293 | event_sender.clone(), 294 | peer_service.clone(), 295 | inbound_query_service, 296 | &discret_services, 297 | ); 298 | } 299 | 300 | PeerConnectionMessage::PeerConnected(verifying_key, connection_id) => { 301 | let _ = discret_services 302 | .events 303 | .sender 304 | .send(EventServiceMessage::PeerConnected( 305 | verifying_key, 306 | now(), 307 | connection_id, 308 | )) 309 | .await; 310 | } 311 | 312 | PeerConnectionMessage::PeerDisconnected(verifying_key, circuit_id, connection_id) => { 313 | if peer_manager.disconnect( 314 | circuit_id, 315 | connection_id, 316 | peer_manager::REASON_UNKNOWN, 317 | "", 318 | ) { 319 | let _ = discret_services 320 | .events 321 | .sender 322 | .send(EventServiceMessage::PeerDisconnected( 323 | verifying_key, 324 | now(), 325 | connection_id, 326 | )) 327 | .await; 328 | } 329 | } 330 | 331 | PeerConnectionMessage::InviteAccepted(token, peer) => { 332 | if let Err(_e) = peer_manager.invite_accepted(token, peer).await { 333 | #[cfg(feature = "log")] 334 | error!("PeerConnectionMessage::InviteAccepted error: {_e}"); 335 | } 336 | } 337 | 338 | PeerConnectionMessage::NewPeer(peers) => { 339 | if peer_manager 340 | .add_new_peers(peers, discret_params.configuration.auto_allow_new_peers) 341 | .await? 342 | { 343 | let _ = discret_services 344 | .events 345 | .sender 346 | .send(EventServiceMessage::PendingPeer()) 347 | .await; 348 | } 349 | } 350 | 351 | PeerConnectionMessage::SendAnnounce() => { 352 | if let Err(_e) = peer_manager.send_annouces().await { 353 | #[cfg(feature = "log")] 354 | error!("PeerConnectionMessage::SendAnnounce, error: {_e} "); 355 | } 356 | } 357 | PeerConnectionMessage::MulticastMessage(message, address) => match message { 358 | MulticastMessage::Annouce(a, port) => { 359 | peer_manager 360 | .multicast_announce(a, address, port, true) 361 | .await? 362 | } 363 | MulticastMessage::InitiateConnection(header, token, port) => { 364 | peer_manager 365 | .multicast_initiate_connection(header, token, address, port, true) 366 | .await? 
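// Multicast announces and connection initiations received by the UDP
// listener (see multicast.rs) end up here and are handed over to the
// PeerManager.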
367 | } 368 | }, 369 | 370 | PeerConnectionMessage::PeerConnectionFailed(endpoint_id, remote_id) => { 371 | peer_manager.clean_progress(endpoint_id, remote_id); 372 | } 373 | PeerConnectionMessage::CreateInvite(default_room, reply) => { 374 | let s = peer_manager.create_invite(default_room).await; 375 | let _ = reply.send(s); 376 | } 377 | PeerConnectionMessage::AcceptInvite(invite) => { 378 | peer_manager.accept_invite(&invite).await?; 379 | } 380 | PeerConnectionMessage::ValidateHardware(circuit, fingerprint, reply) => { 381 | let valid = peer_manager 382 | .validate_hardware( 383 | &circuit, 384 | fingerprint, 385 | discret_params.configuration.auto_accept_local_device, 386 | ) 387 | .await; 388 | if let Ok(val) = valid.as_ref() { 389 | if !val { 390 | let _ = discret_services 391 | .events 392 | .sender 393 | .send(EventServiceMessage::PendingHardware()) 394 | .await; 395 | } 396 | } 397 | let _ = reply.send(valid); 398 | } 399 | PeerConnectionMessage::BeaconConnectionFailed(address, error) => { 400 | peer_manager.beacon_connection_failed(address, error).await; 401 | } 402 | PeerConnectionMessage::BeaconConnected(address, sender) => { 403 | peer_manager.beacon_connected(address, sender).await?; 404 | } 405 | PeerConnectionMessage::BeaconDisconnected(address) => { 406 | peer_manager.beacon_disconnected(address).await; 407 | } 408 | PeerConnectionMessage::BeaconInitiateConnection(address, header, token) => { 409 | peer_manager 410 | .beacon_initiate_connection(address, header, token) 411 | .await?; 412 | } 413 | } 414 | Ok(()) 415 | } 416 | 417 | async fn process_event(event: Event, local_event_broadcast: &broadcast::Sender) { 418 | match event { 419 | Event::DataChanged(data_modif) => { 420 | let mut rooms = Vec::new(); 421 | for room in &data_modif.rooms { 422 | rooms.push(uid_decode(room.0).unwrap()); 423 | } 424 | let _ = local_event_broadcast.send(LocalEvent::RoomDataChanged(rooms)); 425 | } 426 | Event::RoomModified(room) => { 427 | let _ = local_event_broadcast.send(LocalEvent::RoomDefinitionChanged(room)); 428 | } 429 | _ => {} 430 | } 431 | } 432 | } 433 | -------------------------------------------------------------------------------- /src/security.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | fs::File, 3 | io::{Read, Write}, 4 | path::PathBuf, 5 | }; 6 | 7 | use crate::date_utils::now; 8 | use argon2::{self, Config, Variant, Version}; 9 | use base64::{engine::general_purpose::URL_SAFE_NO_PAD as enc64, Engine as _}; 10 | use ed25519_dalek::{SignatureError, Signer, Verifier}; 11 | use rand::{rngs::OsRng, RngCore}; 12 | use rcgen::{CertificateParams, KeyPair, SanType}; 13 | use serde::{Deserialize, Serialize}; 14 | use sysinfo::System; 15 | use thiserror::Error; 16 | use x25519_dalek::{PublicKey, StaticSecret}; 17 | 18 | #[derive(Error, Debug)] 19 | pub enum Error { 20 | #[error("{0}")] 21 | InvalidKeyType(u8), 22 | 23 | #[error("{0}")] 24 | InvalidKeyLenght(String), 25 | 26 | #[error("{0}")] 27 | InvalidSignature(String), 28 | 29 | #[error(transparent)] 30 | Signature(#[from] SignatureError), 31 | 32 | #[error(transparent)] 33 | Decode(#[from] base64::DecodeError), 34 | 35 | #[error(transparent)] 36 | Io(#[from] std::io::Error), 37 | 38 | #[error("Invalid Base64 encoded Uid")] 39 | Uid(), 40 | 41 | #[error("Invalid Base64 encoded MeetingToken")] 42 | MeetingToken(), 43 | } 44 | 45 | /// 46 | /// when exporting a key the first byte is a flag indicating the public key algorithm used 47 | /// currenlty useless but might 
become usefull in the future to implement new key algorithms 48 | /// 49 | const KEY_TYPE_ED_2519: u8 = 1; 50 | 51 | /// 52 | /// import a existing signing key, using the first byte flag to detect the signature scheme 53 | /// 54 | /// 55 | #[cfg(test)] 56 | pub fn import_signing_key(keypair: &[u8]) -> Result { 57 | if keypair[0] != KEY_TYPE_ED_2519 { 58 | return Err(Error::InvalidKeyType(KEY_TYPE_ED_2519)); 59 | } 60 | if keypair.len() != 33 { 61 | return Err(Error::InvalidKeyLenght(format!( 62 | "key lenght must be 33, value: {} ", 63 | keypair.len() 64 | ))); 65 | } 66 | 67 | let ke: [u8; 32] = keypair[1..33].try_into().unwrap(); 68 | let keypair = ed25519_dalek::SigningKey::from(ke); 69 | 70 | Ok(Ed25519SigningKey { 71 | signing_key: keypair, 72 | }) 73 | } 74 | 75 | /// 76 | /// import a existing verifying key, using the first byte flag to detect the signature scheme 77 | /// 78 | pub fn import_verifying_key(veriying_key: &[u8]) -> Result, Error> { 79 | if veriying_key[0] != KEY_TYPE_ED_2519 { 80 | return Err(Error::InvalidKeyType(KEY_TYPE_ED_2519)); 81 | } 82 | if veriying_key.len() != 33 { 83 | return Err(Error::InvalidKeyLenght(format!( 84 | "key lenght must be 33, value: {} ", 85 | veriying_key.len() 86 | ))); 87 | } 88 | 89 | let ke: [u8; 32] = veriying_key[1..33].try_into().unwrap(); 90 | 91 | let veriying_key = ed25519_dalek::VerifyingKey::from_bytes(&ke)?; 92 | Ok(Box::new(Ed2519VerifyingKey { veriying_key })) 93 | } 94 | 95 | /// 96 | /// Signing key using Ed25519 signature scheme 97 | /// 98 | pub struct Ed25519SigningKey { 99 | signing_key: ed25519_dalek::SigningKey, 100 | } 101 | 102 | impl Ed25519SigningKey { 103 | /// 104 | /// new key using a random number 105 | /// 106 | #[cfg(test)] 107 | pub fn new() -> Self { 108 | let random: [u8; 32] = random32(); 109 | Ed25519SigningKey::create_from(&random) 110 | } 111 | 112 | /// 113 | /// creates a signing key using a provided random number 114 | /// usefull to create a predictable key from a key derivation function 115 | /// 116 | pub fn create_from(random: &[u8; 32]) -> Self { 117 | let sk: &ed25519_dalek::SecretKey = random; 118 | Ed25519SigningKey { 119 | signing_key: ed25519_dalek::SigningKey::from(sk), 120 | } 121 | } 122 | } 123 | 124 | /// 125 | /// Defines the necessary functions to sign data 126 | /// 127 | pub trait SigningKey { 128 | /// 129 | /// Export the signing key, adding a flag to detect the encryption scheme 130 | /// 131 | fn export(&self) -> Vec; 132 | 133 | /// 134 | /// Exports the verifying key, adding a a flag to detect the encryption scheme 135 | /// 136 | fn export_verifying_key(&self) -> Vec; 137 | 138 | /// 139 | /// Provides a copy of the verifying key 140 | /// 141 | fn verifying_key(&self) -> impl VerifyingKey; 142 | 143 | /// 144 | /// Sign a message, returning the signature 145 | /// passed message should be small, like a hash of the real message 146 | /// 147 | fn sign(&self, message: &[u8]) -> Vec; 148 | } 149 | 150 | impl SigningKey for Ed25519SigningKey { 151 | fn export(&self) -> Vec { 152 | let mut export = vec![KEY_TYPE_ED_2519]; 153 | let keyp = self.signing_key.to_bytes(); 154 | export.extend(keyp); 155 | export 156 | } 157 | 158 | fn export_verifying_key(&self) -> Vec { 159 | let mut export = vec![KEY_TYPE_ED_2519]; 160 | let keyp = self.signing_key.verifying_key().to_bytes(); 161 | export.extend(keyp); 162 | export 163 | } 164 | 165 | fn sign(&self, message: &[u8]) -> Vec { 166 | self.signing_key.sign(message).to_bytes().into() 167 | } 168 | 169 | fn verifying_key(&self) -> impl 
VerifyingKey { 170 | Ed2519VerifyingKey { 171 | veriying_key: self.signing_key.verifying_key(), 172 | } 173 | } 174 | } 175 | 176 | /// 177 | /// Defines the necessary function to verify data 178 | /// 179 | pub trait VerifyingKey { 180 | /// 181 | /// Export the verifying key, adding a flag to detect the encryption scheme 182 | /// 183 | // fn export(&self) -> Vec; 184 | 185 | /// 186 | /// verify the signature against the provided message 187 | /// 188 | fn verify(&self, data: &[u8], signature: &[u8]) -> Result<(), Error>; 189 | } 190 | 191 | /// 192 | /// verification key using Ed25519 signature scheme 193 | /// 194 | pub struct Ed2519VerifyingKey { 195 | pub veriying_key: ed25519_dalek::VerifyingKey, 196 | } 197 | impl VerifyingKey for Ed2519VerifyingKey { 198 | // fn export(&self) -> Vec { 199 | // let mut export = vec![KEY_TYPE_ED_2519]; 200 | // let keyp = self.veriying_key.to_bytes(); 201 | // export.extend(keyp); 202 | // export 203 | // } 204 | 205 | fn verify(&self, data: &[u8], signature: &[u8]) -> Result<(), Error> { 206 | if signature.len() != 64 { 207 | return Err(Error::InvalidKeyLenght(format!( 208 | "signatue lenght must be 64, value: {} ", 209 | signature.len() 210 | ))); 211 | } 212 | let sign: [u8; 64] = signature.try_into().unwrap(); 213 | 214 | let sig = ed25519_dalek::Signature::from_bytes(&sign); 215 | self.veriying_key.verify(data, &sig)?; 216 | Ok(()) 217 | } 218 | } 219 | /// 220 | /// generates a self signed certificate 221 | /// 222 | pub fn generate_x509_certificate(name: &str) -> rcgen::CertifiedKey { 223 | let mut params: CertificateParams = Default::default(); 224 | 225 | params.subject_alt_names = vec![SanType::DnsName(name.try_into().unwrap())]; 226 | let key_pair = KeyPair::generate_for(&rcgen::PKCS_ED25519).unwrap(); 227 | let cert = params.self_signed(&key_pair).unwrap(); 228 | // let cert: rcgen::CertifiedKey = rcgen::generate_simple_self_signed(vec![name]).unwrap(); 229 | rcgen::CertifiedKey { cert, key_pair } 230 | } 231 | 232 | /// 233 | /// The quick connection initiate connection with a domain name used during TLS negociations to avoid man in the middle attack. 234 | /// As we are using p2p and self signed certificates, we don't have a domain name to connect to we have to generate one. 235 | /// 236 | /// This name is transmitted in clear text over the network as part of the QUIC handshake. 237 | /// If the name is too weird (like a base64 encoded value), it could be easy to detect that the connection is a Discret connection, and not a standard HTTP3 one. 238 | /// So we try to be a little bit sneaky by creating a plausible domain names. 
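// Illustrative sketch of what the generator below produces: 5 to 7 letters
// alternating consonants and vowels, followed by a common TLD, e.g. something
// like "bakori.net" (the exact value is random).
//
//     let name = random_domain_name();
//     assert!(name.len() <= 14); // the unit test below relies on this bound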
239 | /// 240 | /// This functiion is public because it is used by the discret_beacon projet 241 | /// 242 | pub fn random_domain_name() -> String { 243 | let min_value = 5; 244 | let max_value = 8; 245 | let divider = max_value - min_value; 246 | let offset: usize = OsRng.next_u32().try_into().unwrap(); 247 | let num = offset % divider; 248 | 249 | let alphabet = [ 250 | 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 251 | 'v', 'w', 'x', 'z', 252 | ]; 253 | let vowels = ['a', 'e', 'i', 'o', 'u', 'y']; 254 | let extension = [ 255 | ".com", ".org", ".net", ".us", ".co", ".biz", ".info", ".fr", ".uk", ".me", ".cn", ".de", 256 | ".ly", ".in", ".eu", ".ca", ".coop", ".asia", ".cat", ".pro", ".ac", ".ad", ".ae", ".af", 257 | ".ai", ".am", 258 | ]; 259 | let size = num + min_value; 260 | let mut domain = String::new(); 261 | 262 | let mut vowel = OsRng.next_u32() % 2 == 1; 263 | for _ in 0..size { 264 | let mut index: usize = OsRng.next_u32().try_into().unwrap(); 265 | if vowel { 266 | index %= vowels.len(); 267 | domain.push(vowels[index]); 268 | vowel = false; 269 | } else { 270 | index %= alphabet.len(); 271 | domain.push(alphabet[index]); 272 | vowel = true; 273 | } 274 | } 275 | let mut index: usize = OsRng.next_u32().try_into().unwrap(); 276 | index %= extension.len(); 277 | domain.push_str(extension[index]); 278 | 279 | domain 280 | } 281 | 282 | pub fn random32() -> [u8; 32] { 283 | let mut random: [u8; 32] = [0; 32]; 284 | 285 | OsRng.fill_bytes(&mut random); 286 | random 287 | } 288 | 289 | pub const MEETING_TOKEN_SIZE: usize = 7; 290 | pub type MeetingToken = [u8; MEETING_TOKEN_SIZE]; 291 | /// 292 | /// Use Diffie Hellman to create an id to be used to announce yourself on the network to the other peers. 
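// A minimal sketch of the symmetry this scheme relies on (it mirrors the
// `meeting_secret` unit test at the bottom of this file): both peers derive
// the same token from their own secret and the other's public key.
//
//     let alice = MeetingSecret::new(random32());
//     let bob = MeetingSecret::new(random32());
//     assert_eq!(alice.token(&bob.public_key()), bob.token(&alice.public_key()));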
293 | /// The id is way too small to be secured, but it is big enougth to have a low collision rate 294 | /// this will allow peer to recognize themselves on the network 295 | /// The authentication is performed using a signature 296 | /// 297 | /// The connection protocol is 298 | /// generate a self signed certificate 299 | /// for each allowed peer generate the diffie hellman id an put it in an array 300 | /// sign the cerificate and the array with the public_key used to insert data 301 | /// broadcast this data 302 | /// in one single packet you can announce yoursef to other peers 303 | /// upeon retrieval 304 | /// peer will check if there is an id corresponding 305 | /// this is will allow to retrieve the peer verifying key 306 | /// verify authenticity 307 | /// start connection 308 | /// 309 | pub struct MeetingSecret { 310 | secret: StaticSecret, 311 | } 312 | impl MeetingSecret { 313 | pub fn new(bytes: [u8; 32]) -> Self { 314 | Self { 315 | secret: StaticSecret::from(bytes), 316 | } 317 | } 318 | 319 | pub fn public_key(&self) -> PublicKey { 320 | PublicKey::from(&self.secret) 321 | } 322 | 323 | pub fn token(&self, their_public: &PublicKey) -> MeetingToken { 324 | //if both public key are the same, it is the same user and hash the private key instead of using diffie hellman 325 | let hash = if their_public.eq(&self.public_key()) { 326 | hash(self.secret.as_bytes()) 327 | } else { 328 | let df = self.secret.diffie_hellman(their_public); 329 | hash(df.as_bytes()) 330 | }; 331 | let mut token: MeetingToken = [0; MEETING_TOKEN_SIZE]; 332 | token.copy_from_slice(&hash[0..MEETING_TOKEN_SIZE]); 333 | token 334 | } 335 | 336 | /// derive a token from a string context and a secret 337 | /// provided by the Blake3 hash function 338 | /// 339 | pub fn derive_token(context: &str, key_material: &[u8]) -> MeetingToken { 340 | let hash = blake3::derive_key(context, key_material); 341 | let mut token: MeetingToken = [0; MEETING_TOKEN_SIZE]; 342 | token.copy_from_slice(&hash[0..MEETING_TOKEN_SIZE]); 343 | token 344 | } 345 | 346 | pub fn decode_token(base64: &str) -> Result { 347 | let r = base64_decode(base64.as_bytes()).map_err(|_| Error::MeetingToken())?; 348 | let mut token: MeetingToken = [0; MEETING_TOKEN_SIZE]; 349 | if r.len() < MEETING_TOKEN_SIZE { 350 | return Err(Error::MeetingToken()); 351 | } 352 | token.copy_from_slice(&r[0..MEETING_TOKEN_SIZE]); 353 | 354 | Ok(token) 355 | } 356 | } 357 | 358 | /// 359 | /// Derive a password using argon2id 360 | /// using parameters slighly greater than the minimum recommended by OSWAP 361 | /// - 20480 KB of memory 362 | /// - an iteration count of 2 363 | /// - parallelism count of 2 364 | /// - the login is used as a salt 365 | /// 366 | pub fn derive_pass_phrase(login: &str, pass_phrase: &str) -> [u8; 32] { 367 | let password = pass_phrase.as_bytes(); 368 | let salt = hash(login.as_bytes()); 369 | 370 | let config = Config::<'_> { 371 | mem_cost: 20480, 372 | time_cost: 2, 373 | variant: Variant::Argon2id, 374 | lanes: 2, 375 | version: Version::Version13, 376 | ..Default::default() 377 | }; 378 | 379 | let hashed = argon2::hash_encoded(password, &salt, &config).unwrap(); 380 | let matches = argon2::verify_encoded(&hashed, password).unwrap(); 381 | assert!(matches); 382 | hash(hashed.as_bytes()) 383 | } 384 | 385 | /// 386 | /// hash a byte array using the Blake3 hash function 387 | /// 388 | pub fn hash(bytes: &[u8]) -> [u8; 32] { 389 | *blake3::hash(bytes).as_bytes() 390 | } 391 | 392 | /// 393 | /// derive a ket from a string context and 
a secret 394 | /// provided by the Blake3 hash function 395 | /// 396 | pub fn derive_key(context: &str, key_material: &[u8]) -> [u8; 32] { 397 | blake3::derive_key(context, key_material) 398 | } 399 | 400 | /// 401 | /// encode bytes into a base 64 String 402 | /// 403 | pub fn base64_encode(data: &[u8]) -> String { 404 | enc64.encode(data) 405 | } 406 | 407 | /// 408 | /// decode a base 64 String into bytes 409 | /// 410 | pub fn base64_decode(data: &[u8]) -> Result, Error> { 411 | enc64.decode(data).map_err(Error::from) 412 | } 413 | 414 | pub const UID_SIZE: usize = 16; 415 | pub type Uid = [u8; UID_SIZE]; 416 | const DEFAULT_UID: Uid = [0; UID_SIZE]; 417 | /// 418 | /// generate a 16 byte uid with the time on the first 6 bytes to improve index locality 419 | /// 420 | /// 421 | pub fn new_uid() -> Uid { 422 | const TIME_BYTES: usize = 6; 423 | let time = now(); 424 | let time = &time.to_be_bytes()[TIME_BYTES..]; 425 | 426 | let mut uid = DEFAULT_UID; 427 | let (one, two) = uid.split_at_mut(time.len()); 428 | 429 | one.copy_from_slice(time); 430 | OsRng.fill_bytes(two); 431 | 432 | uid 433 | } 434 | 435 | /// derive a ket from a string context and a secret 436 | /// provided by the Blake3 hash function 437 | /// 438 | pub fn derive_uid(context: &str, key_material: &[u8]) -> Uid { 439 | let hash = blake3::derive_key(context, key_material); 440 | let mut uid = default_uid(); 441 | uid.copy_from_slice(&hash[0..UID_SIZE]); 442 | uid 443 | } 444 | 445 | pub fn default_uid() -> Uid { 446 | DEFAULT_UID 447 | } 448 | 449 | pub fn uid_decode(base64: &str) -> Result { 450 | let s = base64_decode(base64.as_bytes())?; 451 | let uid: Uid = s.try_into().map_err(|_| Error::Uid())?; 452 | Ok(uid) 453 | } 454 | 455 | pub fn uid_encode(uid: &Uid) -> String { 456 | base64_encode(uid) 457 | } 458 | 459 | pub fn uid_from(v: Vec) -> Result { 460 | let uid: Uid = v.try_into().map_err(|_| Error::Uid())?; 461 | Ok(uid) 462 | } 463 | 464 | /// 465 | /// try to get a unique identifier from the underlying hardware 466 | /// returns a unique identifier and the machine name. 
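// Minimal usage sketch (see the `hardware_fingerprint` test below); the file
// name passed to `get` is the caller's choice:
//
//     let fp = HardwareFingerprint::get(&PathBuf::from("discret_fingerprint.bin"))?;
//     println!("device '{}' has id {}", fp.name, base64_encode(&fp.id));
//     // a second call with the same path yields the same id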
467 | /// 468 | /// This is only used to identifies devices you own, not devices of other peers 469 | /// 470 | /// if the platform is not supported by sysinfo, 471 | /// creates a file named discret_fingerprint.bin and stores a random number 472 | /// 473 | #[derive(Serialize, Deserialize, Clone)] 474 | pub struct HardwareFingerprint { 475 | pub id: Uid, 476 | pub name: String, 477 | } 478 | impl HardwareFingerprint { 479 | pub fn get(file_path: &PathBuf) -> Result { 480 | let id = if file_path.is_file() { 481 | let mut file = File::open(file_path)?; 482 | let mut uid = default_uid(); 483 | file.read(&mut uid)?; 484 | uid 485 | } else { 486 | let uid = new_uid(); 487 | let mut file = File::create(file_path)?; 488 | file.write_all(&uid)?; 489 | uid 490 | }; 491 | 492 | let mut name = "Unknown Device".to_string(); 493 | if sysinfo::IS_SUPPORTED_SYSTEM { 494 | let host = System::host_name().unwrap_or_default(); 495 | //let system = System::name().unwrap_or_default(); 496 | //let networks = Networks::new_with_refreshed_list(); 497 | let osname = System::long_os_version().unwrap_or_default(); 498 | name = format!("{osname}, {host}"); 499 | }; 500 | 501 | Ok(Self { id, name }) 502 | } 503 | } 504 | 505 | #[cfg(test)] 506 | mod tests { 507 | use std::fs; 508 | 509 | use super::*; 510 | #[test] 511 | fn control_derive_pass_phrase() { 512 | let login = "test"; 513 | let pass_phrase = "testphrase"; 514 | 515 | let hashed = derive_pass_phrase(login, pass_phrase); 516 | 517 | assert_eq!( 518 | base64_encode(&hashed), 519 | "KER9-vDQvEeLBD5EAnPo52l8XEiuEO5vuaZDXOpQId0" 520 | ); 521 | } 522 | 523 | #[test] 524 | fn control_hash() { 525 | assert_eq!( 526 | base64_encode(&hash(b"bytes")), 527 | "3xmx8QX_kpGRzknQu_3FtO3CpxpA9QLclVNZ6zNkniQ" 528 | ); 529 | } 530 | 531 | #[test] 532 | fn control_ed25519() { 533 | let rd = hash(b"not random"); 534 | let signing_key = Ed25519SigningKey::create_from(&rd); 535 | 536 | let exp_kp = signing_key.export(); 537 | 538 | assert_eq!( 539 | base64_encode(signing_key.signing_key.as_bytes()), 540 | "RkG04WSsJLl3i6STKCBmU-sB2xqREK0VCM-qnjoq8Ik" 541 | ); 542 | 543 | assert_eq!( 544 | base64_encode(&signing_key.export_verifying_key()[0..]), 545 | "AVS6nzDyEt5jfykx0UObb32ySqI4sN89Y6nL9KFp_tWO" 546 | ); 547 | 548 | let msg = b"message to sign"; 549 | let signature = signing_key.sign(&msg.to_vec()); 550 | 551 | let keypair = import_signing_key(&exp_kp).unwrap(); 552 | 553 | let exp_pub = keypair.export_verifying_key(); 554 | let imp_pub = import_verifying_key(&exp_pub).unwrap(); 555 | 556 | imp_pub.verify(msg, &signature).unwrap(); 557 | } 558 | 559 | #[test] 560 | pub fn meeting_secret() { 561 | let peer1 = MeetingSecret::new(random32()); 562 | let peer1_public = peer1.public_key(); 563 | let peer1_public = bincode::serialize(&peer1_public.as_bytes()).unwrap(); 564 | 565 | let peer2 = MeetingSecret::new(random32()); 566 | let peer2_public = peer2.public_key(); 567 | let peer2_public = bincode::serialize(&peer2_public).unwrap(); 568 | 569 | let peer1_public: PublicKey = bincode::deserialize(&peer1_public).unwrap(); 570 | let peer2_public: PublicKey = bincode::deserialize(&peer2_public).unwrap(); 571 | 572 | let id1 = peer2.token(&peer1_public); 573 | let id2 = peer1.token(&peer2_public); 574 | assert_eq!(id1, id2); 575 | } 576 | 577 | #[test] 578 | pub fn hardware_fingerprint() { 579 | let path: PathBuf = "test_data/hardware_fingerprint.bin".into(); 580 | let _ = fs::remove_file(&path); 581 | 582 | let id1 = HardwareFingerprint::get(&path).unwrap(); 583 | let id2 = 
HardwareFingerprint::get(&path).unwrap(); 584 | assert_eq!(id1.id, id2.id); 585 | } 586 | 587 | #[test] 588 | pub fn random_domain() { 589 | for _ in 0..50 { 590 | // println!("{}", random_domain_name()); 591 | assert!(random_domain_name().len() <= 14); 592 | } 593 | } 594 | } 595 | -------------------------------------------------------------------------------- /src/signature_verification_service.rs: -------------------------------------------------------------------------------- 1 | use std::thread; 2 | 3 | use super::Result; 4 | use crate::{ 5 | database::{ 6 | edge::{Edge, EdgeDeletionEntry}, 7 | node::{Node, NodeDeletionEntry}, 8 | room_node::RoomNode, 9 | }, 10 | security::import_verifying_key, 11 | }; 12 | //use ed25519_dalek::{verify_batch, Signature, Signer, SigningKey, VerifyingKey}; 13 | 14 | use tokio::sync::oneshot::{self}; 15 | 16 | pub enum VerificationMessage { 17 | RoomNode(Box, oneshot::Sender>), 18 | Nodes(Vec, oneshot::Sender>>), 19 | Edges(Vec, oneshot::Sender>>), 20 | EdgeLog( 21 | Vec, 22 | oneshot::Sender>>, 23 | ), 24 | NodeLog( 25 | Vec, 26 | oneshot::Sender>>, 27 | ), 28 | Hash(Vec, [u8; 32], Vec, oneshot::Sender), 29 | } 30 | /// 31 | /// Signature verification consumes a lot of cpu ressources. 32 | /// it is moved to real threads to avoid blocking Tokio processes 33 | /// 34 | #[derive(Clone)] 35 | pub struct SignatureVerificationService { 36 | pub sender: flume::Sender, 37 | } 38 | impl SignatureVerificationService { 39 | pub fn start(verification_treads: usize) -> Self { 40 | let (sender, receiver) = flume::bounded::(verification_treads * 2); 41 | for _ in 0..verification_treads { 42 | let local_receiver = receiver.clone(); 43 | thread::spawn(move || { 44 | while let Ok(msg) = local_receiver.recv() { 45 | match msg { 46 | VerificationMessage::RoomNode(node, reply) => { 47 | let _ = reply.send(Self::room_check(*node)); 48 | } 49 | VerificationMessage::Nodes(nodes, reply) => { 50 | let _ = reply.send(Self::nodes_check(nodes)); 51 | } 52 | VerificationMessage::Edges(edges, reply) => { 53 | let _ = reply.send(Self::edges_check(edges)); 54 | } 55 | VerificationMessage::EdgeLog(log, reply) => { 56 | let _ = reply.send(Self::edge_log_check(log)); 57 | } 58 | VerificationMessage::NodeLog(log, reply) => { 59 | let _ = reply.send(Self::node_log_check(log)); 60 | } 61 | VerificationMessage::Hash(signature, hash, verifying_key, reply) => { 62 | let pub_key = import_verifying_key(&verifying_key); 63 | match pub_key { 64 | Ok(pub_key) => match pub_key.verify(&hash, &signature) { 65 | Ok(_) => { 66 | let _ = reply.send(true); 67 | } 68 | Err(_) => { 69 | let _ = reply.send(false); 70 | } 71 | }, 72 | Err(_) => { 73 | let _ = reply.send(false); 74 | } 75 | } 76 | } 77 | } 78 | } 79 | }); 80 | } 81 | 82 | Self { sender } 83 | } 84 | 85 | pub fn nodes_check(nodes: Vec) -> Result> { 86 | // verify_batch(); 87 | 88 | for node in &nodes { 89 | node.verify()?; 90 | } 91 | Ok(nodes) 92 | } 93 | 94 | // pub fn nodes_check(nodes: Vec) -> Result> { 95 | // // verify_batch(); 96 | // let mut hashes = Vec::with_capacity(nodes.len()); 97 | // let mut signatures = Vec::with_capacity(nodes.len()); 98 | // let verifying_keys = Vec::with_capacity(nodes.len()); 99 | // for node in &nodes { 100 | // let hash = node.hash()?; 101 | // hashes.push(hash.as_bytes().to_owned()); 102 | 103 | // let sign = node._signature.clone(); 104 | // let sign: [u8; 64] = sign.try_into().unwrap(); 105 | // let sig = ed25519_dalek::Signature::from_bytes(&sign); 106 | // signatures.push(sig); 107 | 108 | // 
node.verify()?; 109 | // } 110 | // let mut messages: Vec<&[u8]> = Vec::with_capacity(nodes.len()); 111 | // for msg in &hashes { 112 | // messages.push(msg); 113 | // } 114 | // verify_batch(&messages, &signatures, &verifying_keys).map_err(|e| crate::Error::); 115 | // Ok(nodes) 116 | // } 117 | 118 | pub fn edges_check(edges: Vec) -> Result> { 119 | for edge in &edges { 120 | edge.verify()?; 121 | } 122 | Ok(edges) 123 | } 124 | 125 | pub fn edge_log_check(log: Vec) -> Result> { 126 | for edge_log in &log { 127 | edge_log.verify()?; 128 | } 129 | Ok(log) 130 | } 131 | 132 | pub fn node_log_check(log: Vec) -> Result> { 133 | for node_log in &log { 134 | node_log.verify()?; 135 | } 136 | Ok(log) 137 | } 138 | 139 | pub fn room_check(node: RoomNode) -> Result { 140 | node.node.verify()?; 141 | 142 | for edge in &node.admin_edges { 143 | edge.verify()?; 144 | } 145 | 146 | for user in &node.admin_nodes { 147 | user.node.verify()?; 148 | } 149 | 150 | for edge in &node.auth_edges { 151 | edge.verify()?; 152 | } 153 | 154 | for auth in &node.auth_nodes { 155 | auth.node.verify()?; 156 | for edge in &auth.user_edges { 157 | edge.verify()?; 158 | } 159 | for user in &auth.user_nodes { 160 | user.node.verify()?; 161 | } 162 | 163 | for edge in &auth.right_edges { 164 | edge.verify()?; 165 | } 166 | for right in &auth.right_nodes { 167 | right.node.verify()?; 168 | } 169 | 170 | for edge in &auth.user_admin_edges { 171 | edge.verify()?; 172 | } 173 | 174 | for user in &auth.user_admin_nodes { 175 | user.node.verify()?; 176 | } 177 | } 178 | Ok(node) 179 | } 180 | 181 | pub async fn verify_room_node(&self, node: RoomNode) -> Result { 182 | let (reply, receiver) = oneshot::channel::>(); 183 | let _ = self 184 | .sender 185 | .send_async(VerificationMessage::RoomNode(Box::new(node), reply)) 186 | .await; 187 | receiver.await.unwrap() //won't fail unless when stopping app 188 | } 189 | 190 | pub async fn verify_nodes(&self, nodes: Vec) -> Result> { 191 | let (reply, receiver) = oneshot::channel::>>(); 192 | let _ = self 193 | .sender 194 | .send_async(VerificationMessage::Nodes(nodes, reply)) 195 | .await; 196 | receiver.await.unwrap() //won't fail unless when stopping app 197 | } 198 | 199 | pub async fn verify_edges(&self, nodes: Vec) -> Result> { 200 | let (reply, receiver) = oneshot::channel::>>(); 201 | let _ = self 202 | .sender 203 | .send_async(VerificationMessage::Edges(nodes, reply)) 204 | .await; 205 | receiver.await.unwrap() //won't fail unless when stopping app 206 | } 207 | 208 | pub async fn verify_edge_log( 209 | &self, 210 | log: Vec, 211 | ) -> Result> { 212 | let (reply, receiver) = oneshot::channel::>>(); 213 | let _ = self 214 | .sender 215 | .send_async(VerificationMessage::EdgeLog(log, reply)) 216 | .await; 217 | receiver.await.unwrap() //won't fail unless when stopping app 218 | } 219 | 220 | pub async fn verify_node_log( 221 | &self, 222 | log: Vec, 223 | ) -> Result> { 224 | let (reply, receiver) = oneshot::channel::>>(); 225 | let _ = self 226 | .sender 227 | .send_async(VerificationMessage::NodeLog(log, reply)) 228 | .await; 229 | receiver.await.unwrap() //won't fail unless when stopping app 230 | } 231 | 232 | pub async fn verify_hash( 233 | &self, 234 | signature: Vec, 235 | hash: [u8; 32], 236 | verifying_key: Vec, 237 | ) -> bool { 238 | let (reply, receiver) = oneshot::channel::(); 239 | let _ = self 240 | .sender 241 | .send_async(VerificationMessage::Hash( 242 | signature, 243 | hash, 244 | verifying_key, 245 | reply, 246 | )) 247 | .await; 248 | 
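// The actual signature check runs on one of the dedicated verification
// threads; here we only await its boolean reply on the oneshot channel.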
receiver.await.unwrap() //won't fail unless when stopping app 249 | } 250 | } 251 | -------------------------------------------------------------------------------- /src/synchronisation/mod.rs: -------------------------------------------------------------------------------- 1 | use std::sync::Arc; 2 | 3 | use serde::{Deserialize, Serialize}; 4 | 5 | use crate::{ 6 | database::{node::Node, room::Room}, 7 | security::{self, Uid}, 8 | }; 9 | use thiserror::Error; 10 | pub mod peer_inbound_service; 11 | pub mod peer_outbound_service; 12 | pub mod room_locking_service; 13 | 14 | #[derive(Serialize, Deserialize, Debug, Error)] 15 | pub enum Error { 16 | #[error("Authorisation for Query {0}")] 17 | Authorisation(String), 18 | 19 | #[error("RemoteTechnical for Query {0}")] 20 | RemoteTechnical(String), 21 | 22 | #[error("TimeOut ")] 23 | TimeOut, 24 | 25 | #[error("Parsing")] 26 | Parsing, 27 | 28 | #[error("Technical")] 29 | Technical, 30 | } 31 | 32 | /// Queries have 10 seconds to returns before closing connection 33 | pub static NETWORK_TIMEOUT_SEC: u64 = 10; 34 | 35 | #[derive(Serialize, Deserialize)] 36 | pub enum Query { 37 | ProveIdentity(Vec), 38 | HardwareFingerprint(), 39 | RoomList, 40 | RoomDefinition(Uid), 41 | RoomNode(Uid), 42 | RoomLog(Uid), 43 | RoomLogAt(Uid, i64), 44 | EdgeDeletionLog(Uid, String, i64), 45 | NodeDeletionLog(Uid, String, i64), 46 | RoomDailyNodes(Uid, String, i64), 47 | Nodes(Uid, Vec), 48 | Edges(Uid, Vec<(Uid, i64)>), 49 | PeersForRoom(Uid), 50 | } 51 | 52 | #[derive(Serialize, Deserialize)] 53 | pub struct QueryProtocol { 54 | pub id: u64, 55 | pub query: Query, 56 | } 57 | 58 | #[derive(Serialize, Deserialize)] 59 | pub struct Answer { 60 | pub id: u64, 61 | pub success: bool, 62 | pub complete: bool, 63 | pub serialized: Vec, 64 | } 65 | 66 | #[derive(Clone)] 67 | pub enum LocalEvent { 68 | RoomDefinitionChanged(Arc), 69 | RoomDataChanged(Vec), 70 | } 71 | 72 | #[derive(Serialize, Deserialize)] 73 | pub enum RemoteEvent { 74 | Ready, //indicate that this end of the connection is ready to synchronize 75 | ReadyFingerprint, //indicate that this end of the connection is ready to perform a hardware fingerprint check 76 | RoomDefinitionChanged(Uid), 77 | RoomDataChanged(Uid), 78 | } 79 | 80 | #[derive(Serialize, Deserialize)] 81 | pub struct IdentityAnswer { 82 | pub peer: Node, 83 | pub chall_signature: Vec, 84 | } 85 | impl IdentityAnswer { 86 | pub fn verify(&self, challenge: &[u8]) -> Result<(), security::Error> { 87 | let pub_key = security::import_verifying_key(&self.peer.verifying_key)?; 88 | pub_key.verify(challenge, &self.chall_signature)?; 89 | Ok(()) 90 | } 91 | } 92 | -------------------------------------------------------------------------------- /src/synchronisation/peer_outbound_service.rs: -------------------------------------------------------------------------------- 1 | #[cfg(feature = "log")] 2 | use log::error; 3 | 4 | use std::{ 5 | collections::HashSet, 6 | sync::{ 7 | atomic::{AtomicBool, Ordering}, 8 | Arc, 9 | }, 10 | }; 11 | 12 | use serde::Serialize; 13 | use tokio::sync::{ 14 | mpsc::{self, UnboundedSender}, 15 | Mutex, 16 | }; 17 | 18 | use crate::{ 19 | base64_encode, 20 | database::graph_database::GraphDatabaseService, 21 | peer_connection_service::PeerConnectionService, 22 | security::{HardwareFingerprint, Uid}, 23 | }; 24 | 25 | use super::{Answer, Error, IdentityAnswer, Query, QueryProtocol}; 26 | 27 | /// 28 | /// handle all inbound queries 29 | /// 30 | #[derive(Clone)] 31 | pub struct InboundQueryService { 32 | room_sender: 
UnboundedSender, 33 | } 34 | impl InboundQueryService { 35 | #[allow(clippy::too_many_arguments)] 36 | pub fn start( 37 | fingerprint: HardwareFingerprint, 38 | circuit_id: [u8; 32], 39 | conn_id: Uid, 40 | mut peer: RemotePeerHandle, 41 | mut receiver: mpsc::Receiver, 42 | peer_service: PeerConnectionService, 43 | verifying_key: Arc>>, 44 | conn_ready: Arc, 45 | ) -> Self { 46 | let (room_sender, mut room_receiver) = mpsc::unbounded_channel::(); 47 | 48 | tokio::spawn(async move { 49 | loop { 50 | tokio::select! { 51 | msg = receiver.recv() =>{ 52 | match msg{ 53 | Some(msg) => { 54 | if let Err(_e) = Self::process_inbound(msg, &mut peer, &verifying_key, &conn_ready, &fingerprint).await{ 55 | #[cfg(feature = "log")] 56 | error!("RemoteQueryService Channel Send, Error: {_e}"); 57 | } 58 | }, 59 | None => break, 60 | } 61 | 62 | } 63 | msg = room_receiver.recv() =>{ 64 | match msg{ 65 | Some(uid) => peer.add_allowed_room(uid), 66 | None => break, 67 | } 68 | } 69 | } 70 | } 71 | 72 | let key = verifying_key.lock().await; 73 | peer_service 74 | .disconnect(key.clone(), circuit_id, conn_id) 75 | .await; 76 | }); 77 | Self { room_sender } 78 | } 79 | 80 | pub async fn process_inbound( 81 | msg: QueryProtocol, 82 | peer: &mut RemotePeerHandle, 83 | verifying_key: &Arc>>, 84 | conn_ready: &Arc, 85 | fingerprint: &HardwareFingerprint, 86 | ) -> Result<(), crate::Error> { 87 | match msg.query { 88 | Query::ProveIdentity(challenge) => { 89 | let res = peer.db.sign(challenge).await; 90 | let self_peer = peer 91 | .db 92 | .get_peer_node(peer.verifying_key.clone()) 93 | .await? 94 | .unwrap(); 95 | peer.send( 96 | msg.id, 97 | true, 98 | true, 99 | IdentityAnswer { 100 | peer: self_peer, 101 | chall_signature: res.1, 102 | }, 103 | ) 104 | .await 105 | } 106 | 107 | Query::HardwareFingerprint() => { 108 | let key = verifying_key.lock().await; 109 | if !key.is_empty() { 110 | if key.eq(&peer.verifying_key) { 111 | peer.send(msg.id, true, true, fingerprint.clone()).await?; 112 | } else { 113 | return Err(crate::Error::SecurityViolation(format!( 114 | "Query::HardwareFingerprint Peer with key {} is trying to get your hardware fingerprint", 115 | base64_encode(&key) 116 | ))); 117 | } 118 | } 119 | Ok(()) 120 | } 121 | 122 | Query::RoomList => { 123 | let key = verifying_key.lock().await; 124 | 125 | if !key.is_empty() && conn_ready.load(Ordering::Relaxed) { 126 | let init_rooms = peer.allowed_room.is_empty(); 127 | 128 | let mut res_reply = peer.db.get_rooms_for_peer(key.clone()).await; 129 | while let Some(rooms) = res_reply.recv().await { 130 | match rooms { 131 | Ok(room_list) => { 132 | if init_rooms { 133 | for room in &room_list { 134 | peer.allowed_room.insert(*room); 135 | } 136 | } 137 | peer.send(msg.id, true, false, room_list).await?; 138 | } 139 | Err(_e) => { 140 | #[cfg(feature = "log")] 141 | error!("Query::RoomList, Error: {_e}"); 142 | peer.send( 143 | msg.id, 144 | false, 145 | true, 146 | Error::RemoteTechnical("Query::RoomList".to_string()), 147 | ) 148 | .await?; 149 | } 150 | } 151 | } 152 | peer.send(msg.id, true, true, "").await?; 153 | } 154 | Ok(()) 155 | } 156 | 157 | Query::RoomDefinition(room_id) => { 158 | if peer.allowed_room.contains(&room_id) { 159 | let res = peer.db.get_room_definition(room_id).await; 160 | match res { 161 | Ok(definition) => peer.send(msg.id, true, true, definition).await?, 162 | Err(_e) => { 163 | #[cfg(feature = "log")] 164 | error!("Query::RoomDefinition, Error: {_e}"); 165 | peer.send( 166 | msg.id, 167 | false, 168 | true, 169 | 
Error::RemoteTechnical("Query::RoomDefinition".to_string()), 170 | ) 171 | .await?; 172 | } 173 | } 174 | } else { 175 | peer.send( 176 | msg.id, 177 | false, 178 | true, 179 | Error::Authorisation("Query::RoomDefinition".to_string()), 180 | ) 181 | .await?; 182 | } 183 | Ok(()) 184 | } 185 | 186 | Query::RoomNode(room_id) => { 187 | if peer.allowed_room.contains(&room_id) { 188 | let res = peer.db.get_room_node(room_id).await; 189 | match res { 190 | Ok(definition) => peer.send(msg.id, true, true, definition).await?, 191 | Err(_e) => { 192 | #[cfg(feature = "log")] 193 | error!("Query::RoomNode, Error: {_e}"); 194 | peer.send( 195 | msg.id, 196 | false, 197 | true, 198 | Error::RemoteTechnical("Query::RoomNode".to_string()), 199 | ) 200 | .await?; 201 | } 202 | } 203 | } else { 204 | peer.send( 205 | msg.id, 206 | false, 207 | true, 208 | Error::Authorisation("Query::RoomNode".to_string()), 209 | ) 210 | .await?; 211 | } 212 | 213 | Ok(()) 214 | } 215 | 216 | Query::RoomLog(room_id) => { 217 | if peer.allowed_room.contains(&room_id) { 218 | let mut res_reply = peer.db.get_room_log(room_id).await; 219 | while let Some(res) = res_reply.recv().await { 220 | match res { 221 | Ok(log) => peer.send(msg.id, true, false, log).await?, 222 | Err(_e) => { 223 | #[cfg(feature = "log")] 224 | error!("Query::RoomLog, Error: {_e}"); 225 | peer.send( 226 | msg.id, 227 | false, 228 | true, 229 | Error::RemoteTechnical("Query::RoomLog".to_string()), 230 | ) 231 | .await? 232 | } 233 | } 234 | } 235 | peer.send(msg.id, true, true, "").await?; 236 | } else { 237 | peer.send( 238 | msg.id, 239 | false, 240 | true, 241 | Error::Authorisation("Query::RoomLog".to_string()), 242 | ) 243 | .await? 244 | } 245 | 246 | Ok(()) 247 | } 248 | 249 | Query::RoomLogAt(room_id, date) => { 250 | if peer.allowed_room.contains(&room_id) { 251 | let res = peer.db.get_room_log_at(room_id, date).await; 252 | match res { 253 | Ok(log) => peer.send(msg.id, true, true, log).await?, 254 | Err(_e) => { 255 | #[cfg(feature = "log")] 256 | error!("Query::RoomLog, Error: {_e}"); 257 | peer.send( 258 | msg.id, 259 | false, 260 | true, 261 | Error::RemoteTechnical("Query::RoomLog".to_string()), 262 | ) 263 | .await? 264 | } 265 | } 266 | } else { 267 | peer.send( 268 | msg.id, 269 | false, 270 | true, 271 | Error::Authorisation("Query::RoomLog".to_string()), 272 | ) 273 | .await? 274 | } 275 | 276 | Ok(()) 277 | } 278 | 279 | Query::RoomDailyNodes(room_id, entity, date) => { 280 | if peer.allowed_room.contains(&room_id) { 281 | let mut res_reply = peer.db.get_room_daily_nodes(room_id, entity, date).await; 282 | while let Some(res) = res_reply.recv().await { 283 | match res { 284 | Ok(log) => peer.send(msg.id, true, false, log).await?, 285 | Err(_e) => { 286 | #[cfg(feature = "log")] 287 | error!("Query::RoomDailyNodes, Error: {_e}"); 288 | peer.send( 289 | msg.id, 290 | false, 291 | true, 292 | Error::RemoteTechnical("Query::RoomDailyNodes".to_string()), 293 | ) 294 | .await? 295 | } 296 | } 297 | } 298 | peer.send(msg.id, true, true, "").await?; 299 | } else { 300 | peer.send( 301 | msg.id, 302 | false, 303 | true, 304 | Error::Authorisation("Query::RoomDailyNodes".to_string()), 305 | ) 306 | .await? 
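// As with every per-room query in this service: a room id that is not part
// of `peer.allowed_room` is answered with Error::Authorisation and no data.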
            Query::Nodes(room_id, node_ids) => {
                if peer.allowed_room.contains(&room_id) {
                    let mut res_reply = peer.db.get_nodes(room_id, node_ids).await;
                    while let Some(res) = res_reply.recv().await {
                        match res {
                            Ok(log) => peer.send(msg.id, true, false, log).await?,
                            Err(_e) => {
                                #[cfg(feature = "log")]
                                error!("Query::Nodes, Error: {_e}");
                                peer.send(
                                    msg.id,
                                    false,
                                    true,
                                    Error::RemoteTechnical("Query::Nodes".to_string()),
                                )
                                .await?
                            }
                        }
                    }
                    peer.send(msg.id, true, true, "").await?;
                } else {
                    peer.send(
                        msg.id,
                        false,
                        true,
                        Error::Authorisation("Query::Nodes".to_string()),
                    )
                    .await?
                }
                Ok(())
            }

            Query::Edges(room_id, nodes) => {
                if peer.allowed_room.contains(&room_id) {
                    let mut res_reply = peer.db.get_edges(room_id, nodes).await;
                    while let Some(res) = res_reply.recv().await {
                        match res {
                            Ok(log) => peer.send(msg.id, true, false, log).await?,
                            Err(_e) => {
                                #[cfg(feature = "log")]
                                error!("Query::Edges, Error: {_e}");
                                peer.send(
                                    msg.id,
                                    false,
                                    true,
                                    Error::RemoteTechnical("Query::Edges".to_string()),
                                )
                                .await?
                            }
                        }
                    }
                    peer.send(msg.id, true, true, "").await?;
                } else {
                    peer.send(
                        msg.id,
                        false,
                        true,
                        Error::Authorisation("Query::Edges".to_string()),
                    )
                    .await?
                }
                Ok(())
            }

            Query::EdgeDeletionLog(room_id, entity, date) => {
                if peer.allowed_room.contains(&room_id) {
                    let mut res_reply = peer
                        .db
                        .get_room_edge_deletion_log(room_id, entity, date)
                        .await;

                    while let Some(res) = res_reply.recv().await {
                        match res {
                            Ok(log) => peer.send(msg.id, true, false, log).await?,
                            Err(_e) => {
                                #[cfg(feature = "log")]
                                error!("Query::EdgeDeletionLog, Error: {_e}");
                                peer.send(
                                    msg.id,
                                    false,
                                    true,
                                    Error::RemoteTechnical("Query::EdgeDeletionLog".to_string()),
                                )
                                .await?
                            }
                        }
                    }
                    peer.send(msg.id, true, true, "").await?;
                } else {
                    peer.send(
                        msg.id,
                        false,
                        true,
                        Error::Authorisation("Query::EdgeDeletionLog".to_string()),
                    )
                    .await?
                }
                Ok(())
            }
            Query::NodeDeletionLog(room_id, entity, date) => {
                if peer.allowed_room.contains(&room_id) {
                    let mut res_reply = peer
                        .db
                        .get_room_node_deletion_log(room_id, entity, date)
                        .await;
                    while let Some(res) = res_reply.recv().await {
                        match res {
                            Ok(log) => peer.send(msg.id, true, false, log).await?,
                            Err(_e) => {
                                #[cfg(feature = "log")]
                                error!("Query::NodeDeletionLog, Error: {_e}");
                                peer.send(
                                    msg.id,
                                    false,
                                    true,
                                    Error::RemoteTechnical("Query::NodeDeletionLog".to_string()),
                                )
                                .await?
                            }
                        }
                    }
                    peer.send(msg.id, true, true, "").await?;
                } else {
                    peer.send(
                        msg.id,
                        false,
                        true,
                        Error::Authorisation("Query::NodeDeletionLog".to_string()),
                    )
                    .await?
                }
                Ok(())
            }
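            // Peer discovery: streams the peer nodes known for this room, gated by
            // the same `allowed_room` check as the data queries above.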
            Query::PeersForRoom(room_id) => {
                if peer.allowed_room.contains(&room_id) {
                    let mut res_reply = peer.db.peers_for_room(room_id).await;

                    while let Some(res) = res_reply.recv().await {
                        match res {
                            Ok(log) => peer.send(msg.id, true, false, log).await?,
                            Err(_e) => {
                                #[cfg(feature = "log")]
                                error!("Query::PeersForRoom, Error: {_e}");
                                peer.send(
                                    msg.id,
                                    false,
                                    true,
                                    Error::RemoteTechnical("Query::PeersForRoom".to_string()),
                                )
                                .await?
                            }
                        }
                    }
                    peer.send(msg.id, true, true, "").await?;
                } else {
                    peer.send(
                        msg.id,
                        false,
                        true,
                        Error::Authorisation("Query::PeersForRoom".to_string()),
                    )
                    .await?
                }
                Ok(())
            }
        }
    }
    pub fn add_allowed_room(&self, room: Uid) {
        let _ = self.room_sender.send(room);
    }
}

pub struct RemotePeerHandle {
    pub allowed_room: HashSet<Uid>,
    pub db: GraphDatabaseService,
    pub verifying_key: Vec<u8>,
    pub reply: mpsc::Sender<Answer>,
}
impl RemotePeerHandle {
    fn add_allowed_room(&mut self, room: Uid) {
        self.allowed_room.insert(room);
    }

    async fn send<T: Serialize>(
        &self,
        id: u64,
        success: bool,
        complete: bool,
        msg: T,
    ) -> Result<(), crate::Error> {
        let serialized = bincode::serialize(&msg)?;
        let answer = Answer {
            id,
            success,
            complete,
            serialized,
        };
        self.reply
            .send(answer)
            .await
            .map_err(|e| crate::Error::SendError(e.to_string()))
    }
}

--------------------------------------------------------------------------------
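The replies built by RemotePeerHandle::send are Answer envelopes: a bincode payload plus success and complete flags. The requesting side lives in peer_outbound_service.rs and is not shown here; the sketch below only illustrates how such a stream can be consumed, and the Answer mirror and the channel wiring are assumptions made for the example.

use serde::{de::DeserializeOwned, Deserialize, Serialize};
use tokio::sync::mpsc;

// Mirror of the Answer envelope assembled above (the real definition lives in
// the synchronisation module).
#[derive(Serialize, Deserialize)]
struct Answer {
    id: u64,
    success: bool,
    complete: bool,
    serialized: Vec<u8>,
}

// Collects one streamed reply: data frames arrive with `complete == false` and
// the stream ends with an empty frame flagged `complete == true`. Error frames
// carry `success == false` and a serialized Error payload.
async fn collect_rows<T: DeserializeOwned>(
    answers: &mut mpsc::Receiver<Answer>,
) -> Result<Vec<T>, Box<dyn std::error::Error>> {
    let mut rows = Vec::new();
    while let Some(answer) = answers.recv().await {
        if !answer.success {
            return Err("remote peer reported an error".into());
        }
        if answer.complete {
            break; // terminal marker, its payload is an empty string
        }
        rows.push(bincode::deserialize::<T>(&answer.serialized)?);
    }
    Ok(rows)
}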
/src/synchronisation/room_locking_service.rs:
--------------------------------------------------------------------------------
use std::collections::{HashMap, HashSet, VecDeque};

use tokio::sync::mpsc;

use crate::security::Uid;

pub enum SyncLockMessage {
    RequestLock([u8; 32], VecDeque<Uid>, mpsc::UnboundedSender<Uid>),
    Unlock(Uid),
}

struct PeerLockRequest {
    rooms: VecDeque<Uid>,
    reply: mpsc::UnboundedSender<Uid>,
}

static LOCK_CHANNEL_SIZE: usize = 2;
///
/// A peer that wants to synchronise a room must first acquire a lock on it, so that
/// several peers never synchronise the same room at the same time. The service also
/// caps the number of rooms that can be synchronised concurrently.
///
#[derive(Clone)]
pub struct RoomLockService {
    sender: mpsc::Sender<SyncLockMessage>,
}
impl RoomLockService {
    pub fn start(max_lock: usize) -> Self {
        let (sender, mut receiver) = mpsc::channel::<SyncLockMessage>(LOCK_CHANNEL_SIZE);
        tokio::spawn(async move {
            let mut peer_lock_request: HashMap<[u8; 32], PeerLockRequest> = HashMap::new();
            let mut peer_queue: VecDeque<[u8; 32]> = VecDeque::new();
            let mut locked: HashSet<Uid> = HashSet::new();
            let mut available = max_lock;

            while let Some(msg) = receiver.recv().await {
                match msg {
                    SyncLockMessage::RequestLock(circuit, rooms, reply) => {
                        if let Some(lock_request) = peer_lock_request.get_mut(&circuit) {
                            lock_request.reply = reply;
                            for room in rooms {
                                if !lock_request.rooms.iter().any(|e| room.eq(e)) {
                                    lock_request.rooms.push_back(room); // "hot" rooms are updated first
                                }
                            }
                        } else {
                            peer_lock_request.insert(circuit, PeerLockRequest { reply, rooms });
                            peer_queue.push_front(circuit);
                        }
                        let avail_iter = available;
                        for _ in 0..avail_iter {
                            Self::acquire_lock(
                                &mut peer_lock_request,
                                &mut peer_queue,
                                &mut locked,
                                &mut available,
                            )
                            .await;
                        }
                    }
                    SyncLockMessage::Unlock(room) => {
                        if locked.remove(&room) {
                            available += 1;
                            Self::acquire_lock(
                                &mut peer_lock_request,
                                &mut peer_queue,
                                &mut locked,
                                &mut available,
                            )
                            .await;
                        }
                    }
                }
            }
        });
        Self { sender }
    }

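    // Scheduling notes: peers enter `peer_queue` at the front and are served from
    // the back, so grants rotate round-robin between peers. A single call hands out
    // at most one lock, to the first queued peer that still has a room that is not
    // already locked; if that peer has pending rooms left it is re-queued with them.
    // The caller invokes this once per free slot, so at most `max_lock` rooms are
    // locked at any given time.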
    async fn acquire_lock(
        peer_lock_request: &mut HashMap<[u8; 32], PeerLockRequest>,
        peer_queue: &mut VecDeque<[u8; 32]>,
        locked: &mut HashSet<Uid>,
        available: &mut usize,
    ) {
        for _ in 0..peer_queue.len() {
            if let Some(peer) = peer_queue.pop_back() {
                if let Some(mut lock_request) = peer_lock_request.remove(&peer) {
                    let mut lock_acquired = false;
                    for _ in 0..lock_request.rooms.len() {
                        if let Some(room) = lock_request.rooms.pop_back() {
                            if locked.contains(&room) {
                                lock_request.rooms.push_front(room);
                            } else if lock_request.reply.send(room).is_ok() {
                                locked.insert(room);
                                *available -= 1;
                                lock_acquired = true;
                                break;
                            }
                        }
                    }
                    if !lock_request.rooms.is_empty() {
                        peer_lock_request.insert(peer, lock_request);
                        peer_queue.push_front(peer);
                    }
                    if lock_acquired {
                        break;
                    }
                }
            }
        }
    }

    pub async fn request_locks(
        &self,
        circuit_id: [u8; 32],
        rooms: VecDeque<Uid>,
        reply: mpsc::UnboundedSender<Uid>,
    ) {
        let _ = self
            .sender
            .send(SyncLockMessage::RequestLock(circuit_id, rooms, reply))
            .await;
    }

    pub async fn unlock(&self, room: Uid) {
        let _ = self.sender.send(SyncLockMessage::Unlock(room)).await;
    }
}
#[cfg(test)]
mod tests {

    use super::*;
    use crate::security::{base64_encode, new_uid, random32};

    #[tokio::test(flavor = "multi_thread")]
    async fn one_room_one_peer() {
        let lock_service = RoomLockService::start(1);

        let peer_id = random32();

        let rooms: VecDeque<Uid> = vec![new_uid()].into();
        let (sender, mut receiver) = mpsc::unbounded_channel::<Uid>();

        lock_service
            .request_locks(peer_id.clone(), rooms.clone(), sender.clone())
            .await;
        let room = receiver.recv().await.unwrap();

        lock_service.unlock(room).await;

        lock_service
            .request_locks(peer_id.clone(), rooms, sender.clone())
            .await;

        let room = receiver.recv().await.unwrap();

        lock_service.unlock(room).await;
    }

    #[tokio::test(flavor = "multi_thread")]
    async fn some_rooms_some_peers() {
        let num_entries = 32;
        let lock_service = RoomLockService::start(num_entries);
        let mut rooms = VecDeque::new();

        for _ in 0..num_entries {
            rooms.push_back(new_uid());
        }

        let mut tasks = Vec::with_capacity(num_entries);
        for _ in 0..num_entries {
            let service = lock_service.clone();
            let local_rooms = rooms.clone();
            let peer = random32();
            tasks.push(tokio::spawn(async move {
                let (sender, mut receiver) = mpsc::unbounded_channel::<Uid>();
                service
                    .clone()
                    .request_locks(peer.clone(), local_rooms, sender.clone())
                    .await;
                for _ in 0..num_entries {
                    let room = receiver.recv().await.unwrap();
                    service.unlock(room).await;
                }
                format!("---------peer {} finished", base64_encode(&peer))
            }));
        }
        for task in tasks {
            task.await.unwrap();
        }
    }
}

--------------------------------------------------------------------------------
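The tests above exercise the happy path. The sketch below illustrates the contention behaviour instead: with a budget of a single lock, a second peer asking for the same room is only served once the first peer releases it. This is illustrative only; it reuses the crate-internal helpers from the test module (new_uid, random32), and the module paths in the use statements are assumptions.

use std::collections::VecDeque;
use tokio::sync::mpsc;

use crate::security::{new_uid, random32}; // assumed paths, as in the tests above
use crate::synchronisation::room_locking_service::RoomLockService;

async fn contention_demo() {
    let service = RoomLockService::start(1); // budget of one concurrent room sync
    let room = new_uid();

    let (reply_a, mut locks_a) = mpsc::unbounded_channel();
    let (reply_b, mut locks_b) = mpsc::unbounded_channel();

    // Peer A takes the only available slot.
    service
        .request_locks(random32(), VecDeque::from(vec![room]), reply_a)
        .await;
    let held = locks_a.recv().await.unwrap();

    // Peer B queues up for the same room; its reply channel stays silent for now.
    service
        .request_locks(random32(), VecDeque::from(vec![room]), reply_b)
        .await;

    // As soon as A unlocks, the pending request of B is granted.
    service.unlock(held).await;
    let granted = locks_b.recv().await.unwrap();
    assert!(granted == room);
}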
/tests/mutations.rs:
--------------------------------------------------------------------------------
use std::path::PathBuf;

use discret::{Configuration, Discret, Parameters, ParametersAdd, ResultParser};
use rand::{rngs::OsRng, RngCore};

use serde::Deserialize;
const DATA_PATH: &str = "test_data/tests/";
pub fn random32() -> [u8; 32] {
    let mut random: [u8; 32] = [0; 32];

    OsRng.fill_bytes(&mut random);
    random
}
#[tokio::test(flavor = "multi_thread")]
async fn minimal() {
    let datamodel = "{
        Greetings{
            message:String
        }
    }";

    let mut key_material: [u8; 32] = [0; 32];
    OsRng.fill_bytes(&mut key_material);
    //let key_material = derive_pass_phrase("me", "my passphrase");

    let data_folder: PathBuf = DATA_PATH.into();
    let app = Discret::new(
        datamodel,
        "myappkey", //this key should be unique to your application and must never change once in production
        &key_material,
        data_folder,
        Configuration::default(),
    )
    .await
    .unwrap();

    let mut_result = app
        .mutate(
            r#"mutate {
                result: Greetings{
                    message: "Hello World"
                }
            }"#,
            None,
        )
        .await
        .unwrap();

    #[derive(Deserialize)]
    struct Id {
        id: String,
    }
    let mut parser = ResultParser::new(&mut_result).unwrap();
    let ids: Id = parser.take_object("result").unwrap();
    let id = ids.id;

    let mut params = Parameters::new();
    params.add("id", id.clone()).unwrap();
    let result = app
        .query(
            "query {
                Greetings(id=$id){
                    message
                }
            }",
            Some(params),
        )
        .await
        .unwrap();

    //println!("{:#?}", result);
    assert_eq!(
        result,
        "{\n\"Greetings\":[{\"message\":\"Hello World\"}]\n}"
    )
}

#[tokio::test(flavor = "multi_thread")]
async fn batch_insert() {
    let datamodel = "{
        Greetings{
            message:String
        }
    }";

    let mut key_material: [u8; 32] = [0; 32];
    OsRng.fill_bytes(&mut key_material);
    //let key_material = derive_pass_phrase("me", "my passphrase");

    let data_folder: PathBuf = DATA_PATH.into();
    let app = Discret::new(
        datamodel,
        "myappkey", //this key should be unique to your application and must never change once in production
        &key_material,
        data_folder,
        Configuration::default(),
    )
    .await
    .unwrap();

    let query = r#"mutate {
        result: Greetings{
            message: $message
        }
    }"#;

    let num_message = 100;

    let (sender, mut receiver) = app.mutation_stream();

    let result_task = tokio::spawn(async move {
        while let Some(msg) = receiver.recv().await {
            if let Err(e) = msg {
                println!("{}", e);
            }
        }
    });

    for i in 0..num_message {
        let mut param = Parameters::new();
        let _ = param.add("message", format!("hello world {}", i));
        let _ = sender.send((query.to_string(), Some(param))).await;
    }

    // tokio::time::sleep(Duration::from_millis(2000)).await;

    drop(sender);
    result_task.await.unwrap();

    let result = app
        .query(
            "query {
                Greetings (order_by(message asc)){
                    message
                }
            }",
            None,
        )
        .await
        .unwrap();
    #[derive(Deserialize)]
    struct Messages {
        pub message: String,
    }
    let mut parser = ResultParser::new(&result).unwrap();
    let msg: Vec<Messages> = parser.take_array("Greetings").unwrap();
    assert_eq!(msg.len(), num_message);
    assert_eq!(&msg[0].message, "hello world 0");
}

--------------------------------------------------------------------------------
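Both tests repeat the same query-then-parse pattern. A small wrapper such as the one sketched below can factor it out; fetch_all and its root parameter are hypothetical names, and only APIs already used in this file (Discret::query, ResultParser::take_array) are relied upon.

use discret::{Discret, Parameters, ResultParser};
use serde::de::DeserializeOwned;

// Runs a query and deserializes the array found under `root`, the alias or
// entity name used in the query (e.g. "Greetings").
async fn fetch_all<T: DeserializeOwned>(
    app: &Discret,
    query: &str,
    root: &str,
    params: Option<Parameters>,
) -> Vec<T> {
    let json = app.query(query, params).await.unwrap();
    let mut parser = ResultParser::new(&json).unwrap();
    parser.take_array(root).unwrap()
}

// e.g. let msgs: Vec<Messages> =
//     fetch_all(&app, "query { Greetings(order_by(message asc)){ message } }", "Greetings", None).await;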