├── .envrc ├── .gitignore ├── .rustfmt.toml ├── Cargo.toml ├── LICENSE ├── README.md ├── automerge-persistent-fs ├── Cargo.toml └── src │ └── lib.rs ├── automerge-persistent-localstorage ├── Cargo.toml └── src │ └── lib.rs ├── automerge-persistent-sled ├── Cargo.toml ├── benches │ └── save.rs └── src │ └── lib.rs ├── automerge-persistent ├── Cargo.toml └── src │ ├── autocommit.rs │ ├── lib.rs │ ├── mem.rs │ └── persister.rs ├── flake.lock └── flake.nix /.envrc: -------------------------------------------------------------------------------- 1 | use flake 2 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .direnv/ 2 | target 3 | Cargo.lock 4 | -------------------------------------------------------------------------------- /.rustfmt.toml: -------------------------------------------------------------------------------- 1 | group_imports = "StdExternalCrate" 2 | imports_granularity = "Crate" 3 | format_code_in_doc_comments = true 4 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [workspace] 2 | members = [ 3 | "automerge-persistent", 4 | "automerge-persistent-sled", 5 | "automerge-persistent-localstorage", 6 | "automerge-persistent-fs", 7 | ] 8 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2021 Andrew Jeffery 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of 
the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Automerge persistent 2 | 3 | This project extends [automerge-rs](https://github.com/automerge/automerge-rs) 4 | with some persistence. There is a core trait for what functionality a persister 5 | should have and a backend wrapper struct to utilise this. 6 | 7 | For now, see the benches for an example of a 8 | [sled](https://github.com/spacejam/sled) backend. Adding more backends to this 9 | repo would be very much appreciated. 10 | 11 | Good backends to have would be: 12 | 13 | - [x] memory (for some testing scenarios) 14 | - [x] sled 15 | - [x] localstorage 16 | - [ ] indexeddb 17 | - [x] filesystem 18 | - other suggestions welcome! 19 | 20 | ## Usage 21 | 22 | The `PersistentBackend` struct should be the main point of reference and should 23 | fit in place of a normal `Backend`. It can load from the persistent storage and 24 | automatically saves on the appropriate actions (but more efficiently). 25 | Occasionally the user should schedule a call to `compact` if storage and load 26 | time are of concern. 
This gathers the changes and saves the backend in a more
compressed form, after which the old changes are removed.
PathBuf) -> Result { 32 | let mut flushed = 0; 33 | for ((a, s), c) in self.changes.drain() { 34 | fs::write(make_changes_path(&changes_path, &a, s), &c)?; 35 | flushed += c.len(); 36 | } 37 | Ok(flushed) 38 | } 39 | 40 | fn flush_document(&mut self, doc_path: PathBuf) -> Result { 41 | let mut flushed = 0; 42 | if let Some(data) = self.document.take() { 43 | fs::write(&doc_path, &data)?; 44 | flushed = data.len(); 45 | } 46 | Ok(flushed) 47 | } 48 | 49 | fn flush_sync_states(&mut self, sync_states_path: PathBuf) -> Result { 50 | let mut flushed = 0; 51 | for (peer_id, sync_state) in self.sync_states.drain() { 52 | fs::write(make_peer_path(&sync_states_path, &peer_id), &sync_state)?; 53 | flushed += sync_state.len(); 54 | } 55 | Ok(flushed) 56 | } 57 | 58 | #[cfg(feature = "async")] 59 | async fn flush_changes_async( 60 | &mut self, 61 | changes_path: PathBuf, 62 | ) -> Result { 63 | let futs = futures::stream::FuturesUnordered::new(); 64 | for ((a, s), c) in self.changes.drain() { 65 | let len = c.len(); 66 | futs.push( 67 | tokio::fs::write(make_changes_path(&changes_path, &a, s), c).map(move |_| Ok(len)), 68 | ); 69 | } 70 | let res: Result, std::io::Error> = futs.try_collect().await; 71 | Ok(res?.iter().sum()) 72 | } 73 | 74 | #[cfg(feature = "async")] 75 | async fn flush_document_async(&mut self, doc_path: PathBuf) -> Result { 76 | let mut flushed = 0; 77 | if let Some(data) = self.document.take() { 78 | tokio::fs::write(&doc_path, &data).await?; 79 | flushed = data.len(); 80 | } 81 | Ok(flushed) 82 | } 83 | 84 | #[cfg(feature = "async")] 85 | async fn flush_sync_states_async( 86 | &mut self, 87 | sync_states_path: PathBuf, 88 | ) -> Result { 89 | let futs = futures::stream::FuturesUnordered::new(); 90 | for (peer_id, sync_state) in self.sync_states.drain() { 91 | let len = sync_state.len(); 92 | futs.push( 93 | tokio::fs::write(make_peer_path(&sync_states_path, &peer_id), sync_state) 94 | .map(move |_| Ok(len)), 95 | ); 96 | } 97 | let res: Result, 
std::io::Error> = futs.try_collect().await; 98 | Ok(res?.iter().sum()) 99 | } 100 | 101 | #[cfg(feature = "async")] 102 | pub async fn flush_async( 103 | &mut self, 104 | doc_path: PathBuf, 105 | changes_path: PathBuf, 106 | sync_states_path: PathBuf, 107 | ) -> Result { 108 | let mut flushed = 0; 109 | flushed += self.flush_document_async(doc_path).await?; 110 | flushed += self.flush_changes_async(changes_path).await?; 111 | flushed += self.flush_sync_states_async(sync_states_path).await?; 112 | Ok(flushed) 113 | } 114 | 115 | pub fn flush( 116 | &mut self, 117 | doc_path: PathBuf, 118 | changes_path: PathBuf, 119 | sync_states_path: PathBuf, 120 | ) -> Result { 121 | let mut flushed = 0; 122 | flushed += self.flush_document(doc_path)?; 123 | flushed += self.flush_changes(changes_path)?; 124 | flushed += self.flush_sync_states(sync_states_path)?; 125 | Ok(flushed) 126 | } 127 | 128 | fn drain_clone(&mut self) -> Self { 129 | Self { 130 | changes: self.changes.drain().collect(), 131 | document: self.document.take(), 132 | sync_states: self.sync_states.drain().collect(), 133 | } 134 | } 135 | } 136 | 137 | /// Possible errors from persisting. 
138 | #[derive(Debug, thiserror::Error)] 139 | pub enum FsPersisterError { 140 | #[error(transparent)] 141 | Io(#[from] std::io::Error), 142 | #[error(transparent)] 143 | Hex(#[from] FromHexError), 144 | } 145 | 146 | const CHANGES_DIR: &str = "changes"; 147 | const DOC_FILE: &str = "doc"; 148 | const SYNC_DIR: &str = "sync"; 149 | 150 | impl FsPersister { 151 | pub fn new, P: AsRef>( 152 | root: R, 153 | prefix: P, 154 | ) -> Result { 155 | let root_path = root.as_ref().join(&prefix); 156 | fs::create_dir_all(&root_path)?; 157 | 158 | let changes_path = root_path.join(CHANGES_DIR); 159 | if fs::metadata(&changes_path).is_err() { 160 | fs::create_dir(&changes_path)?; 161 | } 162 | 163 | let doc_path = root_path.join(DOC_FILE); 164 | 165 | let sync_states_path = root_path.join(SYNC_DIR); 166 | if fs::metadata(&sync_states_path).is_err() { 167 | fs::create_dir(&sync_states_path)?; 168 | } 169 | 170 | let mut s = Self { 171 | changes_path, 172 | doc_path, 173 | sync_states_path, 174 | cache: FsPersisterCache { 175 | changes: HashMap::new(), 176 | document: None, 177 | sync_states: HashMap::new(), 178 | }, 179 | sizes: StoredSizes::default(), 180 | }; 181 | 182 | s.sizes.changes = s.get_changes()?.iter().map(|v| v.len() as u64).sum(); 183 | s.sizes.document = s.get_document()?.unwrap_or_default().len() as u64; 184 | s.sizes.sync_states = s 185 | .get_peer_ids()? 186 | .iter() 187 | .map(|id| { 188 | s.get_sync_state(id) 189 | .map(|o| o.unwrap_or_default().len() as u64) 190 | }) 191 | .collect::, _>>()? 
192 | .iter() 193 | .sum(); 194 | 195 | Ok(s) 196 | } 197 | 198 | #[cfg(feature = "async")] 199 | pub fn flush_cache_async(&mut self) -> impl Future> { 200 | let doc_path = self.doc_path.clone(); 201 | let changes_path = self.changes_path.clone(); 202 | let sync_states_path = self.sync_states_path.clone(); 203 | let mut cache = self.cache.drain_clone(); 204 | async move { 205 | cache 206 | .flush_async(doc_path, changes_path, sync_states_path) 207 | .await 208 | } 209 | } 210 | 211 | pub fn load, P: AsRef>( 212 | root: R, 213 | prefix: P, 214 | ) -> Result, FsPersisterError> { 215 | if !root.as_ref().join(&prefix).exists() { 216 | return Ok(None); 217 | } 218 | let doc = Self::new(root, prefix)?; 219 | Ok(Some(doc)) 220 | } 221 | } 222 | 223 | fn make_changes_path>(changes_path: P, actor_id: &ActorId, seq: u64) -> PathBuf { 224 | changes_path 225 | .as_ref() 226 | .join(format!("{}-{}", actor_id.to_hex_string(), seq)) 227 | } 228 | 229 | fn make_peer_path>(sync_states_path: P, peer_id: &[u8]) -> PathBuf { 230 | sync_states_path.as_ref().join(hex::encode(peer_id)) 231 | } 232 | 233 | impl Persister for FsPersister { 234 | type Error = FsPersisterError; 235 | 236 | fn get_changes(&self) -> Result>, Self::Error> { 237 | fs::read_dir(&self.changes_path)? 
238 | .filter_map(|entry| { 239 | if let Ok((Ok(file_type), path)) = 240 | entry.map(|entry| (entry.file_type(), entry.path())) 241 | { 242 | if file_type.is_file() { 243 | Some(fs::read(path).map_err(FsPersisterError::from)) 244 | } else { 245 | None 246 | } 247 | } else { 248 | None 249 | } 250 | }) 251 | .collect() 252 | } 253 | 254 | fn insert_changes(&mut self, changes: Vec<(ActorId, u64, Vec)>) -> Result<(), Self::Error> { 255 | for (a, s, c) in changes { 256 | self.sizes.changes += c.len() as u64; 257 | if let Some(old) = self.cache.changes.insert((a, s), c) { 258 | self.sizes.changes -= old.len() as u64; 259 | } 260 | } 261 | Ok(()) 262 | } 263 | 264 | fn remove_changes(&mut self, changes: Vec<(&ActorId, u64)>) -> Result<(), Self::Error> { 265 | for (a, s) in changes { 266 | if let Some(old) = self.cache.changes.remove(&(a.clone(), s)) { 267 | // not flushed yet 268 | self.sizes.changes -= old.len() as u64; 269 | continue; 270 | } 271 | 272 | let path = make_changes_path(&self.changes_path, a, s); 273 | if let Ok(meta) = fs::metadata(&path) { 274 | if meta.is_file() { 275 | fs::remove_file(&path)?; 276 | self.sizes.changes -= meta.len(); 277 | } 278 | } 279 | } 280 | Ok(()) 281 | } 282 | 283 | fn get_document(&self) -> Result>, Self::Error> { 284 | if let Some(ref doc) = self.cache.document { 285 | return Ok(Some(doc.clone())); 286 | } 287 | if fs::metadata(&self.doc_path).is_ok() { 288 | return Ok(fs::read(&self.doc_path).map(|v| if v.is_empty() { None } else { Some(v) })?); 289 | } 290 | Ok(None) 291 | } 292 | 293 | fn set_document(&mut self, data: Vec) -> Result<(), Self::Error> { 294 | self.sizes.document = data.len() as u64; 295 | self.cache.document = Some(data); 296 | Ok(()) 297 | } 298 | 299 | fn get_sync_state(&self, peer_id: &[u8]) -> Result>, Self::Error> { 300 | if let Some(sync_state) = self.cache.sync_states.get(peer_id) { 301 | return Ok(Some(sync_state.clone())); 302 | } 303 | let path = make_peer_path(&self.sync_states_path, peer_id); 304 | 
if fs::metadata(&path).is_ok() { 305 | return Ok(fs::read(&path).map(|v| if v.is_empty() { None } else { Some(v) })?); 306 | } 307 | Ok(None) 308 | } 309 | 310 | fn set_sync_state(&mut self, peer_id: Vec, sync_state: Vec) -> Result<(), Self::Error> { 311 | self.sizes.sync_states += sync_state.len() as u64; 312 | if let Some(old) = self.cache.sync_states.insert(peer_id, sync_state) { 313 | self.sizes.sync_states -= old.len() as u64; 314 | } 315 | Ok(()) 316 | } 317 | 318 | fn remove_sync_states(&mut self, peer_ids: &[&[u8]]) -> Result<(), Self::Error> { 319 | for peer_id in peer_ids { 320 | if let Some(old) = self.cache.sync_states.remove(*peer_id) { 321 | // not flushed yet 322 | self.sizes.sync_states -= old.len() as u64; 323 | continue; 324 | } 325 | let path = make_peer_path(&self.sync_states_path, peer_id); 326 | if let Ok(meta) = fs::metadata(&path) { 327 | if meta.is_file() { 328 | fs::remove_file(&path)?; 329 | self.sizes.sync_states -= meta.len(); 330 | } 331 | } 332 | } 333 | Ok(()) 334 | } 335 | 336 | fn get_peer_ids(&self) -> Result>, Self::Error> { 337 | fs::read_dir(&self.sync_states_path)? 
338 | .filter_map(|entry| { 339 | if let Ok((Ok(file_type), path)) = 340 | entry.map(|entry| (entry.file_type(), entry.path())) 341 | { 342 | if file_type.is_file() { 343 | Some( 344 | hex::decode(path.file_name().unwrap().as_bytes()) 345 | .map_err(FsPersisterError::from), 346 | ) 347 | } else { 348 | None 349 | } 350 | } else { 351 | None 352 | } 353 | }) 354 | .collect() 355 | } 356 | 357 | fn sizes(&self) -> StoredSizes { 358 | self.sizes.clone() 359 | } 360 | 361 | fn flush(&mut self) -> Result { 362 | self.cache 363 | .drain_clone() 364 | .flush( 365 | self.doc_path.clone(), 366 | self.changes_path.clone(), 367 | self.sync_states_path.clone(), 368 | ) 369 | .map_err(FsPersisterError::from) 370 | } 371 | } 372 | -------------------------------------------------------------------------------- /automerge-persistent-localstorage/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "automerge-persistent-localstorage" 3 | version = "0.4.0" 4 | authors = ["Andrew Jeffery "] 5 | edition = "2018" 6 | license = "MIT" 7 | repository = "https://github.com/jeffa5/automerge-persistent" 8 | description = "A web-based localstorage adapter for persisting Automerge documents" 9 | 10 | [dependencies] 11 | automerge = { git = "https://github.com/jeffa5/automerge", branch = "cmp-heads" } 12 | automerge-persistent = { path = "../automerge-persistent", version = "0.4.0" } 13 | web-sys = { version = "0.3.50", features = ["Storage"] } 14 | serde = "1.0.125" 15 | serde_json = "1.0.64" 16 | thiserror = "1.0.24" 17 | wasm-bindgen = "0.2.73" 18 | base64 = "0.21.0" 19 | -------------------------------------------------------------------------------- /automerge-persistent-localstorage/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![warn(missing_docs)] 2 | #![warn(missing_crate_level_docs)] 3 | #![warn(missing_doc_code_examples)] 4 | #![warn(clippy::pedantic)] 5 | 
#![warn(clippy::nursery)] 6 | 7 | //! A persister targetting `LocalStorage` in the browser. 8 | //! 9 | //! ```rust,no_run 10 | //! # use automerge_persistent_localstorage::{LocalStoragePersister, LocalStoragePersisterError}; 11 | //! # use automerge_persistent::PersistentAutomerge; 12 | //! # fn main() -> Result<(), LocalStoragePersisterError> { 13 | //! let storage = web_sys::window() 14 | //! .unwrap() 15 | //! .local_storage() 16 | //! .map_err(LocalStoragePersisterError::StorageError)? 17 | //! .unwrap(); 18 | //! 19 | //! let persister = LocalStoragePersister::new(storage, "document".to_owned(), "changes".to_owned(), "sync-states".to_owned())?; 20 | //! let doc = PersistentAutomerge::load(persister).unwrap(); 21 | //! # Ok(()) 22 | //! # } 23 | //! ``` 24 | 25 | use std::collections::HashMap; 26 | 27 | use automerge::ActorId; 28 | use automerge_persistent::{Persister, StoredSizes}; 29 | use base64::Engine; 30 | 31 | /// Persist changes and documents in to `LocalStorage`. 32 | /// 33 | /// While aimed at `LocalStorage`, it accepts any storage that conforms to the [`web_sys::Storage`] 34 | /// API. 35 | /// 36 | /// Since `LocalStorage` is limited we store changes in a JSON map in one key. 37 | #[derive(Debug)] 38 | pub struct LocalStoragePersister { 39 | storage: web_sys::Storage, 40 | changes: HashMap>, 41 | /// Base64 encoded peer_ids are used for the keys so they can be serialized to json. 42 | sync_states: HashMap>, 43 | document_key: String, 44 | changes_key: String, 45 | sync_states_key: String, 46 | sizes: StoredSizes, 47 | } 48 | 49 | /// Possible errors from persisting. 50 | #[derive(Debug, thiserror::Error)] 51 | pub enum LocalStoragePersisterError { 52 | /// Serde failure, converting the change/document into JSON. 53 | #[error(transparent)] 54 | SerdeError(#[from] serde_json::Error), 55 | /// An underlying storage error. 
56 | #[error("storage error {0:?}")] 57 | StorageError(wasm_bindgen::JsValue), 58 | } 59 | 60 | impl LocalStoragePersister { 61 | /// Construct a new `LocalStoragePersister`. 62 | pub fn new( 63 | storage: web_sys::Storage, 64 | document_key: String, 65 | changes_key: String, 66 | sync_states_key: String, 67 | ) -> Result { 68 | let changes = if let Some(stored) = storage 69 | .get_item(&changes_key) 70 | .map_err(LocalStoragePersisterError::StorageError)? 71 | { 72 | serde_json::from_str(&stored)? 73 | } else { 74 | HashMap::new() 75 | }; 76 | let sync_states = if let Some(stored) = storage 77 | .get_item(&sync_states_key) 78 | .map_err(LocalStoragePersisterError::StorageError)? 79 | { 80 | serde_json::from_str(&stored)? 81 | } else { 82 | HashMap::new() 83 | }; 84 | let document = if let Some(doc_string) = storage 85 | .get_item(&document_key) 86 | .map_err(LocalStoragePersisterError::StorageError)? 87 | { 88 | let doc = serde_json::from_str::>(&doc_string)?; 89 | Some(doc) 90 | } else { 91 | None 92 | }; 93 | let sizes = StoredSizes { 94 | changes: changes.values().map(Vec::len).sum::() as u64, 95 | document: document.unwrap_or_default().len() as u64, 96 | sync_states: sync_states.values().map(Vec::len).sum::() as u64, 97 | }; 98 | Ok(Self { 99 | storage, 100 | changes, 101 | sync_states, 102 | document_key, 103 | changes_key, 104 | sync_states_key, 105 | sizes, 106 | }) 107 | } 108 | } 109 | 110 | impl Persister for LocalStoragePersister { 111 | type Error = LocalStoragePersisterError; 112 | 113 | fn get_changes(&self) -> Result>, Self::Error> { 114 | Ok(self.changes.values().cloned().collect()) 115 | } 116 | 117 | fn insert_changes(&mut self, changes: Vec<(ActorId, u64, Vec)>) -> Result<(), Self::Error> { 118 | for (a, s, c) in changes { 119 | let key = make_key(&a, s); 120 | 121 | self.sizes.changes += c.len() as u64; 122 | if let Some(old) = self.changes.insert(key, c) { 123 | self.sizes.changes -= old.len() as u64; 124 | } 125 | } 126 | self.storage 127 | 
.set_item(&self.changes_key, &serde_json::to_string(&self.changes)?) 128 | .map_err(LocalStoragePersisterError::StorageError)?; 129 | Ok(()) 130 | } 131 | 132 | fn remove_changes(&mut self, changes: Vec<(&ActorId, u64)>) -> Result<(), Self::Error> { 133 | let mut some_removal = false; 134 | for (a, s) in changes { 135 | let key = make_key(a, s); 136 | if let Some(old) = self.changes.remove(&key) { 137 | self.sizes.changes -= old.len() as u64; 138 | some_removal = true; 139 | } 140 | } 141 | 142 | if some_removal { 143 | let s = serde_json::to_string(&self.changes)?; 144 | self.storage 145 | .set_item(&self.changes_key, &s) 146 | .map_err(LocalStoragePersisterError::StorageError)?; 147 | } 148 | Ok(()) 149 | } 150 | 151 | fn get_document(&self) -> Result>, Self::Error> { 152 | if let Some(doc_string) = self 153 | .storage 154 | .get_item(&self.document_key) 155 | .map_err(LocalStoragePersisterError::StorageError)? 156 | { 157 | let doc = serde_json::from_str(&doc_string)?; 158 | Ok(Some(doc)) 159 | } else { 160 | Ok(None) 161 | } 162 | } 163 | 164 | fn set_document(&mut self, data: Vec) -> Result<(), Self::Error> { 165 | self.sizes.document = data.len() as u64; 166 | let data = serde_json::to_string(&data)?; 167 | self.storage 168 | .set_item(&self.document_key, &data) 169 | .map_err(LocalStoragePersisterError::StorageError)?; 170 | Ok(()) 171 | } 172 | 173 | fn get_sync_state(&self, peer_id: &[u8]) -> Result>, Self::Error> { 174 | let peer_id = base64::engine::general_purpose::STANDARD.encode(peer_id); 175 | Ok(self.sync_states.get(&peer_id).cloned()) 176 | } 177 | 178 | fn set_sync_state(&mut self, peer_id: Vec, sync_state: Vec) -> Result<(), Self::Error> { 179 | self.sizes.sync_states += sync_state.len() as u64; 180 | let peer_id = base64::engine::general_purpose::STANDARD.encode(peer_id); 181 | if let Some(old) = self.sync_states.insert(peer_id, sync_state) { 182 | self.sizes.sync_states -= old.len() as u64; 183 | } 184 | self.storage 185 | .set_item( 186 | 
&self.sync_states_key, 187 | &serde_json::to_string(&self.sync_states)?, 188 | ) 189 | .map_err(LocalStoragePersisterError::StorageError)?; 190 | Ok(()) 191 | } 192 | 193 | fn remove_sync_states(&mut self, peer_ids: &[&[u8]]) -> Result<(), Self::Error> { 194 | for peer_id in peer_ids { 195 | let peer_id = base64::engine::general_purpose::STANDARD.encode(peer_id); 196 | if let Some(old) = self.sync_states.remove(&peer_id) { 197 | self.sizes.sync_states -= old.len() as u64; 198 | } 199 | } 200 | self.storage 201 | .set_item( 202 | &self.sync_states_key, 203 | &serde_json::to_string(&self.sync_states)?, 204 | ) 205 | .map_err(LocalStoragePersisterError::StorageError)?; 206 | Ok(()) 207 | } 208 | 209 | fn get_peer_ids(&self) -> Result>, Self::Error> { 210 | Ok(self 211 | .sync_states 212 | .keys() 213 | .map(|key| { 214 | base64::engine::general_purpose::STANDARD 215 | .decode(key) 216 | .expect("Failed to base64 decode they peer_id") 217 | }) 218 | .collect()) 219 | } 220 | 221 | fn sizes(&self) -> StoredSizes { 222 | self.sizes.clone() 223 | } 224 | 225 | fn flush(&mut self) -> Result { 226 | Ok(0) 227 | } 228 | } 229 | 230 | /// Make a key from the `actor_id` and `sequence_number`. 231 | /// 232 | /// Converts the `actor_id` to a string and appends the `sequence_number`. 
233 | fn make_key(actor_id: &ActorId, seq: u64) -> String { 234 | let mut key = actor_id.to_hex_string(); 235 | key.push_str(&seq.to_string()); 236 | key 237 | } 238 | -------------------------------------------------------------------------------- /automerge-persistent-sled/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "automerge-persistent-sled" 3 | version = "0.4.0" 4 | authors = ["Andrew Jeffery "] 5 | edition = "2018" 6 | license = "MIT" 7 | repository = "https://github.com/jeffa5/automerge-persistent" 8 | description = "A sled adapter for persisting Automerge documents" 9 | 10 | [dependencies] 11 | automerge = { git = "https://github.com/jeffa5/automerge", branch = "cmp-heads" } 12 | automerge-persistent = { path = "../automerge-persistent", version = "0.4.0" } 13 | sled = "0.34.6" 14 | thiserror = "1.0.24" 15 | 16 | [dev-dependencies] 17 | criterion = "0.4.0" 18 | 19 | [[bench]] 20 | name = "save" 21 | harness = false 22 | -------------------------------------------------------------------------------- /automerge-persistent-sled/benches/save.rs: -------------------------------------------------------------------------------- 1 | use automerge::{transaction::Transactable, ROOT}; 2 | use automerge_persistent::PersistentAutomerge; 3 | use criterion::{criterion_group, criterion_main, Criterion}; 4 | 5 | fn small_backend_apply_local_change(c: &mut Criterion) { 6 | c.bench_function("small backend apply local change", |b| { 7 | b.iter_batched( 8 | || { 9 | let db = sled::Config::new().temporary(true).open().unwrap(); 10 | let sled = automerge_persistent_sled::SledPersister::new( 11 | db.open_tree("changes").unwrap(), 12 | db.open_tree("document").unwrap(), 13 | db.open_tree("sync_states").unwrap(), 14 | "".to_owned(), 15 | ) 16 | .unwrap(); 17 | let mut doc: PersistentAutomerge = 18 | automerge_persistent::PersistentAutomerge::load(sled).unwrap(); 19 | doc.transact::<_, _, 
std::convert::Infallible>(|doc| { 20 | doc.put(ROOT, "a", "abcdef").unwrap(); 21 | Ok(()) 22 | }) 23 | .unwrap(); 24 | let change = doc.document().get_last_local_change().cloned(); 25 | 26 | (doc, change.unwrap()) 27 | }, 28 | |(mut persistent_doc, change)| { 29 | persistent_doc.document_mut().apply_changes(vec![change]) 30 | }, 31 | criterion::BatchSize::SmallInput, 32 | ) 33 | }); 34 | } 35 | 36 | fn small_backend_apply_local_change_flush(c: &mut Criterion) { 37 | c.bench_function("small backend apply local change flush", |b| { 38 | b.iter_batched( 39 | || { 40 | let db = sled::Config::new().temporary(true).open().unwrap(); 41 | let sled = automerge_persistent_sled::SledPersister::new( 42 | db.open_tree("changes").unwrap(), 43 | db.open_tree("document").unwrap(), 44 | db.open_tree("sync_states").unwrap(), 45 | "".to_owned(), 46 | ) 47 | .unwrap(); 48 | let mut doc: PersistentAutomerge = 49 | automerge_persistent::PersistentAutomerge::load(sled).unwrap(); 50 | doc.transact::<_, _, std::convert::Infallible>(|doc| { 51 | doc.put(ROOT, "a", "abcdef").unwrap(); 52 | Ok(()) 53 | }) 54 | .unwrap(); 55 | let change = doc.document().get_last_local_change().cloned(); 56 | 57 | (db, doc, change.unwrap()) 58 | }, 59 | |(db, mut persistent_doc, change)| { 60 | persistent_doc 61 | .document_mut() 62 | .apply_changes(vec![change]) 63 | .unwrap(); 64 | db.flush().unwrap() 65 | }, 66 | criterion::BatchSize::SmallInput, 67 | ) 68 | }); 69 | } 70 | 71 | fn small_backend_apply_changes(c: &mut Criterion) { 72 | c.bench_function("small backend apply changes", |b| { 73 | b.iter_batched( 74 | || { 75 | let db = sled::Config::new().temporary(true).open().unwrap(); 76 | let sled = automerge_persistent_sled::SledPersister::new( 77 | db.open_tree("changes").unwrap(), 78 | db.open_tree("document").unwrap(), 79 | db.open_tree("sync_states").unwrap(), 80 | "".to_owned(), 81 | ) 82 | .unwrap(); 83 | let other_backend = automerge::Automerge::new(); 84 | let mut doc: PersistentAutomerge = 85 | 
automerge_persistent::PersistentAutomerge::load(sled).unwrap(); 86 | doc.transact::<_, _, std::convert::Infallible>(|doc| { 87 | doc.put(ROOT, "a", "abcdef").unwrap(); 88 | Ok(()) 89 | }) 90 | .unwrap(); 91 | let changes = other_backend 92 | .get_changes(&[]) 93 | .unwrap() 94 | .into_iter() 95 | .cloned() 96 | .collect::>(); 97 | (doc, changes) 98 | }, 99 | |(mut persistent_doc, changes)| persistent_doc.document_mut().apply_changes(changes), 100 | criterion::BatchSize::SmallInput, 101 | ) 102 | }); 103 | } 104 | 105 | fn small_backend_compact(c: &mut Criterion) { 106 | c.bench_function("small backend compact", |b| { 107 | b.iter_batched( 108 | || { 109 | let db = sled::Config::new().temporary(true).open().unwrap(); 110 | let sled = automerge_persistent_sled::SledPersister::new( 111 | db.open_tree("changes").unwrap(), 112 | db.open_tree("document").unwrap(), 113 | db.open_tree("sync_states").unwrap(), 114 | "".to_owned(), 115 | ) 116 | .unwrap(); 117 | let mut doc: PersistentAutomerge = 118 | automerge_persistent::PersistentAutomerge::load(sled).unwrap(); 119 | doc.transact::<_, _, std::convert::Infallible>(|doc| { 120 | doc.put(ROOT, "a", "abcdef").unwrap(); 121 | Ok(()) 122 | }) 123 | .unwrap(); 124 | doc 125 | }, 126 | |mut persistent_doc| persistent_doc.compact(&[]), 127 | criterion::BatchSize::SmallInput, 128 | ) 129 | }); 130 | } 131 | 132 | criterion_group! 
{ 133 | name = benches; 134 | config = Criterion::default().sample_size(50); 135 | targets = small_backend_apply_local_change, small_backend_apply_local_change_flush, small_backend_apply_changes, small_backend_compact 136 | } 137 | criterion_main!(benches); 138 | -------------------------------------------------------------------------------- /automerge-persistent-sled/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![warn(missing_docs)] 2 | #![warn(missing_crate_level_docs)] 3 | #![warn(missing_doc_code_examples)] 4 | #![warn(clippy::pedantic)] 5 | #![warn(clippy::nursery)] 6 | 7 | //! A persister targetting [Sled](https://github.com/spacejam/sled). 8 | //! 9 | //! # Single persister 10 | //! 11 | //! ```rust 12 | //! # use automerge_persistent::PersistentAutomerge; 13 | //! # use automerge_persistent_sled::SledPersister; 14 | //! # use automerge_persistent_sled::SledPersisterError; 15 | //! # fn main() -> Result<(), SledPersisterError> { 16 | //! let db = sled::Config::new().temporary(true).open()?; 17 | //! let changes_tree = db.open_tree("changes")?; 18 | //! let documents_tree = db.open_tree("documents")?; 19 | //! let sync_states_tree = db.open_tree("sync-states")?; 20 | //! 21 | //! let persister = SledPersister::new(changes_tree, documents_tree, sync_states_tree, "")?; 22 | //! let doc = PersistentAutomerge::load(persister); 23 | //! # Ok(()) 24 | //! # } 25 | //! ``` 26 | //! 27 | //! # Multiple persisters sharing the same trees 28 | //! 29 | //! ```rust 30 | //! # use automerge_persistent::PersistentAutomerge; 31 | //! # use automerge_persistent_sled::SledPersister; 32 | //! # use automerge_persistent_sled::SledPersisterError; 33 | //! # fn main() -> Result<(), SledPersisterError> { 34 | //! let db = sled::Config::new().temporary(true).open()?; 35 | //! let changes_tree = db.open_tree("changes")?; 36 | //! let documents_tree = db.open_tree("documents")?; 37 | //! 
let sync_states_tree = db.open_tree("sync-states")?; 38 | //! 39 | //! let persister1 = SledPersister::new( 40 | //! changes_tree.clone(), 41 | //! documents_tree.clone(), 42 | //! sync_states_tree.clone(), 43 | //! "1", 44 | //! )?; 45 | //! let doc1 = PersistentAutomerge::load(persister1); 46 | //! 47 | //! let persister2 = SledPersister::new(changes_tree, documents_tree, sync_states_tree, "2")?; 48 | //! let doc2 = PersistentAutomerge::load(persister2); 49 | //! # Ok(()) 50 | //! # } 51 | //! ``` 52 | 53 | use automerge::ActorId; 54 | use automerge_persistent::{Persister, StoredSizes}; 55 | 56 | /// The persister that stores changes and documents in sled trees. 57 | /// 58 | /// Changes and documents are kept in separate trees. 59 | /// 60 | /// An optional prefix can be used in case multiple persisters may share the same trees. 61 | #[derive(Debug)] 62 | pub struct SledPersister { 63 | changes_tree: sled::Tree, 64 | document_tree: sled::Tree, 65 | sync_states_tree: sled::Tree, 66 | prefix: String, 67 | sizes: StoredSizes, 68 | } 69 | 70 | /// Possible errors from persisting. 71 | #[derive(Debug, thiserror::Error)] 72 | pub enum SledPersisterError { 73 | /// Internal errors from sled. 74 | #[error(transparent)] 75 | SledError(#[from] sled::Error), 76 | } 77 | 78 | impl SledPersister { 79 | /// Construct a new persister. 80 | pub fn new( 81 | changes_tree: sled::Tree, 82 | document_tree: sled::Tree, 83 | sync_states_tree: sled::Tree, 84 | prefix: S, 85 | ) -> Result 86 | where 87 | S: Into, 88 | { 89 | let prefix = prefix.into(); 90 | 91 | let mut s = Self { 92 | changes_tree, 93 | document_tree, 94 | sync_states_tree, 95 | prefix, 96 | sizes: StoredSizes::default(), 97 | }; 98 | s.sizes.changes = s.get_changes()?.iter().map(Vec::len).sum::() as u64; 99 | s.sizes.document = s.get_document()?.unwrap_or_default().len() as u64; 100 | s.sizes.sync_states = s 101 | .get_peer_ids()? 
102 | .iter() 103 | .map(|id| s.get_sync_state(id).map(|o| o.unwrap_or_default().len())) 104 | .collect::, _>>()? 105 | .iter() 106 | .sum::() as u64; 107 | Ok(s) 108 | } 109 | 110 | /// Make a key from the prefix, `actor_id` and `sequence_number`. 111 | /// 112 | /// Converts the `actor_id` to bytes and appends the `sequence_number` in big endian form. 113 | fn make_key(&self, actor_id: &ActorId, seq: u64) -> Vec { 114 | let mut key = self.prefix.as_bytes().to_vec(); 115 | key.extend(actor_id.to_bytes()); 116 | key.extend(&seq.to_be_bytes()); 117 | key 118 | } 119 | 120 | /// Make a key just from the prefix. 121 | /// Since each document only has one thing to store in this tree we can just use the prefix. 122 | fn make_document_key(&self) -> Vec { 123 | self.prefix.as_bytes().to_vec() 124 | } 125 | 126 | fn make_peer_key(&self, peer_id: &[u8]) -> Vec { 127 | let mut key = self.prefix.as_bytes().to_vec(); 128 | key.extend(peer_id); 129 | key 130 | } 131 | } 132 | 133 | impl Persister for SledPersister { 134 | type Error = SledPersisterError; 135 | 136 | /// Get all of the current changes. 137 | fn get_changes(&self) -> Result>, Self::Error> { 138 | self.changes_tree 139 | .scan_prefix(&self.prefix) 140 | .values() 141 | .map(|v| v.map(|v| v.to_vec()).map_err(Self::Error::SledError)) 142 | .collect() 143 | } 144 | 145 | /// Insert all of the given changes into the tree. 146 | fn insert_changes(&mut self, changes: Vec<(ActorId, u64, Vec)>) -> Result<(), Self::Error> { 147 | for (a, s, c) in changes { 148 | let key = self.make_key(&a, s); 149 | self.sizes.changes += c.len() as u64; 150 | if let Some(old) = self.changes_tree.insert(key, c)? { 151 | self.sizes.changes -= old.len() as u64; 152 | } 153 | } 154 | Ok(()) 155 | } 156 | 157 | /// Remove all of the given changes from the tree. 
158 | fn remove_changes(&mut self, changes: Vec<(&ActorId, u64)>) -> Result<(), Self::Error> { 159 | for (a, s) in changes { 160 | let key = self.make_key(a, s); 161 | if let Some(old) = self.changes_tree.remove(key)? { 162 | self.sizes.changes -= old.len() as u64; 163 | } 164 | } 165 | Ok(()) 166 | } 167 | 168 | /// Retrieve the document from the tree. 169 | fn get_document(&self) -> Result>, Self::Error> { 170 | Ok(self 171 | .document_tree 172 | .get(self.make_document_key())? 173 | .map(|v| v.to_vec())) 174 | } 175 | 176 | /// Set the document in the tree. 177 | fn set_document(&mut self, data: Vec) -> Result<(), Self::Error> { 178 | self.sizes.document = data.len() as u64; 179 | self.document_tree.insert(self.make_document_key(), data)?; 180 | Ok(()) 181 | } 182 | 183 | fn get_sync_state(&self, peer_id: &[u8]) -> Result>, Self::Error> { 184 | let sync_state_key = self.make_peer_key(peer_id); 185 | Ok(self 186 | .sync_states_tree 187 | .get(sync_state_key)? 188 | .map(|v| v.to_vec())) 189 | } 190 | 191 | fn set_sync_state(&mut self, peer_id: Vec, sync_state: Vec) -> Result<(), Self::Error> { 192 | let sync_state_key = self.make_peer_key(&peer_id); 193 | self.sizes.sync_states += sync_state.len() as u64; 194 | if let Some(old) = self.sync_states_tree.insert(sync_state_key, sync_state)? { 195 | self.sizes.sync_states -= old.len() as u64; 196 | } 197 | Ok(()) 198 | } 199 | 200 | fn remove_sync_states(&mut self, peer_ids: &[&[u8]]) -> Result<(), Self::Error> { 201 | for id in peer_ids { 202 | let key = self.make_peer_key(id); 203 | if let Some(old) = self.sync_states_tree.remove(key)? 
{ 204 | self.sizes.sync_states -= old.len() as u64; 205 | } 206 | } 207 | Ok(()) 208 | } 209 | 210 | fn get_peer_ids(&self) -> Result>, Self::Error> { 211 | self.sync_states_tree 212 | .scan_prefix(&self.prefix) 213 | .keys() 214 | .map(|v| v.map(|v| v.to_vec()).map_err(Self::Error::SledError)) 215 | .collect() 216 | } 217 | 218 | fn sizes(&self) -> StoredSizes { 219 | self.sizes.clone() 220 | } 221 | 222 | fn flush(&mut self) -> Result { 223 | let mut flushed = 0; 224 | flushed += self.changes_tree.flush()?; 225 | flushed += self.document_tree.flush()?; 226 | flushed += self.sync_states_tree.flush()?; 227 | Ok(flushed) 228 | } 229 | } 230 | -------------------------------------------------------------------------------- /automerge-persistent/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "automerge-persistent" 3 | version = "0.4.0" 4 | authors = ["Andrew Jeffery "] 5 | edition = "2018" 6 | license = "MIT" 7 | repository = "https://github.com/jeffa5/automerge-persistent" 8 | description = "The core library for managing persistent state of Automerge documents" 9 | 10 | [dependencies] 11 | # automerge = "0.4" 12 | automerge = { git = "https://github.com/jeffa5/automerge", branch = "cmp-heads" } 13 | thiserror = "1.0.24" 14 | -------------------------------------------------------------------------------- /automerge-persistent/src/autocommit.rs: -------------------------------------------------------------------------------- 1 | use std::collections::HashMap; 2 | 3 | use crate::{Error, PeerId, Persister}; 4 | use automerge::{ 5 | sync::{self, SyncDoc}, 6 | AutoCommit, Change, ChangeHash, OpObserver, 7 | }; 8 | 9 | /// A wrapper for a persister and an automerge document. 10 | #[derive(Debug)] 11 | pub struct PersistentAutoCommit

{ 12 | document: AutoCommit, 13 | sync_states: HashMap, 14 | persister: P, 15 | saved_heads: Vec, 16 | } 17 | 18 | impl

PersistentAutoCommit

19 | where 20 | P: Persister + 'static, 21 | { 22 | pub const fn document(&self) -> &AutoCommit { 23 | &self.document 24 | } 25 | 26 | /// UNSAFE: this may lead to changes not being immediately persisted 27 | pub fn document_mut(&mut self) -> &mut AutoCommit { 28 | &mut self.document 29 | } 30 | 31 | /// Make changes to the document but don't immediately persist changes. 32 | pub fn transact Result, O, E>( 33 | &mut self, 34 | f: F, 35 | ) -> Result { 36 | let result = f(&mut self.document)?; 37 | // don't get the changes or anything as that will close the transaction, instead delay that 38 | // until another operation such as save or receive_sync_message etc. 39 | Ok(result) 40 | } 41 | 42 | /// Apply changes to this document. 43 | pub fn apply_changes( 44 | &mut self, 45 | changes: impl IntoIterator, 46 | ) -> Result<(), Error> { 47 | self.apply_changes_with::<_, ()>(changes, None) 48 | } 49 | 50 | pub fn apply_changes_with, Obs: OpObserver>( 51 | &mut self, 52 | changes: I, 53 | op_observer: Option<&mut Obs>, 54 | ) -> Result<(), Error> { 55 | let mut to_persist = vec![]; 56 | self.document.apply_changes_with( 57 | changes.into_iter().map(|change| { 58 | to_persist.push(( 59 | change.actor_id().clone(), 60 | change.seq(), 61 | change.raw_bytes().to_vec(), 62 | )); 63 | change 64 | }), 65 | op_observer, 66 | )?; 67 | self.persister 68 | .insert_changes(to_persist) 69 | .map_err(Error::PersisterError)?; 70 | Ok(()) 71 | } 72 | 73 | /// Load the persisted changes (both individual changes and a document) from storage and 74 | /// rebuild the document. 
75 | /// 76 | /// ```rust 77 | /// # use automerge_persistent::MemoryPersister; 78 | /// # use automerge_persistent::PersistentAutoCommit; 79 | /// let persister = MemoryPersister::default(); 80 | /// let doc = PersistentAutoCommit::load(persister).unwrap(); 81 | /// ``` 82 | pub fn load(persister: P) -> Result> { 83 | let document = persister.get_document().map_err(Error::PersisterError)?; 84 | let mut doc = if let Some(document) = document { 85 | AutoCommit::load(&document).map_err(Error::AutomergeError)? 86 | } else { 87 | AutoCommit::new() 88 | }; 89 | 90 | let change_bytes = persister.get_changes().map_err(Error::PersisterError)?; 91 | 92 | let mut changes = Vec::new(); 93 | for change_bytes in change_bytes { 94 | changes.push(Change::from_bytes(change_bytes).map_err(Error::AutomergeLoadChangeError)?) 95 | } 96 | 97 | doc.apply_changes(changes).map_err(Error::AutomergeError)?; 98 | 99 | let saved_heads = doc.get_heads(); 100 | Ok(Self { 101 | document: doc, 102 | sync_states: HashMap::new(), 103 | persister, 104 | saved_heads, 105 | }) 106 | } 107 | 108 | /// Compact the storage. 109 | /// 110 | /// This first obtains the changes currently in the document, saves the document and persists the 111 | /// saved document. We then can remove the previously obtained changes one by one. 112 | /// 113 | /// It also clears out the storage used up by old sync states for peers by removing those given 114 | /// in `old_peers`. 
115 | /// 116 | /// ```rust 117 | /// # use automerge_persistent::MemoryPersister; 118 | /// # use automerge_persistent::PersistentAutoCommit; 119 | /// # let persister = MemoryPersister::default(); 120 | /// # let mut doc = PersistentAutoCommit::load(persister).unwrap(); 121 | /// doc.compact(&[]).unwrap(); 122 | /// ``` 123 | pub fn compact(&mut self, old_peer_ids: &[&[u8]]) -> Result<(), Error> { 124 | let saved_document = self.document.save(); 125 | self.saved_heads = self.document.get_heads(); 126 | let changes = self.document.get_changes(&[])?; 127 | self.persister 128 | .set_document(saved_document) 129 | .map_err(Error::PersisterError)?; 130 | self.persister 131 | .remove_changes( 132 | changes 133 | .into_iter() 134 | .map(|c| (c.actor_id(), c.seq())) 135 | .collect(), 136 | ) 137 | .map_err(Error::PersisterError)?; 138 | self.persister 139 | .remove_sync_states(old_peer_ids) 140 | .map_err(Error::PersisterError)?; 141 | Ok(()) 142 | } 143 | 144 | /// Generate a sync message to be sent to a peer document. 145 | /// 146 | /// Peer id is intentionally low level and up to the user as it can be a DNS name, IP address or 147 | /// something else. 148 | /// 149 | /// This internally retrieves the previous sync state from storage and saves the new one 150 | /// afterwards. 151 | /// 152 | /// ```rust 153 | /// # use automerge_persistent::MemoryPersister; 154 | /// # use automerge_persistent::PersistentAutoCommit; 155 | /// # let persister = MemoryPersister::default(); 156 | /// # let mut doc = PersistentAutoCommit::load(persister).unwrap(); 157 | /// let message = doc.generate_sync_message(vec![], 100).unwrap(); 158 | /// ``` 159 | pub fn generate_sync_message( 160 | &mut self, 161 | peer_id: PeerId, 162 | max_size: usize, 163 | ) -> Result>, Error> { 164 | self.close_transaction()?; 165 | 166 | if !self.sync_states.contains_key(&peer_id) { 167 | if let Some(sync_state) = self 168 | .persister 169 | .get_sync_state(&peer_id) 170 | .map_err(Error::PersisterError)? 
171 | { 172 | let s = sync::State::decode(&sync_state).map_err(Error::AutomergeDecodeError)?; 173 | self.sync_states.insert(peer_id.clone(), s); 174 | } 175 | } 176 | let sync_state = self.sync_states.entry(peer_id.clone()).or_default(); 177 | let message = self 178 | .document 179 | .sync() 180 | .generate_sync_message(sync_state, max_size) 181 | .map(|m| m.into_owned()); 182 | self.persister 183 | .set_sync_state(peer_id, sync_state.encode()) 184 | .map_err(Error::PersisterError)?; 185 | Ok(message) 186 | } 187 | 188 | /// Receive a sync message from a peer document. 189 | /// 190 | /// Peer id is intentionally low level and up to the user as it can be a DNS name, IP address or 191 | /// something else. 192 | /// 193 | /// This internally retrieves the previous sync state from storage and saves the new one 194 | /// afterwards. 195 | pub fn receive_sync_message( 196 | &mut self, 197 | peer_id: PeerId, 198 | message: sync::Message, 199 | ) -> Result<(), Error> { 200 | self.receive_sync_message_with(peer_id, message, &mut ()) 201 | } 202 | 203 | /// Receive a sync message from a peer document. 204 | /// 205 | /// Peer id is intentionally low level and up to the user as it can be a DNS name, IP address or 206 | /// something else. 207 | /// 208 | /// This internally retrieves the previous sync state from storage and saves the new one 209 | /// afterwards. 210 | pub fn receive_sync_message_with( 211 | &mut self, 212 | peer_id: PeerId, 213 | message: sync::Message, 214 | op_observer: &mut Obs, 215 | ) -> Result<(), Error> { 216 | self.close_transaction()?; 217 | 218 | if !self.sync_states.contains_key(&peer_id) { 219 | if let Some(sync_state) = self 220 | .persister 221 | .get_sync_state(&peer_id) 222 | .map_err(Error::PersisterError)? 
223 | { 224 | let s = sync::State::decode(&sync_state).map_err(Error::AutomergeDecodeError)?; 225 | self.sync_states.insert(peer_id.clone(), s); 226 | } 227 | } 228 | let sync_state = self.sync_states.entry(peer_id.clone()).or_default(); 229 | 230 | let heads = self.document.get_heads(); 231 | self.document 232 | .sync() 233 | .receive_sync_message_with(sync_state, message, op_observer) 234 | .map_err(Error::AutomergeError)?; 235 | let changes = self.document.get_changes(&heads)?; 236 | self.persister 237 | .insert_changes( 238 | changes 239 | .into_iter() 240 | .map(|c| (c.actor_id().clone(), c.seq(), c.raw_bytes().to_vec())) 241 | .collect(), 242 | ) 243 | .map_err(Error::PersisterError)?; 244 | 245 | self.persister 246 | .set_sync_state(peer_id, sync_state.encode()) 247 | .map_err(Error::PersisterError)?; 248 | Ok(()) 249 | } 250 | 251 | /// Flush any data out to storage returning the number of bytes flushed. 252 | /// 253 | /// # Errors 254 | /// 255 | /// Returns the error returned by the persister during flushing. 256 | pub fn flush(&mut self) -> Result> { 257 | self.close_transaction()?; 258 | let bytes = self.persister.flush().map_err(Error::PersisterError)?; 259 | Ok(bytes) 260 | } 261 | 262 | /// Close any current transaction and write out the changes to disk. 263 | pub fn close_transaction(&mut self) -> Result<(), Error> { 264 | for change in self.document.get_changes(&self.saved_heads)? { 265 | self.persister 266 | .insert_changes(vec![( 267 | change.actor_id().clone(), 268 | change.seq(), 269 | change.raw_bytes().to_vec(), 270 | )]) 271 | .map_err(Error::PersisterError)? 272 | } 273 | self.saved_heads = self.document.get_heads(); 274 | Ok(()) 275 | } 276 | 277 | /// Close the document. 278 | /// 279 | /// This calls flush on the persister and returns it for potential use in other documents. 280 | /// 281 | /// # Errors 282 | /// 283 | /// Returns the error from flushing. 
284 | pub fn close(mut self) -> Result> { 285 | self.flush()?; 286 | Ok(self.persister) 287 | } 288 | 289 | /// Obtain a reference to the persister. 290 | pub const fn persister(&self) -> &P { 291 | &self.persister 292 | } 293 | 294 | /// Reset the sync state for a peer. 295 | /// 296 | /// This is typically used when a peer disconnects, we need to reset the sync state for them as 297 | /// they may come back up with different state. 298 | pub fn reset_sync_state(&mut self, peer_id: &[u8]) -> Result<(), P::Error> { 299 | self.sync_states.remove(peer_id); 300 | self.persister.remove_sync_states(&[peer_id]) 301 | } 302 | } 303 | -------------------------------------------------------------------------------- /automerge-persistent/src/lib.rs: -------------------------------------------------------------------------------- 1 | // #![warn(missing_docs)] 2 | #![warn(missing_crate_level_docs)] 3 | #![warn(missing_doc_code_examples)] 4 | // #![warn(clippy::pedantic)] 5 | #![warn(clippy::nursery)] 6 | 7 | //! A library for constructing efficient persistent automerge documents. 8 | //! 9 | //! A [`PersistentAutomerge`] wraps an [`automerge::Automerge`] and handles making the changes applied 10 | //! to it durable. This works by persisting every change before it is applied to the document. Then 11 | //! occasionally the user should call `compact` to save the document in a more compact format and 12 | //! cleanup the included changes. This strategy aims to be fast while also being space efficient 13 | //! (up to the user's requirements). 14 | //! 15 | //! ```rust 16 | //! # use automerge_persistent::MemoryPersister; 17 | //! # use automerge_persistent::PersistentAutomerge; 18 | //! let persister = MemoryPersister::default(); 19 | //! let doc = PersistentAutomerge::load(persister).unwrap(); 20 | //! 
``` 21 | 22 | mod autocommit; 23 | mod mem; 24 | mod persister; 25 | 26 | use std::{collections::HashMap, fmt::Debug}; 27 | 28 | pub use autocommit::PersistentAutoCommit; 29 | use automerge::{ 30 | op_observer::BranchableObserver, 31 | sync::{self, DecodeStateError, SyncDoc}, 32 | transaction::{CommitOptions, Failure, Observed, Success, Transaction, UnObserved}, 33 | Automerge, AutomergeError, Change, LoadChangeError, OpObserver, 34 | }; 35 | pub use mem::MemoryPersister; 36 | pub use persister::Persister; 37 | 38 | /// Bytes stored for each of the stored types. 39 | #[derive(Debug, Default, Clone)] 40 | pub struct StoredSizes { 41 | /// Total bytes stored for all changes. 42 | pub changes: u64, 43 | /// Total bytes stored in the document. 44 | pub document: u64, 45 | /// Total bytes stored for all sync states. 46 | pub sync_states: u64, 47 | } 48 | 49 | /// Errors that persistent documents can return. 50 | #[derive(Debug, thiserror::Error)] 51 | pub enum Error { 52 | /// An automerge error. 53 | #[error(transparent)] 54 | AutomergeError(#[from] AutomergeError), 55 | #[error(transparent)] 56 | AutomergeDecodeError(#[from] DecodeStateError), 57 | #[error(transparent)] 58 | AutomergeLoadChangeError(#[from] LoadChangeError), 59 | /// A persister error. 60 | #[error(transparent)] 61 | PersisterError(E), 62 | } 63 | 64 | /// Errors that persistent documents can return after a transaction. 65 | #[derive(Debug, thiserror::Error)] 66 | pub enum TransactionError { 67 | /// A persister error. 68 | #[error(transparent)] 69 | PersisterError(PE), 70 | /// A transaction error 71 | #[error(transparent)] 72 | TransactionError(#[from] Failure), 73 | } 74 | 75 | pub type TransactionResult = Result, TransactionError>; 76 | 77 | type PeerId = Vec; 78 | 79 | /// A wrapper for a persister and an automerge document. 80 | #[derive(Debug)] 81 | pub struct PersistentAutomerge

{ 82 | document: Automerge, 83 | sync_states: HashMap, 84 | persister: P, 85 | } 86 | 87 | impl

PersistentAutomerge

88 | where 89 | P: Persister + 'static, 90 | { 91 | pub const fn document(&self) -> &Automerge { 92 | &self.document 93 | } 94 | 95 | pub fn document_mut(&mut self) -> &mut Automerge { 96 | &mut self.document 97 | } 98 | 99 | pub fn transact(&mut self, f: F) -> TransactionResult 100 | where 101 | F: FnOnce(&mut Transaction) -> Result, 102 | { 103 | let result = self.document.transact(f)?; 104 | if let Err(e) = self.after_transaction() { 105 | return Err(TransactionError::PersisterError(e)); 106 | } 107 | Ok(result) 108 | } 109 | 110 | fn after_transaction(&mut self) -> Result<(), P::Error> { 111 | if let Some(change) = self.document.get_last_local_change() { 112 | self.persister.insert_changes(vec![( 113 | change.actor_id().clone(), 114 | change.seq(), 115 | change.raw_bytes().to_vec(), 116 | )])?; 117 | } 118 | Ok(()) 119 | } 120 | 121 | pub fn transact_with( 122 | &mut self, 123 | c: C, 124 | f: F, 125 | ) -> TransactionResult 126 | where 127 | F: FnOnce(&mut Transaction<'_, Observed>) -> Result, 128 | C: FnOnce(&O) -> CommitOptions, 129 | Obs: OpObserver + BranchableObserver + Default, 130 | { 131 | let result = self.document.transact_observed_with(c, f)?; 132 | if let Err(e) = self.after_transaction() { 133 | return Err(TransactionError::PersisterError(e)); 134 | } 135 | Ok(result) 136 | } 137 | 138 | /// Apply changes to this document. 
139 | pub fn apply_changes( 140 | &mut self, 141 | changes: impl IntoIterator, 142 | ) -> Result<(), Error> { 143 | self.apply_changes_with::<_, ()>(changes, None) 144 | } 145 | 146 | pub fn apply_changes_with, Obs: OpObserver>( 147 | &mut self, 148 | changes: I, 149 | op_observer: Option<&mut Obs>, 150 | ) -> Result<(), Error> { 151 | let mut to_persist = vec![]; 152 | self.document.apply_changes_with( 153 | changes.into_iter().map(|change| { 154 | to_persist.push(( 155 | change.actor_id().clone(), 156 | change.seq(), 157 | change.raw_bytes().to_vec(), 158 | )); 159 | change 160 | }), 161 | op_observer, 162 | )?; 163 | self.persister 164 | .insert_changes(to_persist) 165 | .map_err(Error::PersisterError)?; 166 | Ok(()) 167 | } 168 | 169 | /// Load the persisted changes (both individual changes and a document) from storage and 170 | /// rebuild the Document. 171 | /// 172 | /// ```rust 173 | /// # use automerge_persistent::MemoryPersister; 174 | /// # use automerge_persistent::PersistentAutomerge; 175 | /// let persister = MemoryPersister::default(); 176 | /// let doc = PersistentAutomerge::load(persister).unwrap(); 177 | /// ``` 178 | pub fn load(persister: P) -> Result> { 179 | let document = persister.get_document().map_err(Error::PersisterError)?; 180 | let mut doc = if let Some(document) = document { 181 | Automerge::load(&document).map_err(Error::AutomergeError)? 182 | } else { 183 | Automerge::default() 184 | }; 185 | 186 | let change_bytes = persister.get_changes().map_err(Error::PersisterError)?; 187 | 188 | let mut changes = Vec::new(); 189 | for change_bytes in change_bytes { 190 | changes.push(Change::from_bytes(change_bytes).map_err(Error::AutomergeLoadChangeError)?) 191 | } 192 | 193 | doc.apply_changes(changes).map_err(Error::AutomergeError)?; 194 | Ok(Self { 195 | document: doc, 196 | sync_states: HashMap::new(), 197 | persister, 198 | }) 199 | } 200 | 201 | /// Compact the storage. 
202 | /// 203 | /// This first obtains the changes currently in the document, saves the document and persists the 204 | /// saved document. We then can remove the previously obtained changes one by one. 205 | /// 206 | /// It also clears out the storage used up by old sync states for peers by removing those given 207 | /// in `old_peers`. 208 | /// 209 | /// ```rust 210 | /// # use automerge_persistent::MemoryPersister; 211 | /// # use automerge_persistent::PersistentAutomerge; 212 | /// # let persister = MemoryPersister::default(); 213 | /// # let mut document = PersistentAutomerge::load(persister).unwrap(); 214 | /// document.compact(&[]).unwrap(); 215 | /// ``` 216 | pub fn compact(&mut self, old_peer_ids: &[&[u8]]) -> Result<(), Error> { 217 | let saved_document = self.document.save(); 218 | let changes = self.document.get_changes(&[])?; 219 | self.persister 220 | .set_document(saved_document) 221 | .map_err(Error::PersisterError)?; 222 | self.persister 223 | .remove_changes( 224 | changes 225 | .into_iter() 226 | .map(|c| (c.actor_id(), c.seq())) 227 | .collect(), 228 | ) 229 | .map_err(Error::PersisterError)?; 230 | self.persister 231 | .remove_sync_states(old_peer_ids) 232 | .map_err(Error::PersisterError)?; 233 | Ok(()) 234 | } 235 | 236 | /// Generate a sync message to be sent to a peer document. 237 | /// 238 | /// Peer id is intentionally low level and up to the user as it can be a DNS name, IP address or 239 | /// something else. 240 | /// 241 | /// This internally retrieves the previous sync state from storage and saves the new one 242 | /// afterwards. 
243 | /// 244 | /// ```rust 245 | /// # use automerge_persistent::MemoryPersister; 246 | /// # use automerge_persistent::PersistentAutomerge; 247 | /// # let persister = MemoryPersister::default(); 248 | /// # let mut document = PersistentAutomerge::load(persister).unwrap(); 249 | /// let message = document.generate_sync_message(vec![], 100).unwrap(); 250 | /// ``` 251 | pub fn generate_sync_message( 252 | &mut self, 253 | peer_id: PeerId, 254 | max_size: usize, 255 | ) -> Result, Error> { 256 | if !self.sync_states.contains_key(&peer_id) { 257 | if let Some(sync_state) = self 258 | .persister 259 | .get_sync_state(&peer_id) 260 | .map_err(Error::PersisterError)? 261 | { 262 | let s = sync::State::decode(&sync_state).map_err(Error::AutomergeDecodeError)?; 263 | self.sync_states.insert(peer_id.clone(), s); 264 | } 265 | } 266 | let sync_state = self.sync_states.entry(peer_id.clone()).or_default(); 267 | let message = self.document.generate_sync_message(sync_state, max_size); 268 | self.persister 269 | .set_sync_state(peer_id, sync_state.encode()) 270 | .map_err(Error::PersisterError)?; 271 | Ok(message) 272 | } 273 | 274 | /// Receive a sync message from a peer document. 275 | /// 276 | /// Peer id is intentionally low level and up to the user as it can be a DNS name, IP address or 277 | /// something else. 278 | /// 279 | /// This internally retrieves the previous sync state from storage and saves the new one 280 | /// afterwards. 281 | pub fn receive_sync_message( 282 | &mut self, 283 | peer_id: PeerId, 284 | message: sync::Message, 285 | ) -> Result<(), Error> { 286 | self.receive_sync_message_with(peer_id, message, &mut ()) 287 | } 288 | 289 | /// Receive a sync message from a peer document. 290 | /// 291 | /// Peer id is intentionally low level and up to the user as it can be a DNS name, IP address or 292 | /// something else. 293 | /// 294 | /// This internally retrieves the previous sync state from storage and saves the new one 295 | /// afterwards. 
296 | pub fn receive_sync_message_with( 297 | &mut self, 298 | peer_id: PeerId, 299 | message: sync::Message, 300 | op_observer: &mut Obs, 301 | ) -> Result<(), Error> { 302 | if !self.sync_states.contains_key(&peer_id) { 303 | if let Some(sync_state) = self 304 | .persister 305 | .get_sync_state(&peer_id) 306 | .map_err(Error::PersisterError)? 307 | { 308 | let s = sync::State::decode(&sync_state).map_err(Error::AutomergeDecodeError)?; 309 | self.sync_states.insert(peer_id.clone(), s); 310 | } 311 | } 312 | let sync_state = self.sync_states.entry(peer_id.clone()).or_default(); 313 | 314 | let heads = self.document.get_heads(); 315 | self.document 316 | .receive_sync_message_with(sync_state, message, op_observer) 317 | .map_err(Error::AutomergeError)?; 318 | let changes = self.document.get_changes(&heads)?; 319 | self.persister 320 | .insert_changes( 321 | changes 322 | .into_iter() 323 | .map(|c| (c.actor_id().clone(), c.seq(), c.raw_bytes().to_vec())) 324 | .collect(), 325 | ) 326 | .map_err(Error::PersisterError)?; 327 | 328 | self.persister 329 | .set_sync_state(peer_id, sync_state.encode()) 330 | .map_err(Error::PersisterError)?; 331 | Ok(()) 332 | } 333 | 334 | /// Flush any data out to storage returning the number of bytes flushed. 335 | /// 336 | /// # Errors 337 | /// 338 | /// Returns the error returned by the persister during flushing. 339 | pub fn flush(&mut self) -> Result { 340 | self.persister.flush() 341 | } 342 | 343 | /// Close the document. 344 | /// 345 | /// This calls flush on the persister and returns it for potential use in other documents. 346 | /// 347 | /// # Errors 348 | /// 349 | /// Returns the error from flushing. 350 | pub fn close(mut self) -> Result { 351 | self.flush()?; 352 | Ok(self.persister) 353 | } 354 | 355 | /// Obtain a reference to the persister. 356 | pub const fn persister(&self) -> &P { 357 | &self.persister 358 | } 359 | 360 | /// Obtain a mut reference to the persister. 
361 | pub fn persister_mut(&mut self) -> &mut P { 362 | &mut self.persister 363 | } 364 | 365 | /// Reset the sync state for a peer. 366 | /// 367 | /// This is typically used when a peer disconnects, we need to reset the sync state for them as 368 | /// they may come back up with different state. 369 | pub fn reset_sync_state(&mut self, peer_id: &[u8]) -> Result<(), P::Error> { 370 | self.sync_states.remove(peer_id); 371 | self.persister.remove_sync_states(&[peer_id]) 372 | } 373 | } 374 | -------------------------------------------------------------------------------- /automerge-persistent/src/mem.rs: -------------------------------------------------------------------------------- 1 | use std::collections::HashMap; 2 | 3 | use automerge::ActorId; 4 | 5 | use crate::{Persister, StoredSizes}; 6 | 7 | /// **For Testing** An in-memory persister. 8 | /// 9 | /// As this provides no actual persistence it should not be used for any real application, it 10 | /// actually reduces performance of the plain backend slightly due to tracking the changes itself. 11 | #[derive(Debug, Default)] 12 | pub struct MemoryPersister { 13 | changes: HashMap<(ActorId, u64), Vec>, 14 | document: Option>, 15 | sync_states: HashMap, Vec>, 16 | sizes: StoredSizes, 17 | } 18 | 19 | impl Persister for MemoryPersister { 20 | type Error = std::convert::Infallible; 21 | 22 | /// Get the changes out of the map. 23 | fn get_changes(&self) -> Result>, Self::Error> { 24 | Ok(self.changes.values().cloned().collect()) 25 | } 26 | 27 | /// Insert changes into the map. 28 | fn insert_changes(&mut self, changes: Vec<(ActorId, u64, Vec)>) -> Result<(), Self::Error> { 29 | for (a, u, c) in changes { 30 | self.sizes.changes += c.len() as u64; 31 | if let Some(old) = self.changes.insert((a, u), c) { 32 | self.sizes.changes -= old.len() as u64; 33 | } 34 | } 35 | Ok(()) 36 | } 37 | 38 | /// Remove changes from the map. 
39 | fn remove_changes(&mut self, changes: Vec<(&ActorId, u64)>) -> Result<(), Self::Error> { 40 | for (a, u) in changes { 41 | if let Some(old) = self.changes.remove(&(a.clone(), u)) { 42 | self.sizes.changes -= old.len() as u64; 43 | } 44 | } 45 | Ok(()) 46 | } 47 | 48 | /// Get the document. 49 | fn get_document(&self) -> Result>, Self::Error> { 50 | Ok(self.document.clone()) 51 | } 52 | 53 | /// Set the document. 54 | fn set_document(&mut self, data: Vec) -> Result<(), Self::Error> { 55 | self.sizes.document = data.len() as u64; 56 | self.document = Some(data); 57 | Ok(()) 58 | } 59 | 60 | fn get_sync_state(&self, peer_id: &[u8]) -> Result>, Self::Error> { 61 | Ok(self.sync_states.get(peer_id).cloned()) 62 | } 63 | 64 | fn set_sync_state(&mut self, peer_id: Vec, sync_state: Vec) -> Result<(), Self::Error> { 65 | self.sizes.sync_states += sync_state.len() as u64; 66 | if let Some(old) = self.sync_states.insert(peer_id, sync_state) { 67 | self.sizes.sync_states -= old.len() as u64; 68 | } 69 | Ok(()) 70 | } 71 | 72 | fn remove_sync_states(&mut self, peer_ids: &[&[u8]]) -> Result<(), Self::Error> { 73 | for id in peer_ids { 74 | if let Some(old) = self.sync_states.remove(*id) { 75 | self.sizes.sync_states -= old.len() as u64; 76 | } 77 | } 78 | Ok(()) 79 | } 80 | 81 | fn get_peer_ids(&self) -> Result>, Self::Error> { 82 | Ok(self.sync_states.keys().cloned().collect()) 83 | } 84 | 85 | fn sizes(&self) -> StoredSizes { 86 | self.sizes.clone() 87 | } 88 | 89 | fn flush(&mut self) -> Result { 90 | Ok(0) 91 | } 92 | } 93 | -------------------------------------------------------------------------------- /automerge-persistent/src/persister.rs: -------------------------------------------------------------------------------- 1 | use std::error::Error; 2 | 3 | use automerge::ActorId; 4 | 5 | use crate::StoredSizes; 6 | 7 | /// A Persister persists both changes and documents to durable storage. 
8 | /// 9 | /// In the event of a power loss changes should still be around for loading after. It is up to the 10 | /// implementation to decide on trade-offs regarding how often to fsync for example. 11 | /// 12 | /// Changes are identified by a pair of `actor_id` and `sequence_number`. This uniquely identifies a 13 | /// change and so is suitable for use as a key in the implementation. 14 | /// 15 | /// Documents are saved automerge Backends so are more compact than the raw changes they represent. 16 | pub trait Persister { 17 | /// The error type that the operations can produce 18 | type Error: Error + 'static; 19 | 20 | /// Returns all of the changes that have been persisted through this persister. 21 | /// Ordering is not specified as the automerge Backend should handle that. 22 | fn get_changes(&self) -> Result>, Self::Error>; 23 | 24 | /// Inserts the given change at the unique address specified by the `actor_id` and `sequence_number`. 25 | fn insert_changes(&mut self, changes: Vec<(ActorId, u64, Vec)>) -> Result<(), Self::Error>; 26 | 27 | /// Removes the change at the unique address specified by the `actor_id` and `sequence_number`. 28 | /// 29 | /// If the change does not exist this should not return an error. 30 | fn remove_changes(&mut self, changes: Vec<(&ActorId, u64)>) -> Result<(), Self::Error>; 31 | 32 | /// Returns the document, if one has been persisted previously. 33 | fn get_document(&self) -> Result>, Self::Error>; 34 | 35 | /// Sets the document to the given data. 36 | fn set_document(&mut self, data: Vec) -> Result<(), Self::Error>; 37 | 38 | /// Returns the sync state for the given peer if one exists. 39 | /// 40 | /// A peer id corresponds to an instance of a backend and may be serving multiple frontends so 41 | /// we cannot have it work on `ActorIds`. 42 | fn get_sync_state(&self, peer_id: &[u8]) -> Result>, Self::Error>; 43 | 44 | /// Sets the sync state for the given peer. 
45 | /// 46 | /// A peer id corresponds to an instance of a backend and may be serving multiple frontends so 47 | /// we cannot have it work on `ActorIds`. 48 | fn set_sync_state(&mut self, peer_id: Vec, sync_state: Vec) -> Result<(), Self::Error>; 49 | 50 | /// Removes the sync states associated with the given `peer_ids`. 51 | fn remove_sync_states(&mut self, peer_ids: &[&[u8]]) -> Result<(), Self::Error>; 52 | 53 | /// Returns the list of peer ids with stored `SyncStates`. 54 | /// 55 | /// This is intended for use by users to see what `peer_ids` are taking space so that they can be 56 | /// removed during a compaction. 57 | fn get_peer_ids(&self) -> Result>, Self::Error>; 58 | 59 | /// Returns the sizes components being stored consume. 60 | /// 61 | /// This can be used as an indicator of when to compact the storage. 62 | fn sizes(&self) -> StoredSizes; 63 | 64 | /// Flush the data out to disk. 65 | fn flush(&mut self) -> Result; 66 | } 67 | -------------------------------------------------------------------------------- /flake.lock: -------------------------------------------------------------------------------- 1 | { 2 | "nodes": { 3 | "flake-utils": { 4 | "inputs": { 5 | "systems": "systems" 6 | }, 7 | "locked": { 8 | "lastModified": 1681202837, 9 | "narHash": "sha256-H+Rh19JDwRtpVPAWp64F+rlEtxUWBAQW28eAi3SRSzg=", 10 | "owner": "numtide", 11 | "repo": "flake-utils", 12 | "rev": "cfacdce06f30d2b68473a46042957675eebb3401", 13 | "type": "github" 14 | }, 15 | "original": { 16 | "owner": "numtide", 17 | "repo": "flake-utils", 18 | "type": "github" 19 | } 20 | }, 21 | "flake-utils_2": { 22 | "inputs": { 23 | "systems": "systems_2" 24 | }, 25 | "locked": { 26 | "lastModified": 1681202837, 27 | "narHash": "sha256-H+Rh19JDwRtpVPAWp64F+rlEtxUWBAQW28eAi3SRSzg=", 28 | "owner": "numtide", 29 | "repo": "flake-utils", 30 | "rev": "cfacdce06f30d2b68473a46042957675eebb3401", 31 | "type": "github" 32 | }, 33 | "original": { 34 | "owner": "numtide", 35 | "repo": "flake-utils", 
36 | "type": "github" 37 | } 38 | }, 39 | "nixpkgs": { 40 | "locked": { 41 | "lastModified": 1683408522, 42 | "narHash": "sha256-9kcPh6Uxo17a3kK3XCHhcWiV1Yu1kYj22RHiymUhMkU=", 43 | "owner": "nixos", 44 | "repo": "nixpkgs", 45 | "rev": "897876e4c484f1e8f92009fd11b7d988a121a4e7", 46 | "type": "github" 47 | }, 48 | "original": { 49 | "owner": "nixos", 50 | "ref": "nixos-unstable", 51 | "repo": "nixpkgs", 52 | "type": "github" 53 | } 54 | }, 55 | "nixpkgs_2": { 56 | "locked": { 57 | "lastModified": 1681358109, 58 | "narHash": "sha256-eKyxW4OohHQx9Urxi7TQlFBTDWII+F+x2hklDOQPB50=", 59 | "owner": "NixOS", 60 | "repo": "nixpkgs", 61 | "rev": "96ba1c52e54e74c3197f4d43026b3f3d92e83ff9", 62 | "type": "github" 63 | }, 64 | "original": { 65 | "owner": "NixOS", 66 | "ref": "nixpkgs-unstable", 67 | "repo": "nixpkgs", 68 | "type": "github" 69 | } 70 | }, 71 | "root": { 72 | "inputs": { 73 | "flake-utils": "flake-utils", 74 | "nixpkgs": "nixpkgs", 75 | "rust-overlay": "rust-overlay" 76 | } 77 | }, 78 | "rust-overlay": { 79 | "inputs": { 80 | "flake-utils": "flake-utils_2", 81 | "nixpkgs": "nixpkgs_2" 82 | }, 83 | "locked": { 84 | "lastModified": 1683944292, 85 | "narHash": "sha256-ks2N8FtrUvePO5X2fN9WoelgMVwDa1jUA7XEyC9S+7g=", 86 | "owner": "oxalica", 87 | "repo": "rust-overlay", 88 | "rev": "7ec9793168e4c328f08d10ab7ef4a1ada2dbf93e", 89 | "type": "github" 90 | }, 91 | "original": { 92 | "owner": "oxalica", 93 | "repo": "rust-overlay", 94 | "type": "github" 95 | } 96 | }, 97 | "systems": { 98 | "locked": { 99 | "lastModified": 1681028828, 100 | "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", 101 | "owner": "nix-systems", 102 | "repo": "default", 103 | "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", 104 | "type": "github" 105 | }, 106 | "original": { 107 | "owner": "nix-systems", 108 | "repo": "default", 109 | "type": "github" 110 | } 111 | }, 112 | "systems_2": { 113 | "locked": { 114 | "lastModified": 1681028828, 115 | "narHash": 
"sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
        "owner": "nix-systems",
        "repo": "default",
        "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
        "type": "github"
      },
      "original": {
        "owner": "nix-systems",
        "repo": "default",
        "type": "github"
      }
    }
  },
  "root": "root",
  "version": 7
}
--------------------------------------------------------------------------------
/flake.nix:
--------------------------------------------------------------------------------
{
  description = "automerge-persistent";

  inputs = {
    nixpkgs.url = "github:nixos/nixpkgs/nixos-unstable";
    rust-overlay.url = "github:oxalica/rust-overlay";
    flake-utils.url = "github:numtide/flake-utils";
  };

  outputs = {
    self,
    nixpkgs,
    rust-overlay,
    flake-utils,
  }:
    flake-utils.lib.eachDefaultSystem
    (
      system: let
        # nixpkgs instantiated with the rust-overlay applied for this system.
        pkgs = import nixpkgs {
          overlays = [rust-overlay.overlays.default];
          inherit system;
        };
        # Latest stable Rust toolchain from the overlay.
        rust = pkgs.rust-bin.stable.latest.default;
      in {
        devShells.default = pkgs.mkShell {
          buildInputs = with pkgs; [
            (rust.override {
              extensions = ["rust-src" "rustfmt"];
              targets = ["wasm32-unknown-unknown"];
            })
            cargo-watch
            cargo-udeps
            cargo-expand
            cargo-outdated
            cargo-insta
            cargo-release

            wasm-pack
            nodejs
          ];
        };
      }
    );
}
--------------------------------------------------------------------------------