├── .envrc ├── src ├── api.rs ├── snapshots │ ├── dson__macros__tests__crdt_map_literal_macro_array.snap │ ├── dson__macros__tests__crdt_map_literal_macro.snap │ └── dson__macros__tests__crdt_map_store_macro.snap ├── crdts │ ├── test_util │ │ ├── arbitrary_delta_impls.rs │ │ └── arbitrary_delta_impls │ │ │ ├── mvreg.rs │ │ │ └── orarray.rs │ ├── orarray │ │ └── position.rs │ └── test_util.rs ├── either.rs ├── api │ ├── register.rs │ ├── map.rs │ ├── timestamp.rs │ └── array.rs ├── transaction │ ├── delta.rs │ ├── conflicted.rs │ ├── mod.rs │ └── crdt_value.rs ├── datetime_literal.rs ├── dotstores │ └── recording_sentinel.rs ├── json.rs ├── macros.rs └── sentinel.rs ├── .github ├── scripts │ └── install_iai_callgrind_runner.sh ├── codecov.yml ├── dependabot.yml ├── DOCS.md └── workflows │ ├── scheduled.yml │ ├── check.yml │ └── test.yml ├── flake.nix ├── CHANGELOG.md ├── LICENSE-MIT ├── examples ├── transaction_basic.rs ├── transaction_conflicts.rs ├── nested_transactions.rs ├── transaction_sync.rs ├── simple.rs ├── transaction_nested.rs └── conflicts.rs ├── benches ├── nested_transactions.rs ├── iai.rs └── tango.rs ├── Cargo.toml ├── flake.lock ├── tests ├── transaction_api.rs ├── nested_transactions.rs └── transaction_rollback.rs ├── README.md └── LICENSE-APACHE /.envrc: -------------------------------------------------------------------------------- 1 | watch_file config.nix 2 | use flake 3 | -------------------------------------------------------------------------------- /src/api.rs: -------------------------------------------------------------------------------- 1 | // (c) Copyright 2025 Helsing GmbH. All rights reserved. 2 | pub mod array; 3 | pub mod map; 4 | pub mod register; 5 | pub mod timestamp; 6 | -------------------------------------------------------------------------------- /.github/scripts/install_iai_callgrind_runner.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -eo pipefail 3 | 4 | echo "::group::Install" 5 | version=$(cargo metadata --format-version=1 |\ 6 | jq '.packages[] | select(.name == "iai-callgrind").version' |\ 7 | tr -d '"' 8 | ) 9 | cargo binstall iai-callgrind-runner --version "$version" --no-confirm --no-symlinks --force 10 | echo "::endgroup::" 11 | echo "::group::Verification" 12 | which iai-callgrind-runner 13 | echo "::endgroup::" 14 | -------------------------------------------------------------------------------- /src/snapshots/dson__macros__tests__crdt_map_literal_macro_array.snap: -------------------------------------------------------------------------------- 1 | --- 2 | source: src/macros.rs 3 | expression: map 4 | --- 5 | { 6 | "field_x": v( 7 | ${(@1.1, 1): "Hello"}, 8 | ), 9 | "field_y": v( 10 | ${(@1.1, 2): "World"}, 11 | ), 12 | "field_z": v( 13 | []{Uid(@1.1, 4): v((${(@1.1, 3): "Banana"}, pos={(@1.1, 5): {(@1.1, 6): 42}})), Uid(@1.1, 8): v((${(@1.1, 7): "Cantaloupe"}, pos={(@1.1, 9): {(@1.1, 10): 43}}))}, 14 | ), 15 | } 16 | -------------------------------------------------------------------------------- /src/snapshots/dson__macros__tests__crdt_map_literal_macro.snap: -------------------------------------------------------------------------------- 1 | --- 2 | source: src/macros.rs 3 | expression: map 4 | --- 5 | { 6 | "field_x": v( 7 | ${(@1.1, 2): "Hello"}, 8 | ), 9 | "field_y": v( 10 | ${(@1.1, 3): "World"}, 11 | ), 12 | "field_z": v( 13 | { 14 | "field_x": v( 15 | ${(@1.1, 4): "Nested"}, 16 | ), 17 | "field_y": v( 18 | ${(@1.1, 5): "Nested"}, 19 | ), 20 | }, 21 | ), 22 
| } 23 | -------------------------------------------------------------------------------- /.github/codecov.yml: -------------------------------------------------------------------------------- 1 | # ref: https://docs.codecov.com/docs/codecovyml-reference 2 | coverage: 3 | # Hold ourselves to a high bar 4 | range: 85..100 5 | round: down 6 | precision: 1 7 | status: 8 | # ref: https://docs.codecov.com/docs/commit-status 9 | project: 10 | default: 11 | # Avoid false negatives 12 | threshold: 1% 13 | 14 | # Test files aren't important for coverage 15 | ignore: 16 | - "tests" 17 | 18 | # Make comments less noisy 19 | comment: 20 | layout: "files" 21 | require_changes: true 22 | -------------------------------------------------------------------------------- /src/snapshots/dson__macros__tests__crdt_map_store_macro.snap: -------------------------------------------------------------------------------- 1 | --- 2 | source: src/macros.rs 3 | expression: map 4 | --- 5 | CausalDotStore { 6 | store: { 7 | "field_x": v( 8 | ${(@1.1, 2): "Hello"}, 9 | ), 10 | "field_y": v( 11 | ${(@1.1, 3): "World"}, 12 | ), 13 | "field_z": v( 14 | { 15 | "field_x": v( 16 | ${(@1.1, 4): "Nested"}, 17 | ), 18 | "field_y": v( 19 | ${(@1.1, 5): "Nested"}, 20 | ), 21 | }, 22 | ), 23 | }, 24 | context: CausalContext( 25 | { 26 | @1.1: { 27 | 2..=5, 28 | }, 29 | }, 30 | ), 31 | } 32 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: github-actions 4 | directory: / 5 | schedule: 6 | interval: daily 7 | - package-ecosystem: cargo 8 | directory: / 9 | schedule: 10 | interval: daily 11 | ignore: 12 | - dependency-name: "*" 13 | # patch and minor updates don't matter for libraries as consumers of this library build 14 | # with their own lockfile, rather than the version specified in this library's lockfile 15 | # remove this ignore rule if your package has binaries to ensure that the binaries are 16 | # built with the exact set of dependencies and those are up to date. 17 | update-types: 18 | - "version-update:semver-patch" 19 | - "version-update:semver-minor" 20 | -------------------------------------------------------------------------------- /src/crdts/test_util/arbitrary_delta_impls.rs: -------------------------------------------------------------------------------- 1 | // (c) Copyright 2025 Helsing GmbH. All rights reserved. 2 | pub(crate) mod mvreg; 3 | pub(crate) mod orarray; 4 | pub(crate) mod ormap; 5 | 6 | pub(crate) use mvreg::RegisterOp; 7 | pub(crate) use orarray::ArrayOp; 8 | pub(crate) use ormap::MapOp; 9 | 10 | /// A type that holds a [`Delta`] for one of the known CRDT [`Delta`] types. 11 | /// 12 | /// This exists so that [`MapOp`] and [`ArrayOp`] don't need a separate operation type for each 13 | /// type of inner value they may want to insert or update at a given key. 
14 | /// 15 | #[cfg_attr(feature = "serde", derive(::serde::Deserialize, ::serde::Serialize))] 16 | #[derive(Debug, Clone)] 17 | pub(crate) enum ValueDelta { 18 | Map(MapOp), 19 | Array(ArrayOp), 20 | Register(RegisterOp), 21 | } 22 | -------------------------------------------------------------------------------- /flake.nix: -------------------------------------------------------------------------------- 1 | { 2 | description = "dson devshell"; 3 | 4 | inputs = { 5 | nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable"; 6 | rust-overlay.url = "github:oxalica/rust-overlay"; 7 | flake-utils.url = "github:numtide/flake-utils"; 8 | }; 9 | 10 | outputs = { self, nixpkgs, rust-overlay, flake-utils, ... }: 11 | flake-utils.lib.eachDefaultSystem (system: 12 | let 13 | overlays = [ (import rust-overlay) ]; 14 | pkgs = import nixpkgs { 15 | inherit system overlays; 16 | }; 17 | in 18 | { 19 | devShells.default = with pkgs; mkShell { 20 | buildInputs = [ 21 | (rust-bin.stable.latest.default.override { 22 | extensions = [ 23 | "rust-src" 24 | "rust-analyzer" 25 | ]; 26 | }) 27 | ]; 28 | }; 29 | } 30 | ); 31 | } 32 | -------------------------------------------------------------------------------- /src/either.rs: -------------------------------------------------------------------------------- 1 | // (c) Copyright 2025 Helsing GmbH. All rights reserved. 2 | 3 | //! The enum Either with variants Left and Right is a general purpose sum type 4 | //! with two cases. 5 | #[derive(Debug, Clone, Ord, PartialOrd, PartialEq, Eq)] 6 | pub enum Either { 7 | Left(A), 8 | Right(B), 9 | } 10 | 11 | impl Either, B> { 12 | /// Converts from `Either>` to `Either`. 13 | pub fn flatten(self) -> Either { 14 | match self { 15 | Either::Left(nested) => nested, 16 | Either::Right(b) => Either::Right(b), 17 | } 18 | } 19 | } 20 | 21 | impl Either> { 22 | /// Converts from `Either>` to `Either`. 23 | pub fn flatten(self) -> Either { 24 | match self { 25 | Either::Left(a) => Either::Left(a), 26 | Either::Right(nested) => nested, 27 | } 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | 3 | All notable changes to this project will be documented in this file. 4 | 5 | The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/), 6 | and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 7 | 8 | ## [Unreleased] 9 | 10 | ### Added 11 | 12 | ### Changed 13 | 14 | ### Deprecated 15 | 16 | ### Removed 17 | 18 | ### Fixed 19 | 20 | ### Security 21 | 22 | ## [0.2.0] 23 | 24 | ### Added 25 | - Add support for custom leaf values. 26 | 27 | ## [0.1.1] 28 | 29 | ### Fixed 30 | - Ensure all feature combinations compile. 31 | 32 | ## [0.1.0] 33 | 34 | Initial public release. 
35 | 36 | [Unreleased]: https://github.com/helsing-ai/dson/compare/v0.2.0...HEAD 37 | [0.2.0]: https://github.com/helsing-ai/dson/releases/tag/v0.1.1...v0.2.0 38 | [0.1.1]: https://github.com/helsing-ai/dson/releases/tag/v0.1.0...v0.1.1 39 | [0.1.0]: https://github.com/helsing-ai/dson/releases/tag/v0.1.0 40 | -------------------------------------------------------------------------------- /.github/DOCS.md: -------------------------------------------------------------------------------- 1 | # Github config and workflows 2 | 3 | In this folder there is configuration for codecoverage, dependabot, and ci 4 | workflows that check the library more deeply than the default configurations. 5 | 6 | This folder can be or was merged using a --allow-unrelated-histories merge 7 | strategy from which provides a 8 | reasonably sensible base for writing your own ci on. By using this strategy 9 | the history of the CI repo is included in your repo, and future updates to 10 | the CI can be merged later. 11 | 12 | To perform this merge run: 13 | 14 | ```shell 15 | git remote add ci https://github.com/jonhoo/rust-ci-conf.git 16 | git fetch ci 17 | git merge --allow-unrelated-histories ci/main 18 | ``` 19 | 20 | An overview of the files in this project is available at: 21 | , which contains some 22 | rationale for decisions and runs through an example of solving minimal version 23 | and OpenSSL issues. 24 | -------------------------------------------------------------------------------- /src/api/register.rs: -------------------------------------------------------------------------------- 1 | // (c) Copyright 2025 Helsing GmbH. All rights reserved. 2 | use crate::{ 3 | CausalContext, CausalDotStore, Identifier, MvReg, 4 | crdts::{ 5 | mvreg::MvRegValue, 6 | snapshot::{SingleValueError, ToValue}, 7 | }, 8 | }; 9 | 10 | /// Returns the values of this register without collapsing conflicts. 11 | pub fn values(m: &MvReg) -> impl ExactSizeIterator { 12 | m.values().into_iter() 13 | } 14 | 15 | /// Returns the value of this register assuming (and asserting) no conflicts on element values. 16 | pub fn value(m: &MvReg) -> Result<&MvRegValue, Box>> { 17 | m.value() 18 | } 19 | 20 | /// Writes a value to the register. 21 | pub fn write( 22 | v: MvRegValue, 23 | ) -> impl FnMut(&MvReg, &CausalContext, Identifier) -> CausalDotStore { 24 | move |m, cc, id| m.write(v.clone(), cc, id) 25 | } 26 | 27 | /// Clears the register. 28 | pub fn clear() -> impl Fn(&MvReg, &CausalContext, Identifier) -> CausalDotStore { 29 | move |m, _cc, _id| m.clear() 30 | } 31 | -------------------------------------------------------------------------------- /LICENSE-MIT: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2025 Helsing Germany GmbH 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 
14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. -------------------------------------------------------------------------------- /src/transaction/delta.rs: -------------------------------------------------------------------------------- 1 | /// Changes to a CRDT, not full state. 2 | /// 3 | /// Prevents accidental misuse through type safety. Access the inner 4 | /// `CausalDotStore` via the public field. 5 | /// 6 | /// # Example 7 | /// ``` 8 | /// use dson::{Delta, CausalDotStore, OrMap}; 9 | /// 10 | /// # fn example(delta: Delta>>) { 11 | /// // Access inner value 12 | /// let store = delta.0; 13 | /// # } 14 | /// ``` 15 | #[derive(Debug, Clone, PartialEq, Eq, Hash)] 16 | #[cfg_attr(feature = "serde", derive(::serde::Deserialize, ::serde::Serialize))] 17 | #[must_use = "deltas should be sent to other replicas or applied to stores"] 18 | pub struct Delta(pub T); 19 | 20 | impl Delta { 21 | /// Creates a new Delta wrapping the given value. 22 | pub fn new(value: T) -> Self { 23 | Self(value) 24 | } 25 | 26 | /// Unwraps the Delta, returning the inner value. 27 | pub fn into_inner(self) -> T { 28 | self.0 29 | } 30 | } 31 | 32 | #[cfg(test)] 33 | mod tests { 34 | use super::*; 35 | use crate::{CausalDotStore, OrMap}; 36 | 37 | #[test] 38 | fn delta_new_and_into_inner() { 39 | let store = CausalDotStore::>::default(); 40 | let delta = Delta::new(store.clone()); 41 | assert_eq!(delta.into_inner(), store); 42 | } 43 | 44 | #[test] 45 | fn delta_access_inner_via_field() { 46 | let store = CausalDotStore::>::default(); 47 | let delta = Delta::new(store.clone()); 48 | assert_eq!(delta.0, store); 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /src/datetime_literal.rs: -------------------------------------------------------------------------------- 1 | // (c) Copyright 2025 Helsing GmbH. All rights reserved. 2 | /// Declarative macro to create a [`chrono::DateTime`] suitable 3 | /// for const evaluation, as this is otherwise cumbersome. 4 | /// 5 | /// Usage: 6 | /// ```rust 7 | /// # use chrono::{DateTime, Utc}; 8 | /// # use dson::datetime; 9 | /// let datetime: DateTime = datetime!( 2024-12-24 15:00:00 Z); 10 | /// # let _ = datetime; 11 | /// ``` 12 | #[macro_export] 13 | macro_rules! datetime { 14 | ( $year:literal-$month:literal-$day:literal $(T)? $hour:literal:$min:literal:$second:literal Z) => { 15 | const { 16 | #[allow(clippy::zero_prefixed_literal)] 17 | $crate::chrono::DateTime::<$crate::chrono::Utc>::from_naive_utc_and_offset( 18 | datetime!($year - $month - $day $hour:$min:$second), 19 | $crate::chrono::Utc 20 | ) } 21 | }; 22 | ( $year:literal-$month:literal-$day:literal $(T)? 
$hour:literal:$min:literal:$second:literal) => { 23 | const { 24 | #[allow(clippy::zero_prefixed_literal)] 25 | $crate::chrono::NaiveDateTime::new( 26 | match $crate::chrono::NaiveDate::from_ymd_opt($year, $month, $day) { 27 | Some(date) => date, 28 | None => ::std::panic!("year-month-day outside expected range.") 29 | }, 30 | match $crate::chrono::NaiveTime::from_hms_opt($hour, $min, $second) { 31 | Some(time) => time, 32 | None => ::std::panic!("hour:min:second outside expected range.") 33 | } 34 | ) } 35 | }; 36 | } 37 | -------------------------------------------------------------------------------- /examples/transaction_basic.rs: -------------------------------------------------------------------------------- 1 | use dson::{ 2 | CausalDotStore, Identifier, OrMap, 3 | crdts::{mvreg::MvRegValue, snapshot::ToValue}, 4 | transaction::CrdtValue, 5 | }; 6 | 7 | fn main() { 8 | // Create a DSON store 9 | let mut store = CausalDotStore::>::default(); 10 | let id = Identifier::new(0, 0); 11 | 12 | // Write some data using the transaction API 13 | { 14 | let mut tx = store.transact(id); 15 | tx.write_register("name", MvRegValue::String("Alice".to_string())); 16 | tx.write_register("age", MvRegValue::U64(30)); 17 | tx.write_register("active", MvRegValue::Bool(true)); 18 | 19 | let delta = tx.commit(); 20 | println!( 21 | "Created delta with {} bytes", 22 | serde_json::to_string(&delta.0).unwrap().len() 23 | ); 24 | } 25 | 26 | // Read the data back 27 | { 28 | let tx = store.transact(id); 29 | 30 | match tx.get(&"name".to_string()) { 31 | Some(CrdtValue::Register(reg)) => { 32 | if let Ok(MvRegValue::String(name)) = reg.value() { 33 | println!("Name: {name}"); 34 | } 35 | } 36 | _ => println!("Name not found or wrong type"), 37 | } 38 | 39 | match tx.get(&"age".to_string()) { 40 | Some(CrdtValue::Register(reg)) => { 41 | if let Ok(MvRegValue::U64(age)) = reg.value() { 42 | println!("Age: {age}"); 43 | } 44 | } 45 | _ => println!("Age not found or wrong type"), 46 | } 47 | } 48 | 49 | println!("\nTransaction API makes DSON easy to use!"); 50 | } 51 | -------------------------------------------------------------------------------- /benches/nested_transactions.rs: -------------------------------------------------------------------------------- 1 | use dson::{ 2 | CausalDotStore, Identifier, OrMap, crdts::mvreg::MvRegValue, transaction::MapTransaction, 3 | }; 4 | use iai_callgrind::{library_benchmark, library_benchmark_group, main}; 5 | 6 | #[library_benchmark] 7 | fn nested_transaction_3_levels() { 8 | let mut store = CausalDotStore::>::default(); 9 | let id = Identifier::new(0, 0); 10 | 11 | let mut tx = MapTransaction::new(&mut store, id); 12 | tx.in_map("level1", |l1_tx| { 13 | l1_tx.in_map("level2", |l2_tx| { 14 | l2_tx.write_register("value", MvRegValue::U64(42)); 15 | }); 16 | }); 17 | let _delta = tx.commit(); 18 | } 19 | 20 | #[library_benchmark] 21 | fn direct_crdt_api_3_levels() { 22 | let mut store = CausalDotStore::>::default(); 23 | let id = Identifier::new(0, 0); 24 | 25 | // Direct CRDT operations without transaction API 26 | let delta = store.store.apply_to_map( 27 | |l1, ctx1, id1| { 28 | l1.apply_to_map( 29 | |l2, ctx2, id2| { 30 | l2.apply_to_register( 31 | |reg, ctx3, id3| reg.write(MvRegValue::U64(42), ctx3, id3), 32 | "value".to_string(), 33 | ctx2, 34 | id2, 35 | ) 36 | }, 37 | "level2".to_string(), 38 | ctx1, 39 | id1, 40 | ) 41 | }, 42 | "level1".to_string(), 43 | &store.context, 44 | id, 45 | ); 46 | store.join_or_replace_with(delta.store, &delta.context); 47 | } 48 | 49 | 
library_benchmark_group!( 50 | name = nested_transaction_benches; 51 | benchmarks = nested_transaction_3_levels, direct_crdt_api_3_levels 52 | ); 53 | 54 | #[cfg(target_os = "linux")] 55 | main!(library_benchmark_groups = nested_transaction_benches); 56 | 57 | #[cfg(not(target_os = "linux"))] 58 | fn main() {} 59 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "dson" 3 | version = "0.3.0" 4 | edition = "2024" 5 | license = "MIT OR Apache-2.0" 6 | 7 | repository = "https://github.com/helsing-ai/dson" 8 | homepage = "https://github.com/helsing-ai/dson" 9 | documentation = "https://docs.rs/dson" 10 | description = "A delta-state CRDT implementation" 11 | readme = "README.md" 12 | keywords = ["crdt", "delta-state", "distributed", "concurrency", "peer-to-peer"] 13 | categories = ["data-structures", "concurrency"] 14 | 15 | [features] 16 | default = ["json", "chrono", "ulid"] 17 | ## Implements `quickcheck::Arbitrary` for CRDT types, useful for property-based testing. 18 | arbitrary = ["bimap", "quickcheck", "rand", "dep:chrono"] 19 | ## Enables serialization and deserialization of DSON CRDTs to and from `serde_json::Value`. 20 | json = ["serde", "dep:base64", "dep:serde_bytes", "dep:serde_json"] 21 | ## Provides `serde` support for all CRDT types. 22 | serde = ["dep:serde", "dep:serde_bytes", "smallvec/serde", "ulid/serde"] 23 | ## Enables `chrono` support for `Timestamp`. 24 | chrono = ["dep:chrono"] 25 | ## Enables registers to hold ulids. 26 | ulid = ["dep:ulid"] 27 | 28 | [dependencies] 29 | ahash = "0.8.11" 30 | base64 = { version = "0.22.1", optional = true } 31 | bimap = { version = "0.6.3", optional = true } 32 | chrono = { version = "0.4.40", optional = true } 33 | quickcheck = { version = "1.0.3", optional = true } 34 | rand = { version = "0.9.0", optional = true } 35 | serde = { version = "1.0.219", optional = true, features = ["derive"] } 36 | serde_bytes = { version = "0.11.17", optional = true } 37 | serde_json = { version = "1.0.140", optional = true } 38 | smallvec = "1.14.0" 39 | ulid = { version = "1.2.0", optional = true } 40 | 41 | [dev-dependencies] 42 | bimap = "0.6.3" 43 | iai-callgrind = "0.16.1" 44 | insta = "1.43.1" 45 | quickcheck = "1.0.3" 46 | quickcheck_macros = "1.0.0" 47 | rand = "0.9.0" 48 | tango-bench = "0.6" 49 | tracing-subscriber = { version = "0.3.19", features = ["env-filter"] } 50 | 51 | [build-dependencies] 52 | rand = "0.9.0" 53 | rand_distr = "0.5.1" 54 | 55 | [[bench]] 56 | harness = false 57 | name = "tango" 58 | 59 | [[bench]] 60 | harness = false 61 | name = "iai" 62 | 63 | [[bench]] 64 | harness = false 65 | name = "nested_transactions" 66 | -------------------------------------------------------------------------------- /src/dotstores/recording_sentinel.rs: -------------------------------------------------------------------------------- 1 | // (c) Copyright 2025 Helsing GmbH. All rights reserved. 2 | //! This module contains an implementation of Sentinel that simply records 3 | //! all calls in a human readable form. This is mostly useful for tests. 4 | 5 | use crate::{ 6 | crdts::ValueType, 7 | sentinel::{KeySentinel, Sentinel, TypeSentinel, ValueSentinel, Visit}, 8 | }; 9 | use std::{convert::Infallible, fmt::Debug}; 10 | 11 | /// A sentinel that records all calls. 
12 | #[derive(Default)] 13 | pub struct RecordingSentinel { 14 | path: Vec, 15 | /// A string-representation of each call that the sentinel has received. 16 | /// This is mostly useful for tests. 17 | pub changes_seen: Vec, 18 | } 19 | impl RecordingSentinel { 20 | /// Create a new PeekingSentinel 21 | pub fn new() -> RecordingSentinel { 22 | RecordingSentinel { 23 | path: vec![], 24 | changes_seen: vec![], 25 | } 26 | } 27 | } 28 | impl Sentinel for RecordingSentinel { 29 | type Error = Infallible; 30 | } 31 | impl Visit for RecordingSentinel { 32 | fn enter(&mut self, key: &K) -> Result<(), Self::Error> { 33 | self.path.push(format!("{key:?}")); 34 | Ok(()) 35 | } 36 | fn exit(&mut self) -> Result<(), Self::Error> { 37 | self.path.pop(); 38 | Ok(()) 39 | } 40 | } 41 | impl KeySentinel for RecordingSentinel { 42 | fn create_key(&mut self) -> Result<(), Self::Error> { 43 | self.changes_seen 44 | .push(format!("create_key at {}", self.path.join("/"))); 45 | Ok(()) 46 | } 47 | 48 | fn delete_key(&mut self) -> Result<(), Self::Error> { 49 | self.changes_seen 50 | .push(format!("delete_key at {}", self.path.join("/"))); 51 | Ok(()) 52 | } 53 | } 54 | impl ValueSentinel for RecordingSentinel { 55 | fn set(&mut self, value: &V) -> Result<(), Self::Error> { 56 | self.changes_seen.push(format!("set {value:?}")); 57 | Ok(()) 58 | } 59 | fn unset(&mut self, value: V) -> Result<(), Self::Error> { 60 | self.changes_seen.push(format!("unset {:?}", &value)); 61 | Ok(()) 62 | } 63 | } 64 | impl TypeSentinel for RecordingSentinel { 65 | fn set_type(&mut self, value_type: ValueType) -> Result<(), Self::Error> { 66 | self.changes_seen.push(format!("set_type {value_type:?}")); 67 | Ok(()) 68 | } 69 | fn unset_type(&mut self, value_type: ValueType) -> Result<(), Self::Error> { 70 | self.changes_seen.push(format!("unset_type {value_type:?}")); 71 | Ok(()) 72 | } 73 | } 74 | -------------------------------------------------------------------------------- /examples/transaction_conflicts.rs: -------------------------------------------------------------------------------- 1 | use dson::{ 2 | CausalDotStore, Identifier, OrMap, 3 | crdts::{mvreg::MvRegValue, snapshot::ToValue}, 4 | transaction::CrdtValue, 5 | }; 6 | 7 | fn main() { 8 | // Create two replicas 9 | let mut replica_a = CausalDotStore::>::default(); 10 | let mut replica_b = CausalDotStore::>::default(); 11 | 12 | let id_a = Identifier::new(0, 0); 13 | let id_b = Identifier::new(1, 0); 14 | 15 | // Replica A writes a string value 16 | let delta_a = { 17 | let mut tx = replica_a.transact(id_a); 18 | tx.write_register("data", MvRegValue::String("text value".to_string())); 19 | tx.commit() 20 | }; 21 | 22 | // Replica B concurrently writes a map at the same key 23 | let delta_b = { 24 | let mut tx = replica_b.transact(id_b); 25 | tx.in_map("data", |data_tx| { 26 | data_tx.write_register("count", MvRegValue::U64(42)); 27 | }); 28 | tx.commit() 29 | }; 30 | 31 | // Both replicas receive each other's deltas 32 | replica_a.join_or_replace_with(delta_b.0.store, &delta_b.0.context); 33 | replica_b.join_or_replace_with(delta_a.0.store, &delta_a.0.context); 34 | 35 | // Both replicas should converge to the same state 36 | assert_eq!(replica_a, replica_b); 37 | 38 | // Inspect the type conflict on replica A 39 | { 40 | let tx = replica_a.transact(id_a); 41 | 42 | match tx.get(&"data".to_string()) { 43 | Some(CrdtValue::Conflicted(conflicts)) => { 44 | let has_register = conflicts.has_register(); 45 | let has_map = conflicts.has_map(); 46 | let conflict_count = 
conflicts.conflict_count(); 47 | 48 | println!("Type conflict detected!"); 49 | println!(" Has register: {has_register}"); 50 | println!(" Has map: {has_map}"); 51 | println!(" Total conflicts: {conflict_count}"); 52 | 53 | // Application can access both values 54 | if let Some(reg) = conflicts.register() { 55 | if let Ok(MvRegValue::String(s)) = reg.value() { 56 | println!(" Register value: {s}"); 57 | } 58 | } 59 | 60 | if let Some(_map) = conflicts.map() { 61 | println!(" Map value is present"); 62 | } 63 | 64 | println!("\nThe transaction API makes conflicts explicit!"); 65 | println!("Your application can decide how to resolve them."); 66 | } 67 | _ => println!("Expected a type conflict"), 68 | } 69 | } 70 | } 71 | -------------------------------------------------------------------------------- /src/crdts/test_util/arbitrary_delta_impls/mvreg.rs: -------------------------------------------------------------------------------- 1 | // (c) Copyright 2025 Helsing GmbH. All rights reserved. 2 | use crate::{ 3 | CausalContext, CausalDotStore, Identifier, MvReg, 4 | crdts::{ 5 | mvreg::MvRegValue, 6 | test_util::{ArbitraryDelta, Delta, KeyTracker}, 7 | }, 8 | }; 9 | use quickcheck::{Arbitrary, Gen}; 10 | use std::{fmt, ops::RangeBounds}; 11 | 12 | #[cfg_attr(feature = "serde", derive(::serde::Deserialize, ::serde::Serialize))] 13 | #[derive(Debug, Clone)] 14 | pub(crate) struct RegisterOp(pub(crate) Option); 15 | 16 | impl fmt::Display for RegisterOp { 17 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 18 | if self.0.is_some() { 19 | write!(f, "writes a value to the register") 20 | } else { 21 | write!(f, "clears the register") 22 | } 23 | } 24 | } 25 | 26 | impl ArbitraryDelta for MvReg { 27 | type Delta = RegisterOp; 28 | 29 | fn arbitrary_delta( 30 | &self, 31 | cc: &CausalContext, 32 | id: Identifier, 33 | _keys: &mut KeyTracker, 34 | g: &mut Gen, 35 | depth: usize, 36 | ) -> (Self::Delta, CausalDotStore) { 37 | // NOTE: it's tempting to assert that keys.inner_keys.is_empty(), but since we 38 | // generate traces where values change _type_, inner_keys may actually hold things for 39 | // "when this value is an array". 40 | let indent = " ".repeat(depth); 41 | // TODO: we currently do not generate clear()s as they do _really_ weird things to 42 | // registers. see the OrArray push_bottom test. 43 | #[expect(clippy::overly_complex_bool_expr)] 44 | if false && bool::arbitrary(g) { 45 | eprintln!("{indent} -> clearing register"); 46 | (RegisterOp(None), self.clear()) 47 | } else { 48 | let v = MvRegValue::arbitrary(g); 49 | eprintln!("{indent} -> writing to register ({v:?})"); 50 | (RegisterOp(Some(v.clone())), self.write(v, cc, id)) 51 | } 52 | } 53 | } 54 | 55 | impl Delta for RegisterOp { 56 | type DS = MvReg; 57 | 58 | fn depends_on_keyi_in>(&self, _range: R) -> bool { 59 | // TODO: how can we support shrinking MvRegs given they don't have keys? 
60 | false 61 | } 62 | 63 | fn into_crdt( 64 | self, 65 | ds: &Self::DS, 66 | cc: &CausalContext, 67 | id: Identifier, 68 | _keys: &mut KeyTracker, 69 | ) -> CausalDotStore { 70 | // NOTE: same as in arbitrary_delta, we cannot assert that keys.inner_keys.is_empty() 71 | if let Some(v) = self.0 { 72 | ds.write(v, cc, id) 73 | } else { 74 | ds.clear() 75 | } 76 | } 77 | } 78 | -------------------------------------------------------------------------------- /flake.lock: -------------------------------------------------------------------------------- 1 | { 2 | "nodes": { 3 | "flake-utils": { 4 | "inputs": { 5 | "systems": "systems" 6 | }, 7 | "locked": { 8 | "lastModified": 1731533236, 9 | "narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=", 10 | "owner": "numtide", 11 | "repo": "flake-utils", 12 | "rev": "11707dc2f618dd54ca8739b309ec4fc024de578b", 13 | "type": "github" 14 | }, 15 | "original": { 16 | "owner": "numtide", 17 | "repo": "flake-utils", 18 | "type": "github" 19 | } 20 | }, 21 | "nixpkgs": { 22 | "locked": { 23 | "lastModified": 1751792365, 24 | "narHash": "sha256-J1kI6oAj25IG4EdVlg2hQz8NZTBNYvIS0l4wpr9KcUo=", 25 | "owner": "NixOS", 26 | "repo": "nixpkgs", 27 | "rev": "1fd8bada0b6117e6c7eb54aad5813023eed37ccb", 28 | "type": "github" 29 | }, 30 | "original": { 31 | "owner": "NixOS", 32 | "ref": "nixos-unstable", 33 | "repo": "nixpkgs", 34 | "type": "github" 35 | } 36 | }, 37 | "nixpkgs_2": { 38 | "locked": { 39 | "lastModified": 1744536153, 40 | "narHash": "sha256-awS2zRgF4uTwrOKwwiJcByDzDOdo3Q1rPZbiHQg/N38=", 41 | "owner": "NixOS", 42 | "repo": "nixpkgs", 43 | "rev": "18dd725c29603f582cf1900e0d25f9f1063dbf11", 44 | "type": "github" 45 | }, 46 | "original": { 47 | "owner": "NixOS", 48 | "ref": "nixpkgs-unstable", 49 | "repo": "nixpkgs", 50 | "type": "github" 51 | } 52 | }, 53 | "root": { 54 | "inputs": { 55 | "flake-utils": "flake-utils", 56 | "nixpkgs": "nixpkgs", 57 | "rust-overlay": "rust-overlay" 58 | } 59 | }, 60 | "rust-overlay": { 61 | "inputs": { 62 | "nixpkgs": "nixpkgs_2" 63 | }, 64 | "locked": { 65 | "lastModified": 1752028888, 66 | "narHash": "sha256-LRj3/PUpII6taWOrX1w/OeI6f1ncND02PP/kEHvPCqU=", 67 | "owner": "oxalica", 68 | "repo": "rust-overlay", 69 | "rev": "a0f1c656e053463b47639234b151a05e4441bb19", 70 | "type": "github" 71 | }, 72 | "original": { 73 | "owner": "oxalica", 74 | "repo": "rust-overlay", 75 | "type": "github" 76 | } 77 | }, 78 | "systems": { 79 | "locked": { 80 | "lastModified": 1681028828, 81 | "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", 82 | "owner": "nix-systems", 83 | "repo": "default", 84 | "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", 85 | "type": "github" 86 | }, 87 | "original": { 88 | "owner": "nix-systems", 89 | "repo": "default", 90 | "type": "github" 91 | } 92 | } 93 | }, 94 | "root": "root", 95 | "version": 7 96 | } 97 | -------------------------------------------------------------------------------- /examples/nested_transactions.rs: -------------------------------------------------------------------------------- 1 | use dson::{ 2 | CausalDotStore, Identifier, OrMap, crdts::mvreg::MvRegValue, transaction::MapTransaction, 3 | }; 4 | 5 | fn main() { 6 | println!("Nested Transaction API Demo\n"); 7 | 8 | let mut store = CausalDotStore::>::default(); 9 | let id = Identifier::new(0, 0); 10 | 11 | // Create deeply nested structure 12 | { 13 | let mut tx = MapTransaction::new(&mut store, id); 14 | 15 | // Simple register 16 | tx.write_register("app_name", MvRegValue::String("TaskManager".to_string())); 17 | 
18 | // Nested map 19 | tx.in_map("settings", |settings_tx| { 20 | settings_tx.write_register("theme", MvRegValue::String("dark".to_string())); 21 | settings_tx.write_register("notifications", MvRegValue::Bool(true)); 22 | }); 23 | 24 | // Array of maps 25 | tx.in_array("users", |users_tx| { 26 | users_tx.insert_map(0, |user_tx| { 27 | user_tx.write_register("name", MvRegValue::String("Alice".to_string())); 28 | user_tx.write_register("role", MvRegValue::String("admin".to_string())); 29 | }); 30 | 31 | users_tx.insert_map(1, |user_tx| { 32 | user_tx.write_register("name", MvRegValue::String("Bob".to_string())); 33 | user_tx.write_register("role", MvRegValue::String("user".to_string())); 34 | }); 35 | }); 36 | 37 | // Deeply nested: map -> array -> map -> array 38 | tx.in_map("projects", |projects_tx| { 39 | projects_tx.in_array("active", |active_tx| { 40 | active_tx.insert_map(0, |project_tx| { 41 | project_tx 42 | .write_register("name", MvRegValue::String("Website Redesign".to_string())); 43 | 44 | project_tx.in_array("tasks", |tasks_tx| { 45 | tasks_tx 46 | .insert_register(0, MvRegValue::String("Design mockups".to_string())); 47 | tasks_tx.insert_register( 48 | 1, 49 | MvRegValue::String("Implement frontend".to_string()), 50 | ); 51 | tasks_tx.insert_register(2, MvRegValue::String("Deploy".to_string())); 52 | }); 53 | }); 54 | }); 55 | }); 56 | 57 | let _delta = tx.commit(); 58 | } 59 | 60 | println!("Created nested structure!"); 61 | println!(" - Simple register: app_name"); 62 | println!(" - Nested map: settings.theme, settings.notifications"); 63 | println!(" - Array of maps: users[0..1]"); 64 | println!(" - 4-level nesting: projects.active[0].tasks[0..2]"); 65 | 66 | // No callbacks, no manual context management. 67 | // The same simple API at every level. 
68 | 69 | println!("\n✓ Nested transactions eliminate callback hell!"); 70 | } 71 | -------------------------------------------------------------------------------- /tests/transaction_api.rs: -------------------------------------------------------------------------------- 1 | use dson::{CausalDotStore, Identifier, OrMap, crdts::mvreg::MvRegValue, transaction::CrdtValue}; 2 | 3 | #[test] 4 | fn simple_register_write_and_read() { 5 | let mut store = CausalDotStore::>::default(); 6 | let id = Identifier::new(0, 0); 7 | 8 | // Write using transaction 9 | { 10 | let mut tx = store.transact(id); 11 | tx.write_register("email", MvRegValue::String("alice@example.com".to_string())); 12 | let _delta = tx.commit(); 13 | } 14 | 15 | // Read using transaction 16 | { 17 | let tx = store.transact(id); 18 | match tx.get(&"email".to_string()) { 19 | Some(CrdtValue::Register(reg)) => { 20 | use dson::crdts::snapshot::ToValue; 21 | assert_eq!( 22 | reg.value().unwrap(), 23 | &MvRegValue::String("alice@example.com".to_string()) 24 | ); 25 | } 26 | _ => panic!("Expected register"), 27 | } 28 | } 29 | } 30 | 31 | #[test] 32 | fn two_replica_sync_with_transactions() { 33 | // Replica A 34 | let mut replica_a = CausalDotStore::>::default(); 35 | let id_a = Identifier::new(0, 0); 36 | 37 | // Replica B 38 | let mut replica_b = CausalDotStore::>::default(); 39 | let id_b = Identifier::new(1, 0); 40 | 41 | // A writes initial value 42 | let delta_a1 = { 43 | let mut tx = replica_a.transact(id_a); 44 | tx.write_register("count", MvRegValue::U64(0)); 45 | tx.commit() 46 | }; 47 | 48 | // B receives delta from A 49 | replica_b.join_or_replace_with(delta_a1.0.store, &delta_a1.0.context); 50 | 51 | // Both replicas should be in sync 52 | assert_eq!(replica_a, replica_b); 53 | 54 | // A and B concurrently increment 55 | let delta_a2 = { 56 | let mut tx = replica_a.transact(id_a); 57 | tx.write_register("count", MvRegValue::U64(1)); 58 | tx.commit() 59 | }; 60 | 61 | let delta_b1 = { 62 | let mut tx = replica_b.transact(id_b); 63 | tx.write_register("count", MvRegValue::U64(1)); 64 | tx.commit() 65 | }; 66 | 67 | // Exchange deltas 68 | replica_a.join_or_replace_with(delta_b1.0.store, &delta_b1.0.context); 69 | replica_b.join_or_replace_with(delta_a2.0.store, &delta_a2.0.context); 70 | 71 | // Both should converge 72 | assert_eq!(replica_a, replica_b); 73 | 74 | // Should have register with concurrent values 75 | let tx = replica_a.transact(id_a); 76 | match tx.get(&"count".to_string()) { 77 | Some(CrdtValue::Register(reg)) => { 78 | use dson::crdts::snapshot::ToValue; 79 | let values: Vec<_> = reg.values().into_iter().collect(); 80 | // Both concurrent writes are preserved 81 | assert_eq!(values.len(), 2); 82 | // Both values are U64(1), but from different replicas 83 | assert!(values.iter().all(|v| **v == MvRegValue::U64(1))); 84 | } 85 | _ => panic!("Expected register"), 86 | } 87 | } 88 | -------------------------------------------------------------------------------- /examples/transaction_sync.rs: -------------------------------------------------------------------------------- 1 | use dson::{ 2 | CausalDotStore, Identifier, OrMap, 3 | crdts::{mvreg::MvRegValue, snapshot::ToValue}, 4 | transaction::CrdtValue, 5 | }; 6 | 7 | fn main() { 8 | // Simulate a distributed system with 3 replicas 9 | let mut replica_a = CausalDotStore::>::default(); 10 | let mut replica_b = CausalDotStore::>::default(); 11 | let mut replica_c = CausalDotStore::>::default(); 12 | 13 | let id_a = Identifier::new(0, 0); 14 | let id_b = 
Identifier::new(1, 0); 15 | let id_c = Identifier::new(2, 0); 16 | 17 | println!("Three replicas start with empty state\n"); 18 | 19 | // Replica A initializes a counter 20 | let delta_a1 = { 21 | let mut tx = replica_a.transact(id_a); 22 | tx.write_register("counter", MvRegValue::U64(0)); 23 | tx.commit() 24 | }; 25 | println!("Replica A: initialized counter to 0"); 26 | 27 | // Broadcast delta_a1 to all replicas 28 | replica_b.join_or_replace_with(delta_a1.0.store.clone(), &delta_a1.0.context); 29 | replica_c.join_or_replace_with(delta_a1.0.store, &delta_a1.0.context); 30 | println!("Replicas B and C: received initialization\n"); 31 | 32 | // All three replicas concurrently increment 33 | let delta_a2 = { 34 | let mut tx = replica_a.transact(id_a); 35 | tx.write_register("counter", MvRegValue::U64(1)); 36 | tx.commit() 37 | }; 38 | println!("Replica A: incremented to 1"); 39 | 40 | let delta_b1 = { 41 | let mut tx = replica_b.transact(id_b); 42 | tx.write_register("counter", MvRegValue::U64(1)); 43 | tx.commit() 44 | }; 45 | println!("Replica B: incremented to 1"); 46 | 47 | let delta_c1 = { 48 | let mut tx = replica_c.transact(id_c); 49 | tx.write_register("counter", MvRegValue::U64(1)); 50 | tx.commit() 51 | }; 52 | println!("Replica C: incremented to 1\n"); 53 | 54 | // Exchange deltas (full mesh) 55 | println!("Synchronizing replicas..."); 56 | replica_a.join_or_replace_with(delta_b1.0.store.clone(), &delta_b1.0.context); 57 | replica_a.join_or_replace_with(delta_c1.0.store.clone(), &delta_c1.0.context); 58 | 59 | replica_b.join_or_replace_with(delta_a2.0.store.clone(), &delta_a2.0.context); 60 | replica_b.join_or_replace_with(delta_c1.0.store.clone(), &delta_c1.0.context); 61 | 62 | replica_c.join_or_replace_with(delta_a2.0.store, &delta_a2.0.context); 63 | replica_c.join_or_replace_with(delta_b1.0.store, &delta_b1.0.context); 64 | 65 | // Verify convergence 66 | assert_eq!(replica_a, replica_b); 67 | assert_eq!(replica_b, replica_c); 68 | println!("All replicas converged to the same state!\n"); 69 | 70 | // Read final value 71 | { 72 | let tx = replica_a.transact(id_a); 73 | if let Some(CrdtValue::Register(reg)) = tx.get(&"counter".to_string()) { 74 | if let Ok(MvRegValue::U64(value)) = reg.value() { 75 | println!("Final counter value: {value}"); 76 | } 77 | } 78 | } 79 | 80 | println!("\nThe transaction API makes distributed systems easy!"); 81 | println!("Deltas are small, composable, and automatically managed."); 82 | } 83 | -------------------------------------------------------------------------------- /.github/workflows/scheduled.yml: -------------------------------------------------------------------------------- 1 | # Run scheduled (rolling) jobs on a nightly basis, as your crate may break independently of any 2 | # given PR. E.g., updates to rust nightly and updates to this crates dependencies. 
See check.yml for 3 | # information about how the concurrency cancellation and workflow triggering works 4 | permissions: 5 | contents: read 6 | on: 7 | push: 8 | branches: [main] 9 | pull_request: 10 | schedule: 11 | - cron: '7 7 * * *' 12 | concurrency: 13 | group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} 14 | cancel-in-progress: true 15 | name: rolling 16 | jobs: 17 | # https://twitter.com/mycoliza/status/1571295690063753218 18 | nightly: 19 | runs-on: ubuntu-latest 20 | name: ubuntu / nightly 21 | steps: 22 | - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # tag=v6.0.1 23 | with: 24 | submodules: true 25 | - name: Install nightly 26 | uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # branch=master 27 | with: 28 | toolchain: nightly 29 | - name: cargo generate-lockfile 30 | if: hashFiles('Cargo.lock') == '' 31 | run: cargo generate-lockfile 32 | - name: Install Valgrind 33 | run: sudo apt update && sudo apt install valgrind 34 | - uses: cargo-bins/cargo-binstall@main 35 | - name: Install iai-callgrind-runner 36 | run: ./.github/scripts/install_iai_callgrind_runner.sh 37 | - name: cargo test --locked 38 | run: cargo test --locked --all-features --all-targets 39 | # https://twitter.com/alcuadrado/status/1571291687837732873 40 | update: 41 | # This action checks that updating the dependencies of this crate to the latest available that 42 | # satisfy the versions in Cargo.toml does not break this crate. This is important as consumers 43 | # of this crate will generally use the latest available crates. This is subject to the standard 44 | # Cargo semver rules (i.e cargo does not update to a new major version unless explicitly told 45 | # to). 46 | runs-on: ubuntu-latest 47 | name: ubuntu / beta / updated 48 | # There's no point running this if no Cargo.lock was checked in in the first place, since we'd 49 | # just redo what happened in the regular test job. Unfortunately, hashFiles only works in if on 50 | # steps, so we repeat it. 51 | steps: 52 | - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # tag=v6.0.1 53 | with: 54 | submodules: true 55 | - name: Install beta 56 | if: hashFiles('Cargo.lock') != '' 57 | uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # branch=master 58 | with: 59 | toolchain: beta 60 | - name: cargo update 61 | if: hashFiles('Cargo.lock') != '' 62 | run: cargo update 63 | - name: Install Valgrind 64 | run: sudo apt update && sudo apt install valgrind 65 | - uses: cargo-bins/cargo-binstall@main 66 | - name: Install Valgrind 67 | run: sudo apt update && sudo apt install valgrind 68 | - name: Install iai-callgrind-runner 69 | run: ./.github/scripts/install_iai_callgrind_runner.sh 70 | - name: cargo test 71 | if: hashFiles('Cargo.lock') != '' 72 | run: cargo test --locked --all-features --all-targets 73 | env: 74 | RUSTFLAGS: -D deprecated 75 | -------------------------------------------------------------------------------- /src/crdts/orarray/position.rs: -------------------------------------------------------------------------------- 1 | // (c) Copyright 2025 Helsing GmbH. All rights reserved. 2 | use std::cmp::Ordering; 3 | 4 | // NOTE: the original implementation has an atoms field with node identifiers in them stored 5 | // inside each Position, but none of that is actually _used_ anywhere, so it's been left over. 
one 6 | // of the original DSON paper authors confirmed by email on 2023-08-25 that the atoms/nodeid bits 7 | // are leftover from an earlier algorithm they used. 8 | // 9 | // TODO: in the same email, the author suggests that even `f64` may not be an ideal choice 10 | // here since the algorithm assumes that between every two points there exist a third, which is 11 | // true for real numbers, but only kind of true for `f64`. One option is to use a SmallVec 12 | // so that for the happy case (fewer than 64 pushes) we use no more space, and with more we 13 | // seamlessly transition to a bigger type. 14 | /// A position in an [`OrArray`](super::OrArray). 15 | /// 16 | /// This is a wrapper around an `f64` that represents a position in an ordered sequence. The 17 | /// positions are used to determine the order of elements in the array. 18 | // TODO: Consider replacing `Position(f64)` with an unbounded rational 19 | // identifier such as `Fraction`, which stores each coordinate as a growable 20 | // vector of 31-bit digits (base = 2^31). A 64-bit float yields only 2^52 21 | // distinct values in our interval, so after roughly fifty “insert-the-average” 22 | // operations in the same gap the two neighbours become bit-identical and 23 | // `between()` can no longer create a fresh position, forcing an expensive 24 | // renumbering of the entire list. By contrast, a vector-based representation can 25 | // always append another digit to refine the interval, ensuring that a new 26 | // position can be generated. 27 | #[derive(Clone, Copy)] 28 | #[cfg_attr(feature = "serde", derive(::serde::Deserialize, ::serde::Serialize))] 29 | pub struct Position(pub(in super::super) f64); 30 | 31 | impl std::fmt::Debug for Position { 32 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 33 | write!(f, "{}", self.0) 34 | } 35 | } 36 | 37 | impl PartialEq for Position { 38 | fn eq(&self, other: &Self) -> bool { 39 | self.cmp(other) == Ordering::Equal 40 | } 41 | } 42 | 43 | impl Eq for Position {} 44 | 45 | impl PartialOrd for Position { 46 | fn partial_cmp(&self, other: &Self) -> Option { 47 | Some(self.cmp(other)) 48 | } 49 | } 50 | 51 | impl Ord for Position { 52 | fn cmp(&self, other: &Self) -> std::cmp::Ordering { 53 | self.0.total_cmp(&other.0) 54 | } 55 | } 56 | 57 | impl Position { 58 | pub(crate) const LOWER: f64 = 0.0; 59 | pub(crate) const UPPER: f64 = 32767.0; 60 | 61 | /// Returns a new position between two existing positions. 62 | pub fn between(left: Option, right: Option) -> Self { 63 | // NOTE: the original implementation also takes a node id (ie, `Identifier`), but then 64 | // never does anything with it, so we leave it off here. 65 | Self( 66 | (left.map(|p| p.0).unwrap_or(Position::LOWER) 67 | + right.map(|p| p.0).unwrap_or(Position::UPPER)) 68 | / 2.0, 69 | ) 70 | } 71 | 72 | /// Creates a `Position` from a raw `f64` value. 73 | /// 74 | /// Returns `None` if the value is outside the valid range. 75 | pub fn from_raw(value: f64) -> Option { 76 | (Position::LOWER..=Position::UPPER) 77 | .contains(&value) 78 | .then_some(Self(value)) 79 | } 80 | 81 | /// Returns the raw `f64` value of the position. 82 | pub fn as_raw(&self) -> f64 { 83 | self.0 84 | } 85 | } 86 | -------------------------------------------------------------------------------- /examples/simple.rs: -------------------------------------------------------------------------------- 1 | // (c) Copyright 2025 Helsing GmbH. All rights reserved. 2 | //! 
The example simulates a scenario where two replicas modify the same data and 3 | //! then synchronize their states, arriving at a consistent final result. 4 | 5 | use dson::{CausalDotStore, Identifier, OrMap, crdts::NoExtensionTypes}; 6 | use std::error::Error; 7 | 8 | fn main() -> Result<(), Box> { 9 | // Create a unique identifier for replica A. 10 | let replica_a_id = Identifier::new(0, 0); 11 | 12 | // Initialize the state for replica A. The `CausalDotStore` holds the CRDT data 13 | // and its associated causal context. We use an `OrMap` (Observed-Remove Map) 14 | // with String values as our top-level CRDT. 15 | let mut replica_a_state = CausalDotStore::>::default(); 16 | 17 | // --- Replica A: Set email for "alice" --- 18 | // The following operation creates a delta that represents the change of setting 19 | // the email for the key "alice". This delta only contains the change set, not 20 | // the full state. 21 | let delta_from_a = dson::api::map::apply_to_map::<_, NoExtensionTypes, _>( 22 | |inner_map, ctx, id| { 23 | // Within the "alice" map, we apply a change to the "email" register. 24 | dson::api::map::apply_to_register( 25 | // The new value for the register. 26 | |reg, ctx, id| reg.write("alice@example.com".to_string().into(), ctx, id), 27 | "email".to_string(), 28 | )(inner_map, ctx, id) 29 | }, 30 | "alice".to_string(), 31 | )( 32 | // The operation is based on the current state of replica A. 33 | &replica_a_state.store, 34 | &replica_a_state.context, 35 | replica_a_id, 36 | ); 37 | 38 | // Apply the generated delta to replica A's own state. 39 | replica_a_state.join_or_replace_with(delta_from_a.store.clone(), &delta_from_a.context); 40 | 41 | // --- Synchronization: A -> B --- 42 | // In a real-world scenario, the `delta_from_a` would be sent over a network 43 | // to other replicas. Here, we simulate this by creating a second replica and 44 | // applying the delta to it. 45 | 46 | // Create a unique identifier for replica B. 47 | let replica_b_id = Identifier::new(1, 0); 48 | 49 | // Initialize the state for replica B. 50 | let mut replica_b_state = CausalDotStore::>::default(); 51 | 52 | // Apply the delta from replica A to replica B's state. 53 | replica_b_state.join_or_replace_with(delta_from_a.store.clone(), &delta_from_a.context); 54 | 55 | // After synchronization, the states of both replicas should be identical. 56 | assert_eq!(replica_a_state, replica_b_state); 57 | 58 | // --- Replica B: Update email for "alice" --- 59 | // Now, replica B makes a change to the same data. This will create a new 60 | // delta based on replica B's current state. 61 | let delta_from_b = dson::api::map::apply_to_map::<_, NoExtensionTypes, _>( 62 | |inner_map, ctx, id| { 63 | dson::api::map::apply_to_register( 64 | |reg, ctx, id| reg.write("bob@example.com".to_string().into(), ctx, id), 65 | "email".to_string(), 66 | )(inner_map, ctx, id) 67 | }, 68 | "alice".to_string(), 69 | )( 70 | &replica_b_state.store, 71 | &replica_b_state.context, 72 | replica_b_id, 73 | ); 74 | 75 | // Apply the new delta to replica B's own state. 76 | replica_b_state.join_or_replace_with(delta_from_b.store.clone(), &delta_from_b.context); 77 | 78 | // --- Synchronization: B -> A --- 79 | // Propagate the delta from replica B back to replica A. 80 | replica_a_state.join_or_replace_with(delta_from_b.store.clone(), &delta_from_b.context); 81 | 82 | // After this final synchronization, both replicas should once again have 83 | // identical states, reflecting the latest change made by replica B. 
84 | assert_eq!(replica_a_state, replica_b_state); 85 | 86 | Ok(()) 87 | } 88 | -------------------------------------------------------------------------------- /src/api/map.rs: -------------------------------------------------------------------------------- 1 | // (c) Copyright 2025 Helsing GmbH. All rights reserved. 2 | use crate::{ 3 | CausalContext, CausalDotStore, ExtensionType, Identifier, MvReg, OrArray, OrMap, 4 | crdts::{ 5 | TypeVariantValue, Value, 6 | snapshot::{self, ToValue}, 7 | }, 8 | }; 9 | use std::{borrow::Borrow, fmt, hash::Hash}; 10 | 11 | /// Returns the values of this map without collapsing conflicts. 12 | pub fn values( 13 | m: &OrMap, 14 | ) -> snapshot::OrMap<'_, K, snapshot::AllValues<'_, C::ValueRef<'_>>> 15 | where 16 | K: Hash + Eq + fmt::Display, 17 | C: ExtensionType, 18 | { 19 | m.values() 20 | } 21 | 22 | /// Returns the values of this map assuming (and asserting) no conflicts on element values. 23 | // NOTE: A type alias won't help much here :melt:. 24 | #[allow(clippy::type_complexity)] 25 | pub fn value( 26 | m: &OrMap, 27 | ) -> Result< 28 | snapshot::OrMap<'_, K, snapshot::CollapsedValue<'_, C::ValueRef<'_>>>, 29 | Box as ToValue>::LeafValue>>, 30 | > 31 | where 32 | K: Hash + Eq + fmt::Debug + fmt::Display + Clone, 33 | C: ExtensionType, 34 | { 35 | m.value() 36 | } 37 | 38 | /// Creates a new map. 39 | pub fn create() 40 | -> impl Fn(&OrMap, &CausalContext, Identifier) -> CausalDotStore> 41 | where 42 | K: Hash + Eq + fmt::Debug + Clone, 43 | C: ExtensionType, 44 | { 45 | move |m, cc, id| m.create(cc, id) 46 | } 47 | 48 | /// Applies a function to the value at the given key. 49 | pub fn apply( 50 | o: O, 51 | k: K, 52 | ) -> impl FnOnce(&OrMap, &CausalContext, Identifier) -> CausalDotStore> 53 | where 54 | K: Hash + Eq + fmt::Debug + Clone, 55 | O: FnOnce(&TypeVariantValue, &CausalContext, Identifier) -> CausalDotStore>, 56 | C: ExtensionType, 57 | { 58 | move |m, cc, id| m.apply(o, k.clone(), cc, id) 59 | } 60 | 61 | /// Applies a function to the map at the given key. 62 | pub fn apply_to_map( 63 | o: O, 64 | k: K, 65 | ) -> impl FnOnce(&OrMap, &CausalContext, Identifier) -> CausalDotStore> 66 | where 67 | K: Hash + Eq + fmt::Debug + Clone, 68 | O: FnOnce(&OrMap, &CausalContext, Identifier) -> CausalDotStore>, 69 | C: ExtensionType, 70 | { 71 | move |m, cc, id| m.apply_to_map(o, k.clone(), cc, id) 72 | } 73 | 74 | /// Applies a function to the array at the given key. 75 | pub fn apply_to_array( 76 | o: O, 77 | k: K, 78 | ) -> impl FnOnce(&OrMap, &CausalContext, Identifier) -> CausalDotStore> 79 | where 80 | K: Hash + Eq + fmt::Debug + Clone, 81 | O: FnOnce(&OrArray, &CausalContext, Identifier) -> CausalDotStore>, 82 | C: ExtensionType, 83 | { 84 | move |m, cc, id| m.apply_to_array(o, k.clone(), cc, id) 85 | } 86 | 87 | /// Applies a function to the register at the given key. 88 | pub fn apply_to_register( 89 | o: O, 90 | k: K, 91 | ) -> impl FnOnce(&OrMap, &CausalContext, Identifier) -> CausalDotStore> 92 | where 93 | K: Hash + Eq + fmt::Debug + Clone, 94 | O: FnOnce(&MvReg, &CausalContext, Identifier) -> CausalDotStore, 95 | C: ExtensionType, 96 | { 97 | move |m, cc, id| m.apply_to_register(o, k.clone(), cc, id) 98 | } 99 | 100 | /// Removes a key from the map. 
101 | pub fn remove( 102 | k: &Q, 103 | ) -> impl Fn(&OrMap, &CausalContext, Identifier) -> CausalDotStore> + '_ 104 | where 105 | K: Hash + Eq + fmt::Debug + Clone + Borrow, 106 | Q: Hash + Eq + ?Sized, 107 | C: ExtensionType, 108 | { 109 | move |m, cc, id| m.remove(k, cc, id) 110 | } 111 | 112 | /// Clears the map. 113 | pub fn clear() 114 | -> impl Fn(&OrMap, &CausalContext, Identifier) -> CausalDotStore> 115 | where 116 | K: Hash + Eq + fmt::Debug + Clone, 117 | C: ExtensionType, 118 | { 119 | move |m, cc, id| m.clear(cc, id) 120 | } 121 | -------------------------------------------------------------------------------- /src/crdts/test_util.rs: -------------------------------------------------------------------------------- 1 | // (c) Copyright 2025 Helsing GmbH. All rights reserved. 2 | use super::Value; 3 | use crate::{ 4 | CausalContext, CausalDotStore, DotStoreJoin, Identifier, 5 | dotstores::recording_sentinel::RecordingSentinel, 6 | sentinel::{DummySentinel, Sentinel}, 7 | }; 8 | use quickcheck::Gen; 9 | use std::{fmt, ops::RangeBounds}; 10 | 11 | mod arbitrary_delta_impls; 12 | mod qc_arbitrary_impls; 13 | mod qc_arbitrary_ops; 14 | #[cfg_attr(feature = "arbitrary", allow(dead_code))] 15 | pub(crate) fn join_harness( 16 | zero: DS, 17 | init: Init, 18 | w1: W1, 19 | w2: W2, 20 | mut sentinel: S, 21 | check: C, 22 | ) where 23 | DS: DotStoreJoin + DotStoreJoin + Default + Clone, 24 | S: Sentinel, 25 | S::Error: fmt::Debug, 26 | Init: FnOnce(CausalDotStore, Identifier) -> CausalDotStore, 27 | W1: FnOnce(&DS, CausalContext, Identifier) -> CausalDotStore, 28 | W2: FnOnce(&DS, CausalContext, Identifier) -> CausalDotStore, 29 | C: FnOnce(CausalDotStore, S), 30 | { 31 | let v = zero; 32 | let init_id = Identifier::new(9, 0); 33 | let v = init( 34 | CausalDotStore { 35 | store: v, 36 | context: CausalContext::new(), 37 | }, 38 | init_id, 39 | ); 40 | let w1_id = Identifier::new(0, 0); 41 | let mut w1_v = w1(&v.store, v.context.clone(), w1_id); 42 | let w2_id = Identifier::new(1, 0); 43 | let w2_v = w2(&v.store, v.context.clone(), w2_id); 44 | w1_v.test_join_with_and_track(w2_v.store, &w2_v.context, &mut |_| {}, &mut sentinel) 45 | .unwrap(); 46 | check(w1_v, sentinel) 47 | } 48 | 49 | /// Types that can construct descriptors of an arbitrary modification to themselves. 50 | pub(crate) trait ArbitraryDelta: Sized { 51 | #[cfg(not(feature = "serde"))] 52 | /// The type of the descriptor. 53 | type Delta: Delta; 54 | #[cfg(feature = "serde")] 55 | type Delta: Delta + ::serde::Serialize + ::serde::de::DeserializeOwned; 56 | 57 | /// Produces a descriptor for an arbitrary modification to `&self`. 58 | /// 59 | /// If the descriptor produces a new key in `self`, it should represent that key as a `usize` 60 | /// as returned by the `add_*_key` methods on [`KeyTracker`]. Any deltas to inner collections 61 | /// should be passed `&mut keys.inner_keys[keyi]` so they can also track their collections. 62 | /// 63 | /// `depth` is used solely to produce visual guides (eg, indents) so that nested calls to 64 | /// `arbitrary_delta` are easier to distinguish. 65 | fn arbitrary_delta( 66 | &self, 67 | cc: &CausalContext, 68 | id: Identifier, 69 | keys: &mut KeyTracker, 70 | g: &mut Gen, 71 | depth: usize, 72 | ) -> (Self::Delta, CausalDotStore); 73 | } 74 | 75 | /// Types that describe a modification to an instance of [`Delta::DS`]. 76 | pub(crate) trait Delta: Sized + fmt::Display { 77 | /// The [`DotStore`] type that this delta applies to. 
78 | type DS: DotStoreJoin; 79 | 80 | /// Returns true if this delta specifically depends on a key in the given keyi range. 81 | /// 82 | /// Some examples: 83 | /// 84 | /// - an `Update(keyi = 42)` should return `true` when passed a range `16..`. 85 | /// - an `Update(keyi = 4)` should return `false` when passed a range `16..`. 86 | /// - an `Insert(keyi = 42)` should return `true` when passed a range `16..`. 87 | /// - a `Clear` should return `false` when passed a range `16..`. 88 | fn depends_on_keyi_in>(&self, range: R) -> bool; 89 | 90 | /// Turns this modification description into a CRDT over `ds` that, when joined with `ds`, will 91 | /// produce the desired modification. 92 | /// 93 | /// `keys` tracks the sequence of keys produced so far. See [`ArbitraryDelta::arbitrary_delta`] 94 | /// for details. 95 | #[cfg_attr(feature = "arbitrary", allow(dead_code))] 96 | fn into_crdt( 97 | self, 98 | ds: &Self::DS, 99 | cc: &CausalContext, 100 | id: Identifier, 101 | keys: &mut KeyTracker, 102 | ) -> CausalDotStore; 103 | } 104 | 105 | pub(crate) use qc_arbitrary_ops::KeyTracker; 106 | #[cfg(test)] 107 | pub(crate) use qc_arbitrary_ops::Ops; 108 | -------------------------------------------------------------------------------- /src/json.rs: -------------------------------------------------------------------------------- 1 | // (c) Copyright 2025 Helsing GmbH. All rights reserved. 2 | //! JSON representation 3 | //! 4 | //! Value-level conflicts, which can only occur in [`MvReg`s](crate::crdts::MvReg), are 5 | //! represented as a JSON array of the conflicting values in an **arbitrary but deterministic order**. 6 | //! 7 | //! # Examples 8 | //! 9 | //! ## A simple document without conflicts 10 | //! 11 | //! ```json 12 | //! { 13 | //! "name": "John Doe", 14 | //! "age": 43, 15 | //! "phones": [ 16 | //! "+44 1234567", 17 | //! "+44 2345678" 18 | //! ] 19 | //! } 20 | //! ``` 21 | //! 22 | //! ## A document with a value conflict 23 | //! 24 | //! If two users concurrently edit the "name" field, the conflict is preserved. 25 | //! 26 | //! ```json 27 | //! { 28 | //! "name": ["John Doe", "Jon Dough"], 29 | //! "age": 43, 30 | //! "phones": [ 31 | //! "+44 1234567", 32 | //! "+44 2345678" 33 | //! ] 34 | //! } 35 | //! ``` 36 | use crate::{ 37 | ExtensionType, 38 | api::timestamp, 39 | crdts::{ 40 | ValueRef, 41 | mvreg::MvRegValue, 42 | snapshot::{self, ToValue}, 43 | }, 44 | }; 45 | use serde_json::Value; 46 | use std::{fmt, hash::Hash}; 47 | 48 | /// Converts a [`MvRegValue`] to a [`serde_json::Value`]. 49 | impl From for Value { 50 | fn from(val: MvRegValue) -> Self { 51 | match val { 52 | MvRegValue::Bytes(v) => { 53 | base64::Engine::encode(&base64::engine::general_purpose::STANDARD, v).into() 54 | } 55 | MvRegValue::String(v) => v.into(), 56 | MvRegValue::Float(v) => v.into(), 57 | MvRegValue::Double(v) => v.into(), 58 | MvRegValue::U64(v) => v.into(), 59 | MvRegValue::I64(v) => v.into(), 60 | MvRegValue::Bool(v) => v.into(), 61 | MvRegValue::Timestamp(v) => timestamp_to_json(v), 62 | #[cfg(feature = "ulid")] 63 | MvRegValue::Ulid(v) => serde_json::to_value(v).expect("ULID is JSON serializable"), 64 | } 65 | } 66 | } 67 | 68 | #[cfg(feature = "chrono")] 69 | fn timestamp_to_json(v: timestamp::Timestamp) -> Value { 70 | v.into() 71 | } 72 | 73 | #[cfg(not(feature = "chrono"))] 74 | fn timestamp_to_json(v: timestamp::Timestamp) -> Value { 75 | v.as_millis().into() 76 | } 77 | 78 | /// Converts a [`snapshot::AllValues`] to a [`serde_json::Value`]. 
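///
/// A small sketch of the leaf conversion that the snapshot conversions in this
/// module build on (the value here is illustrative):
///
/// ```no_run
/// use dson::crdts::mvreg::MvRegValue;
///
/// // A single register value maps directly onto a JSON value; the map and
/// // array conversions below compose this recursively.
/// let v: serde_json::Value = MvRegValue::U64(42).into();
/// assert_eq!(v, serde_json::json!(42));
/// ```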
79 | impl From> for Value 80 | where 81 | C: ToValue, 82 | serde_json::Value: From, 83 | { 84 | fn from(value: snapshot::AllValues<'_, C>) -> Self { 85 | match value { 86 | snapshot::AllValues::Register(reg) => reg.into(), 87 | snapshot::AllValues::Map(map) => map.into(), 88 | snapshot::AllValues::Array(arr) => arr.into(), 89 | snapshot::AllValues::Custom(c) => c.into(), 90 | } 91 | } 92 | } 93 | 94 | /// Converts a [`ValueRef`] to a `serde_json::Value`. 95 | impl From> for Value 96 | where 97 | C: ExtensionType, 98 | for<'doc> serde_json::Value: From< as ToValue>::Values>, 99 | { 100 | fn from(value: ValueRef<'_, C>) -> Self { 101 | value.values().into() 102 | } 103 | } 104 | 105 | /// Converts a [`snapshot::OrMap`] to a `serde_json::Value`. 106 | impl From> for serde_json::Value 107 | where 108 | K: Hash + Eq + fmt::Display, 109 | V: Into, 110 | { 111 | fn from(value: snapshot::OrMap<'_, K, V>) -> Self { 112 | let obj = value 113 | .map 114 | .into_iter() 115 | .map(|(k, v)| (k.to_string(), v.into())) 116 | .collect(); 117 | serde_json::Value::Object(obj) 118 | } 119 | } 120 | 121 | /// Converts a [`snapshot::OrArray`] to a `serde_json::Value`. 122 | impl From> for serde_json::Value 123 | where 124 | V: Into, 125 | { 126 | fn from(value: snapshot::OrArray) -> Self { 127 | // NOTE: items are sorted by the dot, which we need for handling 128 | // single-writer (temporary) conflicts client-side. 129 | let arr = value.list.into_iter().map(Into::into).collect(); 130 | serde_json::Value::Array(arr) 131 | } 132 | } 133 | 134 | /// Converts a [`snapshot::MvReg`] to a `serde_json::Value`. 135 | /// 136 | /// * If the register is empty, it returns `Null`. 137 | /// * If the register has one value, it returns that value. 138 | /// * If the register has multiple values, it returns an array of those values. 139 | impl From> for serde_json::Value { 140 | fn from(reg: snapshot::MvReg<'_>) -> Self { 141 | match reg.values.len() { 142 | 0 => serde_json::Value::Null, 143 | 1 => (reg.get(0).expect("len > 0")).clone().into(), 144 | _ => serde_json::Value::Array(reg.into_iter().map(|x| (*x).clone().into()).collect()), 145 | } 146 | } 147 | } 148 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # DSON: A Delta-State CRDT for JSON-like Data Structures 2 | 3 | [![Crates.io](https://img.shields.io/crates/v/dson.svg)](https://crates.io/crates/dson) 4 | [![Docs.rs](https://docs.rs/dson/badge.svg)](https://docs.rs/dson) 5 | 6 | This crate provides a Rust implementation of **DSON**, a space-efficient, delta-state Conflict-Free Replicated Datatype (CRDT) for JSON-like data structures. It is based on the research paper ["DSON: JSON CRDT Using Delta-Mutations For Document Stores"][dson-paper] and started as a port of the original author's [JavaScript implementation][js-impl]. 7 | 8 | The primary goal of this library is to enable robust, and efficient 9 | multi-writer collaboration in extremely constrained environments (high 10 | latency and low bandwidth; opportunistic networking). 11 | 12 | See also [Helsing's announcement blog post][blog-post]. 
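A minimal sketch of the end-to-end flow, using the transaction API provided by
this crate (see the `examples/` directory for complete programs):

```rust
use dson::{CausalDotStore, Identifier, OrMap, crdts::mvreg::MvRegValue};

let mut replica_a = CausalDotStore::<OrMap<String>>::default();
let mut replica_b = CausalDotStore::<OrMap<String>>::default();

// Replica A makes a change and commits it, producing a small delta.
let mut tx = replica_a.transact(Identifier::new(0, 0));
tx.write_register("name", MvRegValue::String("Alice".to_string()));
let delta = tx.commit();

// The delta (not the whole document) is shipped over whatever transport you
// provide and joined into other replicas.
replica_b.join_or_replace_with(delta.0.store, &delta.0.context);
```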
13 | 14 | 15 | [dson-paper]: https://dl.acm.org/doi/10.14778/3510397.3510403 16 | [js-impl]: https://github.com/crdt-ibm-research/json-delta-crdt 17 | [blog-post]: https://blog.helsing.ai/dson-a-delta-state-crdt-for-resilient-peer-to-peer-communication-7823349a042c 18 | 19 | ## Core Concepts 20 | 21 | DSON provides three fundamental, composable CRDTs: 22 | 23 | - `OrMap`: An **Observed-Remove Map**, mapping string keys to other CRDT values. 24 | - `OrArray`: An **Observed-Remove Array**, providing a list-like structure. 25 | - `MvReg`: A **Multi-Value Register**, for storing primitive values. When 26 | concurrent writes occur, the register holds all conflicting values. 27 | 28 | These primitives can be nested to create arbitrarily complex data structures. 29 | All modifications produce a **delta**, which is a small set of changes that can 30 | be transmitted to other replicas. 31 | 32 | ## Observed-Remove Semantics 33 | 34 | DSON uses **Observed-Remove (OR)** semantics. This means an element can only be 35 | removed if its addition has been observed. If an element is updated concurrently 36 | with its removal, the update "wins" and the element is preserved. 37 | 38 | ## Causal CRDTs and Tombstone-Free Removals 39 | 40 | DSON is a **causal** CRDT, using causal history to resolve conflicts. A key 41 | advantage of this model is the elimination of **tombstones**, which prevents 42 | unbounded metadata growth in long-lived documents. 43 | 44 | ## Scope of this Crate 45 | 46 | This crate provides the core data structures and algorithms for DSON. It is a 47 | low-level library, and you will likely want to build a typed abstraction layer 48 | on top of it. 49 | 50 | **It does not include any networking protocols.** You are responsible for 51 | implementing the transport layer to broadcast deltas to other replicas. 52 | 53 | ## Attribution 54 | 55 | The initial version of this crate was based on the 56 | [JavaScript implementation][js-impl] by the [DSON paper][dson-paper] 57 | authors. 58 | 59 | The following people have contributed to this implementation: 60 | 61 | - [@jonhoo](https://github.com/jonhoo) 62 | - [@wngr](https://github.com/wngr) 63 | - [@asmello](https://github.com/asmello) 64 | - [@avl](https://github.com/avl) 65 | - [@oktal](https://github.com/oktal) 66 | 67 | 68 | ## License 69 | 70 | This project is licensed under either of 71 | 72 | - Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0) 73 | - MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT) 74 | 75 | at your option. 76 | 77 | ## Further Resources 78 | 79 | - [Talk on DSON and CRDTs](https://www.youtube.com/watch?v=4QkLD7JhD_I) - A presentation covering CRDTs in general and DSON in particular. 80 | 81 | ## Development 82 | 83 | This repository provides a [Nix](https://nixos.org) development shell. 84 | If you have Nix installed, you can enter the shell by running: 85 | 86 | ```sh 87 | nix develop 88 | ``` 89 | 90 | This will provide you with a consistent development environment, including the 91 | correct Rust toolchain and other helpful dependencies. 92 | 93 | In general, `dson` has very little dependencies, so you should expect to run `cargo build`/`cargo test` 94 | just fine. 
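If you want to mirror roughly what CI checks locally, the following sequence is
a reasonable approximation (derived from the workflows under
`.github/workflows/`; the benchmark jobs additionally need Valgrind and
`iai-callgrind-runner` on Linux):

```sh
cargo fmt --check
cargo clippy
cargo test --all-features --all-targets
cargo test --all-features --doc
```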
95 | 96 | ### Benchmarks 97 | 98 | Two benchmark harnesses are available: 99 | 100 | **Tango** - Paired-testing benchmarks for detecting performance regressions: 101 | 102 | ```sh 103 | # Establish baseline 104 | cargo bench --bench tango 105 | 106 | # After making changes, compare against baseline 107 | cargo bench --bench tango -- compare 108 | 109 | # Filter specific benchmarks 110 | cargo bench --bench tango -- compare -f "transaction::map::*" 111 | 112 | # Quick absolute timings (non-comparative mode) 113 | cargo bench --bench tango -- solo -f "transaction::map::*" -s 100 114 | ``` 115 | 116 | **IAI-Callgrind** - Instruction-level profiling using Valgrind (Linux only): 117 | 118 | ```sh 119 | # Run IAI benchmarks (requires Linux and Valgrind) 120 | cargo bench --bench iai 121 | cargo bench --bench nested_transactions 122 | ``` 123 | 124 | ## Documentation 125 | 126 | For a complete guide, including detailed explanations of the core concepts, 127 | advanced topics, and API usage, please refer to the 128 | [**generated docs**](https://docs.rs/dson). 129 | -------------------------------------------------------------------------------- /tests/nested_transactions.rs: -------------------------------------------------------------------------------- 1 | //! Integration tests for nested transaction API. 2 | 3 | use dson::{ 4 | CausalDotStore, Identifier, OrMap, crdts::mvreg::MvRegValue, transaction::MapTransaction, 5 | }; 6 | 7 | #[test] 8 | fn deeply_nested_map_array_map() { 9 | // Structure: map -> array -> map 10 | // Like: { "projects": [{ "name": "DSON", "tasks": [...] }] } 11 | 12 | let mut store = CausalDotStore::>::default(); 13 | let id = Identifier::new(0, 0); 14 | 15 | { 16 | let mut tx = MapTransaction::new(&mut store, id); 17 | 18 | tx.in_array("projects", |projects_tx| { 19 | projects_tx.insert_map(0, |project_tx| { 20 | project_tx.write_register("name", MvRegValue::String("DSON".to_string())); 21 | project_tx.write_register("priority", MvRegValue::U64(1)); 22 | 23 | project_tx.in_array("tasks", |tasks_tx| { 24 | tasks_tx 25 | .insert_register(0, MvRegValue::String("Implement nested TX".to_string())); 26 | tasks_tx.insert_register(1, MvRegValue::String("Write tests".to_string())); 27 | }); 28 | }); 29 | }); 30 | 31 | let _delta = tx.commit(); 32 | } 33 | 34 | // Verify structure was created 35 | use dson::crdts::snapshot::ToValue; 36 | 37 | let projects_val = store.store.get(&"projects".to_string()).unwrap(); 38 | assert_eq!(projects_val.array.len(), 1); 39 | 40 | let project = projects_val.array.get(0).unwrap(); 41 | let name = project.map.get(&"name".to_string()).unwrap(); 42 | assert_eq!( 43 | name.reg.value().unwrap(), 44 | &MvRegValue::String("DSON".to_string()) 45 | ); 46 | 47 | let tasks_val = project.map.get(&"tasks".to_string()).unwrap(); 48 | assert_eq!(tasks_val.array.len(), 2); 49 | } 50 | 51 | #[test] 52 | fn array_of_arrays() { 53 | // Test [[1, 2], [3, 4]] 54 | 55 | let mut store = CausalDotStore::>::default(); 56 | let id = Identifier::new(0, 0); 57 | 58 | { 59 | let mut tx = MapTransaction::new(&mut store, id); 60 | 61 | tx.in_array("matrix", |matrix_tx| { 62 | matrix_tx.insert_array(0, |row_tx| { 63 | row_tx.insert_register(0, MvRegValue::U64(1)); 64 | row_tx.insert_register(1, MvRegValue::U64(2)); 65 | }); 66 | 67 | matrix_tx.insert_array(1, |row_tx| { 68 | row_tx.insert_register(0, MvRegValue::U64(3)); 69 | row_tx.insert_register(1, MvRegValue::U64(4)); 70 | }); 71 | }); 72 | 73 | let _delta = tx.commit(); 74 | } 75 | 76 | // Verify 2x2 matrix 77 | use 
dson::crdts::snapshot::ToValue; 78 | 79 | let matrix = store.store.get(&"matrix".to_string()).unwrap(); 80 | assert_eq!(matrix.array.len(), 2); 81 | 82 | let row0 = matrix.array.get(0).unwrap(); 83 | assert_eq!(row0.array.len(), 2); 84 | assert_eq!( 85 | row0.array.get(0).unwrap().reg.value().unwrap(), 86 | &MvRegValue::U64(1) 87 | ); 88 | assert_eq!( 89 | row0.array.get(1).unwrap().reg.value().unwrap(), 90 | &MvRegValue::U64(2) 91 | ); 92 | 93 | let row1 = matrix.array.get(1).unwrap(); 94 | assert_eq!(row1.array.len(), 2); 95 | assert_eq!( 96 | row1.array.get(0).unwrap().reg.value().unwrap(), 97 | &MvRegValue::U64(3) 98 | ); 99 | assert_eq!( 100 | row1.array.get(1).unwrap().reg.value().unwrap(), 101 | &MvRegValue::U64(4) 102 | ); 103 | } 104 | 105 | #[test] 106 | fn concurrent_nested_modifications() { 107 | // Two replicas modify nested structures concurrently 108 | 109 | let id1 = Identifier::new(0, 0); 110 | let id2 = Identifier::new(1, 0); 111 | 112 | let mut replica1 = CausalDotStore::>::default(); 113 | let mut replica2 = CausalDotStore::>::default(); 114 | 115 | // Both create initial structure 116 | let init_delta = { 117 | let mut tx = MapTransaction::new(&mut replica1, id1); 118 | tx.in_map("config", |cfg_tx| { 119 | cfg_tx.write_register("version", MvRegValue::U64(1)); 120 | }); 121 | tx.commit() 122 | }; 123 | 124 | replica1.join_or_replace_with(init_delta.0.store.clone(), &init_delta.0.context); 125 | replica2.join_or_replace_with(init_delta.0.store, &init_delta.0.context); 126 | 127 | // Replica 1: adds array to config 128 | let delta1 = { 129 | let mut tx = MapTransaction::new(&mut replica1, id1); 130 | tx.in_map("config", |cfg_tx| { 131 | cfg_tx.in_array("features", |features_tx| { 132 | features_tx.insert_register(0, MvRegValue::String("fast".to_string())); 133 | }); 134 | }); 135 | tx.commit() 136 | }; 137 | 138 | // Replica 2: updates version concurrently 139 | let delta2 = { 140 | let mut tx = MapTransaction::new(&mut replica2, id2); 141 | tx.in_map("config", |cfg_tx| { 142 | cfg_tx.write_register("version", MvRegValue::U64(2)); 143 | }); 144 | tx.commit() 145 | }; 146 | 147 | // Exchange deltas 148 | replica1.join_or_replace_with(delta2.0.store, &delta2.0.context); 149 | replica2.join_or_replace_with(delta1.0.store, &delta1.0.context); 150 | 151 | // Both should converge 152 | assert_eq!(replica1, replica2); 153 | 154 | // Verify both changes present 155 | use dson::crdts::snapshot::ToValue; 156 | 157 | let config = replica1.store.get(&"config".to_string()).unwrap(); 158 | let version = config.map.get(&"version".to_string()).unwrap(); 159 | assert!(version.reg.value().unwrap() == &MvRegValue::U64(2)); 160 | 161 | let features = config.map.get(&"features".to_string()).unwrap(); 162 | assert_eq!(features.array.len(), 1); 163 | } 164 | -------------------------------------------------------------------------------- /examples/transaction_nested.rs: -------------------------------------------------------------------------------- 1 | use dson::{ 2 | CausalDotStore, Identifier, OrMap, 3 | crdts::{mvreg::MvRegValue, snapshot::ToValue}, 4 | transaction::CrdtValue, 5 | }; 6 | 7 | fn main() { 8 | let mut store = CausalDotStore::>::default(); 9 | let id = Identifier::new(0, 0); 10 | 11 | // Create nested data structure 12 | { 13 | let mut tx = store.transact(id); 14 | 15 | // Write to nested map for user "alice" 16 | tx.in_map("alice", |alice_tx| { 17 | alice_tx.write_register("email", MvRegValue::String("alice@example.com".to_string())); 18 | alice_tx.write_register("age", 
MvRegValue::U64(30)); 19 | }); 20 | 21 | // Write to nested map for user "bob" 22 | tx.in_map("bob", |bob_tx| { 23 | bob_tx.write_register("email", MvRegValue::String("bob@example.com".to_string())); 24 | bob_tx.write_register("active", MvRegValue::Bool(true)); 25 | }); 26 | 27 | let _delta = tx.commit(); 28 | } 29 | 30 | // Read nested data 31 | { 32 | let tx = store.transact(id); 33 | 34 | // Read Alice's data 35 | let alice_map = tx.get(&"alice".to_string()).expect("Alice should exist"); 36 | let CrdtValue::Map(alice_map) = alice_map else { 37 | panic!("Alice value should be a map"); 38 | }; 39 | println!("Alice's data:"); 40 | 41 | let alice_email = alice_map 42 | .get(&"email".to_string()) 43 | .expect("Alice email should exist"); 44 | let email = alice_email 45 | .reg 46 | .value() 47 | .expect("Alice email should have a value"); 48 | println!(" Email: {email:?}"); 49 | assert_eq!(email, &MvRegValue::String("alice@example.com".to_string())); 50 | 51 | let alice_age = alice_map 52 | .get(&"age".to_string()) 53 | .expect("Alice age should exist"); 54 | let age = alice_age 55 | .reg 56 | .value() 57 | .expect("Alice age should have a value"); 58 | println!(" Age: {age:?}"); 59 | assert_eq!(age, &MvRegValue::U64(30)); 60 | 61 | // Read Bob's data 62 | let bob_map = tx.get(&"bob".to_string()).expect("Bob should exist"); 63 | let CrdtValue::Map(bob_map) = bob_map else { 64 | panic!("Bob value should be a map"); 65 | }; 66 | println!("\nBob's data:"); 67 | 68 | let bob_email = bob_map 69 | .get(&"email".to_string()) 70 | .expect("Bob email should exist"); 71 | let email = bob_email 72 | .reg 73 | .value() 74 | .expect("Bob email should have a value"); 75 | println!(" Email: {email:?}"); 76 | assert_eq!(email, &MvRegValue::String("bob@example.com".to_string())); 77 | 78 | let bob_active = bob_map 79 | .get(&"active".to_string()) 80 | .expect("Bob active should exist"); 81 | let active = bob_active 82 | .reg 83 | .value() 84 | .expect("Bob active should have a value"); 85 | println!(" Active: {active:?}"); 86 | assert_eq!(active, &MvRegValue::Bool(true)); 87 | } 88 | 89 | // Deeply nested structure: map -> array -> map -> array 90 | // Structure: projects -> items -> properties 91 | { 92 | let mut tx = store.transact(id); 93 | 94 | tx.in_map("project", |project_tx| { 95 | project_tx.write_register("name", MvRegValue::String("Website Redesign".to_string())); 96 | 97 | // Array of task maps 98 | project_tx.in_array("tasks", |tasks_tx| { 99 | tasks_tx.insert_map(0, |task_tx| { 100 | task_tx 101 | .write_register("title", MvRegValue::String("Design mockups".to_string())); 102 | task_tx.write_register("done", MvRegValue::Bool(true)); 103 | }); 104 | 105 | tasks_tx.insert_map(1, |task_tx| { 106 | task_tx.write_register( 107 | "title", 108 | MvRegValue::String("Implement frontend".to_string()), 109 | ); 110 | task_tx.write_register("done", MvRegValue::Bool(false)); 111 | }); 112 | }); 113 | }); 114 | 115 | let _delta = tx.commit(); 116 | } 117 | 118 | // Read deeply nested data 119 | { 120 | let tx = store.transact(id); 121 | 122 | println!("\nDeeply nested structure:"); 123 | 124 | let project = tx 125 | .get(&"project".to_string()) 126 | .expect("project should exist"); 127 | let CrdtValue::Map(project_map) = project else { 128 | panic!("project should be a map"); 129 | }; 130 | 131 | let name = project_map 132 | .get(&"name".to_string()) 133 | .expect("name should exist"); 134 | println!(" Project: {:?}", name.reg.value().unwrap()); 135 | 136 | let tasks = project_map 137 | 
.get(&"tasks".to_string()) 138 | .expect("tasks should exist"); 139 | let tasks_len = tasks.array.len(); 140 | println!(" {tasks_len} tasks"); 141 | 142 | let task0 = tasks.array.get(0).expect("task 0 should exist"); 143 | let title0 = task0 144 | .map 145 | .get(&"title".to_string()) 146 | .expect("title should exist"); 147 | println!(" Task 0: {:?}", title0.reg.value().unwrap()); 148 | 149 | let task1 = tasks.array.get(1).expect("task 1 should exist"); 150 | let title1 = task1 151 | .map 152 | .get(&"title".to_string()) 153 | .expect("title should exist"); 154 | println!(" Task 1: {:?}", title1.reg.value().unwrap()); 155 | } 156 | 157 | println!("\nNested transactions make hierarchical data simple!"); 158 | } 159 | -------------------------------------------------------------------------------- /.github/workflows/check.yml: -------------------------------------------------------------------------------- 1 | # This workflow runs whenever a PR is opened or updated, or a commit is pushed to main. It runs 2 | # several checks: 3 | # - fmt: checks that the code is formatted according to rustfmt 4 | # - clippy: checks that the code does not contain any clippy warnings 5 | # - doc: checks that the code can be documented without errors 6 | # - hack: check combinations of feature flags 7 | # - msrv: check that the msrv specified in the crate is correct 8 | permissions: 9 | contents: read 10 | # This configuration allows maintainers of this repo to create a branch and pull request based on 11 | # the new branch. Restricting the push trigger to the main branch ensures that the PR only gets 12 | # built once. 13 | on: 14 | push: 15 | branches: [main] 16 | pull_request: 17 | # If new code is pushed to a PR branch, then cancel in progress workflows for that PR. Ensures that 18 | # we don't waste CI time, and returns results quicker https://github.com/jonhoo/rust-ci-conf/pull/5 19 | concurrency: 20 | group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} 21 | cancel-in-progress: true 22 | name: check 23 | jobs: 24 | fmt: 25 | runs-on: ubuntu-latest 26 | name: stable / fmt 27 | steps: 28 | - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # tag=v6.0.1 29 | with: 30 | submodules: true 31 | - name: Install stable 32 | uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # branch=master 33 | with: 34 | toolchain: stable 35 | components: rustfmt 36 | - name: cargo fmt --check 37 | run: cargo fmt --check 38 | clippy: 39 | runs-on: ubuntu-latest 40 | name: ${{ matrix.toolchain }} / clippy 41 | permissions: 42 | contents: read 43 | checks: write 44 | strategy: 45 | fail-fast: false 46 | matrix: 47 | # Get early warning of new lints which are regularly introduced in beta channels. 
48 | toolchain: [stable, beta] 49 | steps: 50 | - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # tag=v6.0.1 51 | with: 52 | submodules: true 53 | - name: Install ${{ matrix.toolchain }} 54 | uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # branch=master 55 | with: 56 | toolchain: ${{ matrix.toolchain }} 57 | components: clippy 58 | - name: cargo clippy 59 | uses: giraffate/clippy-action@13b9d32482f25d29ead141b79e7e04e7900281e0 # tag=v1.0.1 60 | with: 61 | reporter: 'github-pr-check' 62 | github_token: ${{ secrets.GITHUB_TOKEN }} 63 | semver: 64 | runs-on: ubuntu-latest 65 | name: semver 66 | steps: 67 | - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # tag=v6.0.1 68 | with: 69 | submodules: true 70 | - name: Install stable 71 | uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # branch=master 72 | with: 73 | toolchain: stable 74 | components: rustfmt 75 | - name: cargo-semver-checks 76 | uses: obi1kenobi/cargo-semver-checks-action@5b298c9520f7096a4683c0bd981a7ac5a7e249ae # tag=v2 77 | doc: 78 | # run docs generation on nightly rather than stable. This enables features like 79 | # https://doc.rust-lang.org/beta/unstable-book/language-features/doc-cfg.html which allows an 80 | # API be documented as only available in some specific platforms. 81 | runs-on: ubuntu-latest 82 | name: nightly / doc 83 | steps: 84 | - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # tag=v6.0.1 85 | with: 86 | submodules: true 87 | - name: Install nightly 88 | uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # branch=master 89 | with: 90 | toolchain: nightly 91 | - name: Install cargo-docs-rs 92 | uses: dtolnay/install@982daea0f5d846abc3c83e01a6a1d73c040047c1 # branch=cargo-docs-rs 93 | - name: cargo docs-rs 94 | run: cargo docs-rs 95 | hack: 96 | # cargo-hack checks combinations of feature flags to ensure that features are all additive 97 | # which is required for feature unification 98 | runs-on: ubuntu-latest 99 | name: ubuntu / stable / features 100 | steps: 101 | - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # tag=v6.0.1 102 | with: 103 | submodules: true 104 | - name: Install stable 105 | uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # branch=master 106 | with: 107 | toolchain: stable 108 | - name: cargo install cargo-hack 109 | uses: taiki-e/install-action@cf46383a970594553a83cc6140075ef6a7f54dee # tag=v2.62.64 110 | with: 111 | tool: cargo-hack 112 | # intentionally no target specifier; see https://github.com/jonhoo/rust-ci-conf/pull/4 113 | # --feature-powerset runs for every combination of features 114 | - name: cargo hack 115 | run: cargo hack --feature-powerset check 116 | msrv: 117 | # check that we can build using the minimal rust version that is specified by this crate 118 | runs-on: ubuntu-latest 119 | # we use a matrix here just because env can't be used in job names 120 | # https://docs.github.com/en/actions/learn-github-actions/contexts#context-availability 121 | strategy: 122 | matrix: 123 | msrv: ["1.88.0"] # 2024 edition requires 1.85.0; and we use 124 | # if-and-whilet-let chains https://github.com/rust-lang/rust/issues/53667 125 | name: ubuntu / ${{ matrix.msrv }} 126 | steps: 127 | - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # tag=v6.0.1 128 | with: 129 | submodules: true 130 | - name: Install ${{ matrix.msrv }} 131 | uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # branch=master 132 | 
with: 133 | toolchain: ${{ matrix.msrv }} 134 | - name: cargo +${{ matrix.msrv }} check 135 | run: cargo check 136 | -------------------------------------------------------------------------------- /src/transaction/conflicted.rs: -------------------------------------------------------------------------------- 1 | use crate::crdts::TypeVariantValue; 2 | use crate::dotstores::DotStore; 3 | use crate::{ExtensionType, MvReg, OrArray, OrMap}; 4 | use std::{fmt, hash::Hash}; 5 | 6 | /// A value with concurrent type conflicts. 7 | /// 8 | /// When replicas concurrently write different types to the same key 9 | /// (e.g., one writes a map, another an array), DSON preserves both 10 | /// in a [`TypeVariantValue`]. This type exposes methods to inspect conflicts. 11 | /// 12 | /// # Example 13 | /// 14 | /// ```no_run 15 | /// # use dson::transaction::ConflictedValue; 16 | /// # use dson::crdts::NoExtensionTypes; 17 | /// # let conflicted: ConflictedValue = todo!(); 18 | /// if conflicted.has_map() && conflicted.has_array() { 19 | /// println!("Map and array were written concurrently!"); 20 | /// // Application must decide how to resolve this 21 | /// } 22 | /// ``` 23 | pub struct ConflictedValue<'tx, K, C> 24 | where 25 | K: Hash + Eq, 26 | C: ExtensionType, 27 | { 28 | inner: &'tx TypeVariantValue, 29 | // K appears in CrdtValue<'tx, K, C> but TypeVariantValue doesn't use it. 30 | // PhantomData maintains consistent type parameters across the API. 31 | _phantom: std::marker::PhantomData, 32 | } 33 | 34 | impl<'tx, K, C> ConflictedValue<'tx, K, C> 35 | where 36 | K: Hash + Eq + fmt::Debug + Clone, 37 | C: ExtensionType, 38 | { 39 | pub(crate) fn new(value: &'tx TypeVariantValue) -> Self { 40 | Self { 41 | inner: value, 42 | _phantom: std::marker::PhantomData, 43 | } 44 | } 45 | 46 | /// Returns true if a map value is present in the conflict. 47 | pub fn has_map(&self) -> bool { 48 | !self.inner.map.is_bottom() 49 | } 50 | 51 | /// Returns true if an array value is present in the conflict. 52 | pub fn has_array(&self) -> bool { 53 | !self.inner.array.is_bottom() 54 | } 55 | 56 | /// Returns true if a register value is present in the conflict. 57 | pub fn has_register(&self) -> bool { 58 | !self.inner.reg.is_bottom() 59 | } 60 | 61 | /// Returns a reference to the map value, if present. 62 | pub fn map(&self) -> Option<&OrMap> { 63 | if self.has_map() { 64 | Some(&self.inner.map) 65 | } else { 66 | None 67 | } 68 | } 69 | 70 | /// Returns a reference to the array value, if present. 71 | pub fn array(&self) -> Option<&OrArray> { 72 | if self.has_array() { 73 | Some(&self.inner.array) 74 | } else { 75 | None 76 | } 77 | } 78 | 79 | /// Returns a reference to the register value, if present. 80 | pub fn register(&self) -> Option<&MvReg> { 81 | if self.has_register() { 82 | Some(&self.inner.reg) 83 | } else { 84 | None 85 | } 86 | } 87 | 88 | /// Returns the number of different types present in this conflict. 89 | /// 90 | /// A value of 0 means the key exists but is empty (all types are bottom). 91 | /// A value of 1 means there's no actual type conflict. 92 | /// A value > 1 indicates a genuine type conflict. 
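///
/// A sketch of acting on the count (assuming a `ConflictedValue` obtained from
/// a transaction read; the resolution policy shown is purely illustrative):
///
/// ```no_run
/// # use dson::transaction::ConflictedValue;
/// # use dson::crdts::NoExtensionTypes;
/// # let conflicted: ConflictedValue<String, NoExtensionTypes> = todo!();
/// match conflicted.conflict_count() {
///     0 => println!("key exists but every variant is empty"),
///     1 => println!("no type conflict"),
///     _ => {
///         // Genuine conflict: the application chooses how to resolve it,
///         // e.g. by preferring the map variant whenever it is present.
///         if let Some(_map) = conflicted.map() {
///             println!("resolving in favour of the map variant");
///         }
///     }
/// }
/// ```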
93 | pub fn conflict_count(&self) -> usize { 94 | let mut count = 0; 95 | if self.has_map() { 96 | count += 1; 97 | } 98 | if self.has_array() { 99 | count += 1; 100 | } 101 | if self.has_register() { 102 | count += 1; 103 | } 104 | count 105 | } 106 | } 107 | 108 | impl<'tx, K, C> fmt::Debug for ConflictedValue<'tx, K, C> 109 | where 110 | K: Hash + Eq + fmt::Debug + Clone, 111 | C: ExtensionType + fmt::Debug, 112 | { 113 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 114 | f.debug_struct("ConflictedValue") 115 | .field("has_map", &self.has_map()) 116 | .field("has_array", &self.has_array()) 117 | .field("has_register", &self.has_register()) 118 | .finish() 119 | } 120 | } 121 | 122 | #[cfg(test)] 123 | mod tests { 124 | use super::*; 125 | use crate::crdts::NoExtensionTypes; 126 | use crate::{CausalDotStore, Identifier, OrMap}; 127 | 128 | #[test] 129 | fn conflicted_value_empty() { 130 | use crate::crdts::TypeVariantValue; 131 | let value = TypeVariantValue::::default(); 132 | let conflicted = ConflictedValue::::new(&value); 133 | assert_eq!(conflicted.conflict_count(), 0); 134 | } 135 | 136 | #[test] 137 | fn conflicted_value_single_type() { 138 | // Create a real map with a value using CRDT operations 139 | let store = CausalDotStore::>::default(); 140 | let id = Identifier::new(0, 0); 141 | 142 | let delta = store.store.apply_to_register( 143 | |reg, ctx, id| reg.write("test".to_string().into(), ctx, id), 144 | "key".to_string(), 145 | &store.context, 146 | id, 147 | ); 148 | 149 | // The delta contains a non-bottom register 150 | let value = delta.store.get(&"key".to_string()).unwrap(); 151 | 152 | let conflicted = ConflictedValue::::new(value); 153 | assert!(!conflicted.has_map()); 154 | assert!(!conflicted.has_array()); 155 | assert!(conflicted.has_register()); 156 | assert_eq!(conflicted.conflict_count(), 1); 157 | assert!(conflicted.register().is_some()); 158 | } 159 | 160 | #[test] 161 | fn conflicted_value_conflict_detection() { 162 | // Test that we can detect when there are multiple types 163 | // This is more of a structural test - we verify the logic works 164 | use crate::crdts::TypeVariantValue; 165 | 166 | let value = TypeVariantValue::::default(); 167 | let conflicted = ConflictedValue::::new(&value); 168 | assert_eq!(conflicted.conflict_count(), 0); 169 | assert!(!conflicted.has_map()); 170 | assert!(!conflicted.has_array()); 171 | assert!(!conflicted.has_register()); 172 | } 173 | } 174 | -------------------------------------------------------------------------------- /.github/workflows/test.yml: -------------------------------------------------------------------------------- 1 | # This is the main CI workflow that runs the test suite on all pushes to main and all pull requests. 
2 | # It runs the following jobs: 3 | # - required: runs the test suite on ubuntu with stable and beta rust toolchains 4 | # - minimal: runs the test suite with the minimal versions of the dependencies that satisfy the 5 | # requirements of this crate, and its dependencies 6 | # - os-check: runs the test suite on mac and windows 7 | # See check.yml for information about how the concurrency cancellation and workflow triggering works 8 | permissions: 9 | contents: read 10 | on: 11 | push: 12 | branches: [main] 13 | pull_request: 14 | concurrency: 15 | group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} 16 | cancel-in-progress: true 17 | name: test 18 | jobs: 19 | required: 20 | runs-on: ubuntu-latest 21 | name: ubuntu / ${{ matrix.toolchain }} 22 | strategy: 23 | matrix: 24 | # run on stable and beta to ensure that tests won't break on the next version of the rust 25 | # toolchain 26 | toolchain: [stable, beta] 27 | steps: 28 | - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # tag=v6.0.1 29 | with: 30 | submodules: true 31 | - name: Install ${{ matrix.toolchain }} 32 | uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # branch=master 33 | with: 34 | toolchain: ${{ matrix.toolchain }} 35 | - name: cargo generate-lockfile 36 | # enable this ci template to run regardless of whether the lockfile is checked in or not 37 | if: hashFiles('Cargo.lock') == '' 38 | run: cargo generate-lockfile 39 | - name: Install Valgrind 40 | run: sudo apt update && sudo apt install valgrind 41 | - uses: cargo-bins/cargo-binstall@main 42 | - name: Install iai-callgrind-runner 43 | run: ./.github/scripts/install_iai_callgrind_runner.sh 44 | # https://twitter.com/jonhoo/status/1571290371124260865 45 | - name: cargo test --locked 46 | run: cargo test --locked --all-features --all-targets 47 | # https://github.com/rust-lang/cargo/issues/6669 48 | - name: cargo test --doc 49 | run: cargo test --locked --all-features --doc 50 | minimal: 51 | # This action chooses the oldest version of the dependencies permitted by Cargo.toml to ensure 52 | # that this crate is compatible with the minimal version that this crate and its dependencies 53 | # require. This will pickup issues where this create relies on functionality that was introduced 54 | # later than the actual version specified (e.g., when we choose just a major version, but a 55 | # method was added after this version). 56 | # 57 | # This particular check can be difficult to get to succeed as often transitive dependencies may 58 | # be incorrectly specified (e.g., a dependency specifies 1.0 but really requires 1.1.5). There 59 | # is an alternative flag available -Zdirect-minimal-versions that uses the minimal versions for 60 | # direct dependencies of this crate, while selecting the maximal versions for the transitive 61 | # dependencies. Alternatively, you can add a line in your Cargo.toml to artificially increase 62 | # the minimal dependency, which you do with e.g.: 63 | # ```toml 64 | # # for minimal-versions 65 | # [target.'cfg(any())'.dependencies] 66 | # openssl = { version = "0.10.55", optional = true } # needed to allow foo to build with -Zminimal-versions 67 | # ``` 68 | # The optional = true is necessary in case that dependency isn't otherwise transitively required 69 | # by your library, and the target bit is so that this dependency edge never actually affects 70 | # Cargo build order. See also 71 | # https://github.com/jonhoo/fantoccini/blob/fde336472b712bc7ebf5b4e772023a7ba71b2262/Cargo.toml#L47-L49. 
72 | # This action is run on ubuntu with the stable toolchain, as it is not expected to fail 73 | runs-on: ubuntu-latest 74 | name: ubuntu / stable / minimal-versions 75 | steps: 76 | - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # tag=v6.0.1 77 | with: 78 | submodules: true 79 | - name: Install stable 80 | uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # branch=master 81 | with: 82 | toolchain: stable 83 | - name: Install nightly for -Zminimal-versions 84 | uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # branch=master 85 | with: 86 | toolchain: nightly 87 | - name: rustup default stable 88 | run: rustup default stable 89 | - name: cargo update -Zminimal-versions 90 | run: cargo +nightly update -Zminimal-versions 91 | - name: Install Valgrind 92 | run: sudo apt update && sudo apt install valgrind 93 | - uses: cargo-bins/cargo-binstall@main 94 | - name: Install iai-callgrind-runner 95 | run: ./.github/scripts/install_iai_callgrind_runner.sh 96 | - name: cargo test 97 | run: cargo test --locked --all-features --all-targets 98 | os-check: 99 | # run cargo test on mac and windows 100 | runs-on: ${{ matrix.os }} 101 | name: ${{ matrix.os }} / stable 102 | strategy: 103 | fail-fast: false 104 | matrix: 105 | os: [macos-latest, windows-latest] 106 | steps: 107 | # if your project needs OpenSSL, uncomment this to fix Windows builds. 108 | # it's commented out by default as the install command takes 5-10m. 109 | # - run: echo "VCPKG_ROOT=$env:VCPKG_INSTALLATION_ROOT" | Out-File -FilePath $env:GITHUB_ENV -Append 110 | # if: runner.os == 'Windows' 111 | # - run: vcpkg install openssl:x64-windows-static-md 112 | # if: runner.os == 'Windows' 113 | - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # tag=v6.0.1 114 | with: 115 | submodules: true 116 | - name: Install stable 117 | uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # branch=master 118 | with: 119 | toolchain: stable 120 | - name: cargo generate-lockfile 121 | if: hashFiles('Cargo.lock') == '' 122 | run: cargo generate-lockfile 123 | - name: cargo test 124 | run: cargo test --locked --all-features --all-targets 125 | -------------------------------------------------------------------------------- /src/macros.rs: -------------------------------------------------------------------------------- 1 | // (c) Copyright 2025 Helsing GmbH. All rights reserved. 2 | /// Convenience macro for creating dot values. 3 | /// 4 | /// NOTE! This is mostly useful for tests, since it does not provide control 5 | /// over the app or priority fields of a dot. 6 | #[macro_export] 7 | macro_rules! dot { 8 | ($seq:expr) => { 9 | const { 10 | $crate::causal_context::Dot::mint( 11 | $crate::causal_context::Identifier::new(1, 1), 12 | $seq, 13 | ) 14 | } 15 | }; 16 | ($node:expr, $seq:expr) => { 17 | const { 18 | $crate::causal_context::Dot::mint( 19 | $crate::causal_context::Identifier::new($node, 1), 20 | $seq, 21 | ) 22 | } 23 | }; 24 | ($node:expr, $app:expr, $seq:expr) => { 25 | const { 26 | $crate::causal_context::Dot::mint( 27 | $crate::causal_context::Identifier::new($node, $app), 28 | $seq, 29 | ) 30 | } 31 | }; 32 | } 33 | 34 | /// Convenience macro for creating a OrMap instance. 35 | /// 36 | /// Use the [`crdt_map_store`](crate::crdt_map_store) literal to also create a matching 37 | /// CausalContext. 38 | /// 39 | /// ```rust 40 | /// # use dson::{crdt_map_literal, dot}; 41 | /// let map = crdt_map_literal! 
{ 42 | /// "field_x" => ("Hello", dot!(1,2)), 43 | /// "field_y" => ("World", dot!(1,3)), 44 | /// "field_z" => { 45 | /// "field_x" => ("Nested", dot!(1,4)), 46 | /// "field_y" => ("Nested", dot!(1,5)) 47 | /// } 48 | /// }; 49 | /// ``` 50 | /// 51 | #[macro_export] 52 | macro_rules! crdt_map_literal { 53 | ($($k:literal => $v:tt),*) => { 54 | $crate::crdt_literal!( { $( $k => $v ),* } ).map 55 | }; 56 | } 57 | 58 | /// Convenience macro for creating a TypeVariantValue, of either map, array or register type. 59 | /// 60 | /// 61 | /// Register literal: 62 | /// ```rust 63 | /// # use dson::{crdt_literal, dot}; 64 | /// let reg = crdt_literal!( ("hello", dot!(1))); 65 | /// ``` 66 | /// 67 | /// Conflicted register literal: 68 | /// ```rust 69 | /// # use dson::{crdt_literal, dot}; 70 | /// let reg = crdt_literal!( ("Hello", dot!(1); "Bonjour", dot!(2); )); 71 | /// ``` 72 | /// 73 | /// Map literal (note the '{' and '}'): 74 | /// ```rust 75 | /// # use dson::{crdt_literal, dot}; 76 | /// let reg = crdt_literal!( { 77 | /// "Greeting" => ("Hello", dot!(1)) 78 | /// } ); 79 | /// ``` 80 | /// 81 | /// Array literal (note the '[' and ']'): 82 | /// ```rust 83 | /// # use dson::{crdt_literal, dot}; 84 | /// let reg = crdt_literal!( [ 85 | /// (("Banana", dot!(3)), dot!(4), dot!(5), dot!(6), 42.0), 86 | /// (("Cantaloupe", dot!(7)), dot!(8), dot!(9), dot!(10), 43.0) 87 | /// ] ); 88 | /// ``` 89 | /// The first tuple is the actual value in the array, with its dot. 90 | /// The remaining 4 parameters are: Uid, 2 array position dots (for dotfunmap + 91 | /// dotfun), and the f64 value that decides the sorting order of the array. 92 | /// 93 | /// See section 5, about the OrArray algorithm, in the DSON paper for more information. 94 | /// 95 | /// Note that this macro does not generate a CausalContext. 96 | #[macro_export] 97 | macro_rules! crdt_literal { 98 | // Map 99 | ({$($k:literal => $v:tt),*}) => { 100 | { 101 | let mut map = $crate::OrMap::::default(); 102 | $( { $crate::crdt_literal!(map_insert, map, $k, $v); } )* 103 | $crate::crdts::TypeVariantValue { 104 | map, 105 | ..$crate::crdts::TypeVariantValue::<$crate::crdts::NoExtensionTypes>::default() 106 | } 107 | } 108 | }; 109 | 110 | // Array 111 | ([$($v:tt),*]) => { 112 | { 113 | let mut array = $crate::OrArray::<$crate::crdts::NoExtensionTypes>::default(); 114 | $( $crate::crdt_literal!(array_element, array, $v); )* 115 | $crate::crdts::TypeVariantValue { 116 | array, 117 | ..$crate::crdts::TypeVariantValue::<$crate::crdts::NoExtensionTypes>::default() 118 | } 119 | } 120 | }; 121 | 122 | // Mvreg 123 | ( ($($v:expr, $dot:expr $(;)? )* ) ) => { 124 | { 125 | let mut reg = $crate::crdts::mvreg::MvReg::default(); 126 | $( reg.push($dot, $v); )* 127 | $crate::crdts::TypeVariantValue { 128 | reg, 129 | ..$crate::crdts::TypeVariantValue::<$crate::crdts::NoExtensionTypes>::default() 130 | } 131 | } 132 | }; 133 | 134 | // Helper for creating map elements 135 | (map_insert, $temp:ident, $k:literal , $v: tt) => { 136 | $temp.insert($k.into(), $crate::crdt_literal!($v)); 137 | }; 138 | 139 | // Helper for creating array elements 140 | (array_element, $temp:ident, ($v:tt, $uid: expr, $dot1:expr, $dot2:expr, $pos_f64:expr)) => { 141 | let val = $crate::crdt_literal!($v); 142 | $temp.insert_raw($crate::crdts::orarray::Uid::from($uid), std::iter::once(($dot1,$dot2,$pos_f64)), val); 143 | }; 144 | 145 | } 146 | 147 | #[macro_export] 148 | macro_rules! 
crdt_map_store { 149 | ($($k:literal => $v:tt),*) => { 150 | { 151 | use $crate::{DotStore, CausalDotStore}; 152 | let ormap = $crate::crdt_map_literal!($($k => $v),*); 153 | let dots = ormap.dots(); 154 | CausalDotStore { 155 | store: ormap, 156 | context: dots 157 | } 158 | } 159 | } 160 | } 161 | 162 | #[cfg(test)] 163 | mod tests { 164 | use crate::enable_determinism; 165 | use insta::assert_debug_snapshot; 166 | 167 | #[test] 168 | fn crdt_map_literal_macro() { 169 | enable_determinism(); 170 | let map = crdt_map_literal! { 171 | "field_x" => ("Hello", dot!(1,2)), 172 | "field_y" => ("World", dot!(1,3)), 173 | "field_z" => { 174 | "field_x" => ("Nested", dot!(1,4)), 175 | "field_y" => ("Nested", dot!(1,5)) 176 | } 177 | }; 178 | assert_debug_snapshot!(map); 179 | } 180 | #[test] 181 | fn crdt_map_store_macro() { 182 | enable_determinism(); 183 | let map = crdt_map_store! { 184 | "field_x" => ("Hello", dot!(1,2)), 185 | "field_y" => ("World", dot!(1,3)), 186 | "field_z" => { 187 | "field_x" => ("Nested", dot!(1,4)), 188 | "field_y" => ("Nested", dot!(1,5)) 189 | } 190 | }; 191 | assert_debug_snapshot!(map); 192 | } 193 | #[test] 194 | fn crdt_map_literal_macro_array() { 195 | enable_determinism(); 196 | let map = crdt_map_literal! { 197 | "field_x" => ("Hello", dot!(1)), 198 | "field_y" => ("World", dot!(2)), 199 | "field_z" => [ 200 | (("Banana", dot!(3)), dot!(4), dot!(5), dot!(6), 42.0), 201 | (("Cantaloupe", dot!(7)), dot!(8), dot!(9), dot!(10), 43.0) 202 | ] 203 | }; 204 | assert_debug_snapshot!(map); 205 | } 206 | } 207 | -------------------------------------------------------------------------------- /src/transaction/mod.rs: -------------------------------------------------------------------------------- 1 | //! Transaction-based API for ergonomic CRDT mutations. 2 | //! 3 | //! This module provides a transaction-based API for making changes to DSON stores. 4 | //! Unlike the callback-based `api` module, transactions provide: 5 | //! 6 | //! - **Method chaining** - No nested callbacks 7 | //! - **Explicit conflict handling** - Enums force handling of type conflicts 8 | //! - **Automatic rollback** - Changes drop unless you call `commit()` 9 | //! - **Automatic delta management** - Deltas accumulate and return on commit 10 | //! 11 | //! # Example 12 | //! 13 | //! ``` 14 | //! use dson::{CausalDotStore, Identifier, OrMap, crdts::mvreg::MvRegValue, transaction::CrdtValue}; 15 | //! use dson::crdts::snapshot::ToValue; 16 | //! 17 | //! let mut store = CausalDotStore::>::default(); 18 | //! let id = Identifier::new(0, 0); 19 | //! 20 | //! // Create a transaction 21 | //! let mut tx = store.transact(id); 22 | //! 23 | //! // Write values 24 | //! tx.write_register("name", MvRegValue::String("Alice".to_string())); 25 | //! tx.write_register("age", MvRegValue::U64(30)); 26 | //! 27 | //! // IMPORTANT: You must call commit() or changes are lost 28 | //! let delta = tx.commit(); 29 | //! 30 | //! // Read with explicit type handling 31 | //! let tx = store.transact(id); 32 | //! match tx.get(&"name".to_string()) { 33 | //! Some(CrdtValue::Register(reg)) => { 34 | //! println!("Name: {:?}", reg.value().unwrap()); 35 | //! } 36 | //! Some(CrdtValue::Conflicted(conflicts)) => { 37 | //! println!("Type conflict!"); 38 | //! } 39 | //! None => { 40 | //! println!("Key not found"); 41 | //! } 42 | //! _ => {} 43 | //! } 44 | //! ``` 45 | //! 46 | //! # Transaction Semantics 47 | //! 48 | //! Both [`MapTransaction`] and [`ArrayTransaction`] clone the store and work on the copy. 49 | //! 
Changes apply immediately to the clone, enabling reads within the transaction to see 50 | //! uncommitted changes. Call `commit()` to apply changes permanently. Drop the transaction 51 | //! without committing to discard all changes (automatic rollback). 52 | //! 53 | //! ## How Transactions Work 54 | //! 55 | //! - **On creation**: The store is cloned 56 | //! - **During operations**: Changes apply to the cloned store 57 | //! - **On commit**: The clone swaps back into the original store 58 | //! - **On drop**: Changes discard automatically if not committed 59 | //! 60 | //! ## Why This Design 61 | //! 62 | //! This provides: 63 | //! - **Automatic rollback**: Drop the transaction to undo changes 64 | //! - **Isolation**: Reads see uncommitted changes within the same transaction 65 | //! - **Simplicity**: What you write is what you read 66 | //! 67 | //! ## Performance Tradeoff 68 | //! 69 | //! The transaction API trades performance for ergonomics. Top-level transactions clone the store 70 | //! on creation and apply each operation eagerly to the clone. This enables rollback support 71 | //! and ensures reads within the transaction see uncommitted changes. 72 | //! 73 | //! Benchmarks on an empty map show **2-2.5x overhead** compared to the raw API: 74 | //! 75 | //! | Operation | Raw API | Transaction | Overhead | 76 | //! |-----------|---------|-------------|----------| 77 | //! | Insert | 156 ns | 347 ns | 2.2x | 78 | //! | Update | 159 ns | 344 ns | 2.2x | 79 | //! | Remove | 50 ns | 69 ns | 1.4x | 80 | //! 81 | //! The overhead stems from the clone-and-swap implementation. Top-level transactions clone the 82 | //! store on creation and apply each operation eagerly to the clone. This ensures reads within 83 | //! the transaction see uncommitted changes and enables automatic rollback on drop. 84 | //! 85 | //! ### Nested Transaction Optimization 86 | //! 87 | //! Nested transactions (`in_map`, `in_array`, `insert_map`, `insert_array`) use `mem::take` 88 | //! instead of cloning the parent store. This moves nested structures without copying: 89 | //! 90 | //! - **Shallow nesting (1-2 levels)**: Minimal impact 91 | //! - **Deep nesting (3+ levels)**: Savings from avoided parent store clones 92 | //! - **Large nested collections**: Saves proportional to parent store size 93 | //! 94 | //! The ~200-300ns overhead per operation is acceptable for most applications. For 95 | //! latency-critical single-field updates, use [`api`](crate::api). For complex mutations 96 | //! where clarity and safety outweigh microseconds, use transactions 97 | //! 98 | //! # Type Conflict Handling 99 | //! 100 | //! DSON's unique feature is preserving type conflicts. When different replicas 101 | //! concurrently write different types to the same key, DSON preserves both. 102 | //! The transaction API exposes this through the [`CrdtValue`] enum: 103 | //! 104 | //! ```no_run 105 | //! # use dson::transaction::{MapTransaction, CrdtValue}; 106 | //! # let tx: MapTransaction = todo!(); 107 | //! match tx.get(&"field".to_string()) { 108 | //! Some(CrdtValue::Map(map)) => { /* single type: map */ } 109 | //! Some(CrdtValue::Array(array)) => { /* single type: array */ } 110 | //! Some(CrdtValue::Register(reg)) => { /* single type: register */ } 111 | //! Some(CrdtValue::Conflicted(c)) => { 112 | //! // Type conflict! 113 | //! if c.has_map() && c.has_array() { 114 | //! // Application must resolve 115 | //! } 116 | //! } 117 | //! None => { /* key doesn't exist */ } 118 | //! 
Some(CrdtValue::Empty) => { /* key exists but is empty */ } 119 | //! } 120 | //! ``` 121 | //! 122 | //! # Nested Operations 123 | //! 124 | //! The transaction API provides uniform ergonomics at all nesting levels: 125 | //! 126 | //! ``` 127 | //! # use dson::{CausalDotStore, Identifier, OrMap}; 128 | //! # use dson::crdts::mvreg::MvRegValue; 129 | //! # let mut store = CausalDotStore::>::default(); 130 | //! # let id = Identifier::new(0, 0); 131 | //! let mut tx = store.transact(id); 132 | //! 133 | //! tx.in_map("user", |user_tx| { 134 | //! user_tx.write_register("email", MvRegValue::String("alice@example.com".to_string())); 135 | //! user_tx.write_register("age", MvRegValue::U64(30)); 136 | //! 137 | //! user_tx.in_array("tags", |tags_tx| { 138 | //! tags_tx.insert_register(0, MvRegValue::String("admin".to_string())); 139 | //! // Nested transaction commits automatically when closure returns 140 | //! }); 141 | //! // Nested transaction commits automatically when closure returns 142 | //! }); 143 | //! 144 | //! // Top-level transaction requires explicit commit 145 | //! let delta = tx.commit(); 146 | //! ``` 147 | //! 148 | //! **Important**: Nested transactions (`in_map`, `in_array`, `insert_map`, `insert_array`) 149 | //! commit automatically when their closure returns. Only the top-level transaction requires 150 | //! an explicit `commit()` call. 151 | //! 152 | //! Use [`MapTransaction::in_map`] and [`MapTransaction::in_array`] for nesting. 153 | //! Use [`ArrayTransaction::insert_map`] and [`ArrayTransaction::insert_array`] 154 | //! for arrays containing collections. 155 | 156 | mod array_transaction; 157 | mod conflicted; 158 | mod crdt_value; 159 | mod delta; 160 | mod map_transaction; 161 | 162 | pub use array_transaction::ArrayTransaction; 163 | pub use conflicted::ConflictedValue; 164 | pub use crdt_value::CrdtValue; 165 | pub use delta::Delta; 166 | pub use map_transaction::MapTransaction; 167 | -------------------------------------------------------------------------------- /src/api/timestamp.rs: -------------------------------------------------------------------------------- 1 | // (c) Copyright 2025 Helsing GmbH. All rights reserved. 2 | //! This module provides a `Timestamp` type for efficient encoding of UTC datetimes. 3 | //! 4 | //! The `Timestamp` is represented as a 64-bit integer of milliseconds since the 5 | //! UNIX epoch, but is constrained to a range of years from 0 to 9999. 6 | //! This allows for compact and performant representation of datetimes. 7 | use std::fmt; 8 | 9 | #[cfg(feature = "chrono")] 10 | use crate::datetime; 11 | #[cfg(feature = "chrono")] 12 | use chrono::{DateTime, Datelike, Utc}; 13 | #[cfg(feature = "chrono")] 14 | use std::str::FromStr; 15 | 16 | /// Error returned when creating or parsing a `Timestamp`. 17 | #[derive(Debug, Clone, PartialEq, Eq)] 18 | pub enum TimestampError { 19 | /// The year is outside the supported range of `0` to `9999`. 20 | InvalidYear(i32), 21 | /// The string could not be parsed as a valid RFC 3339 datetime. 
22 | Parse(String), 23 | } 24 | 25 | impl fmt::Display for TimestampError { 26 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 27 | match self { 28 | TimestampError::InvalidYear(year) => write!( 29 | f, 30 | "invalid year {year}, supported years are between 0 and 9999 included" 31 | ), 32 | TimestampError::Parse(s) => { 33 | write!(f, "failed to parse date {s} in rfc3339 format") 34 | } 35 | } 36 | } 37 | } 38 | 39 | impl std::error::Error for TimestampError {} 40 | 41 | /// Represents a UTC datetime with millisecond precision. 42 | /// 43 | /// `Timestamp` is stored as an `i64` representing the number of milliseconds since the 44 | /// UNIX epoch. 45 | /// 46 | /// The valid range for a `Timestamp` is from `0000-01-01T00:00:00.000Z` to 47 | /// `9999-12-31T23:59:59.999Z`. 48 | #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] 49 | #[cfg_attr(feature = "serde", derive(::serde::Deserialize, ::serde::Serialize))] 50 | pub struct Timestamp(i64); 51 | 52 | impl Timestamp { 53 | /// Creates a new `Timestamp` from a `chrono::DateTime`. 54 | /// 55 | /// The datetime is truncated to millisecond precision. 56 | /// 57 | /// # Errors 58 | /// 59 | /// Returns an error if the year is outside the supported range of `0` to `9999`. 60 | #[cfg(feature = "chrono")] 61 | pub fn new(datetime: DateTime) -> Result { 62 | let year = datetime.year(); 63 | // NOTE: This is arguably more clear. 64 | #[expect(clippy::manual_range_contains)] 65 | if year < 0 || year > 9999 { 66 | return Err(TimestampError::InvalidYear(year)); 67 | } 68 | let truncated_timestamp = datetime.timestamp_millis(); 69 | Ok(Timestamp(truncated_timestamp)) 70 | } 71 | 72 | #[cfg(not(feature = "chrono"))] 73 | /// Creates a new `Timestamp` from an i64. This operation always succeeds. 74 | pub fn new(val: i64) -> Result { 75 | Ok(Self(val)) 76 | } 77 | 78 | /// Creates a `Timestamp` from a number of milliseconds since the UNIX epoch. 79 | /// 80 | /// Returns `None` if the number of milliseconds corresponds to a datetime outside 81 | /// the supported range. 82 | #[cfg(feature = "chrono")] 83 | pub fn from_millis(milliseconds: i64) -> Option { 84 | (Self::MIN.as_millis()..=Self::MAX.as_millis()) 85 | .contains(&milliseconds) 86 | .then_some(Self(milliseconds)) 87 | } 88 | 89 | #[cfg(not(feature = "chrono"))] 90 | /// Creates a `Timestamp` from a number of milliseconds since the UNIX epoch. 91 | /// This operation always succeeds. 92 | pub fn from_millis(milliseconds: i64) -> Option { 93 | Some(Self(milliseconds)) 94 | } 95 | 96 | /// Returns the number of milliseconds since the UNIX epoch as an `i64`. 97 | pub fn as_millis(&self) -> i64 { 98 | self.0 99 | } 100 | 101 | /// Converts the `Timestamp` to a `chrono::DateTime`. 102 | #[cfg(feature = "chrono")] 103 | pub(crate) fn as_datetime(&self) -> DateTime { 104 | DateTime::from_timestamp_millis(self.0) 105 | .expect("roundtrips with `DateTime::timestamp_millis`") 106 | } 107 | 108 | /// The minimum supported `Timestamp`: `0000-01-01T00:00:00.000Z`. 109 | #[cfg(feature = "chrono")] 110 | pub const MIN: Timestamp = Timestamp(datetime!(0000-01-01 00:00:00 Z).timestamp_millis()); 111 | /// The maximum supported `Timestamp`: `9999-12-31T23:59:59.999Z`. 112 | #[cfg(feature = "chrono")] 113 | pub const MAX: Timestamp = Timestamp(datetime!(10000-01-01 00:00:00 Z).timestamp_millis() - 1); 114 | } 115 | 116 | #[cfg(feature = "chrono")] 117 | impl fmt::Display for Timestamp { 118 | // Formats the `Timestamp` as an RFC 3339 string. 
119 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 120 | self.as_datetime().fmt(f) 121 | } 122 | } 123 | 124 | #[cfg(not(feature = "chrono"))] 125 | impl fmt::Display for Timestamp { 126 | // Formats the `Timestamp` as an RFC 3339 string. 127 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 128 | write!(f, "{}", self.0) 129 | } 130 | } 131 | 132 | impl fmt::Debug for Timestamp { 133 | // Formats the `Timestamp` as an RFC 3339 string for debugging. 134 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 135 | write!(f, "{self}") 136 | } 137 | } 138 | 139 | #[cfg(all(feature = "json", feature = "chrono"))] 140 | impl From for serde_json::Value { 141 | // The string is formatted according to RFC 3339 with millisecond precision. 142 | fn from(value: Timestamp) -> Self { 143 | serde_json::Value::String( 144 | value 145 | .as_datetime() 146 | .to_rfc3339_opts(chrono::SecondsFormat::Millis, true) 147 | .to_string(), 148 | ) 149 | } 150 | } 151 | 152 | #[cfg(feature = "chrono")] 153 | impl FromStr for Timestamp { 154 | type Err = TimestampError; 155 | 156 | fn from_str(s: &str) -> Result { 157 | let datetime = 158 | DateTime::parse_from_rfc3339(s).map_err(|_| TimestampError::Parse(s.to_string()))?; 159 | Timestamp::new(datetime.to_utc()) 160 | } 161 | } 162 | 163 | #[cfg(all(test, feature = "chrono"))] 164 | mod tests { 165 | use super::*; 166 | use chrono::{DateTime, Utc}; 167 | 168 | #[test] 169 | fn new_timestamp_truncates_at_millisecond_precision() { 170 | assert_eq!( 171 | "1996-12-19T16:39:57.123555Z".parse::().unwrap(), 172 | "1996-12-19T16:39:57.123Z".parse::().unwrap() 173 | ) 174 | } 175 | 176 | #[test] 177 | fn constants_are_correctly_computed() { 178 | assert_eq!( 179 | "0000-01-01T00:00:00Z".parse::().unwrap(), 180 | Timestamp::MIN 181 | ); 182 | 183 | assert_eq!( 184 | "9999-12-31T23:59:59.999Z".parse::().unwrap(), 185 | Timestamp::MAX 186 | ); 187 | } 188 | 189 | #[test] 190 | fn timestamp_constructors() { 191 | let unparsable_timestamp: Result = "0000-01-01T00:00:00ZTR".parse(); 192 | assert!(unparsable_timestamp.is_err()); 193 | 194 | let out_of_range_year = DateTime::::UNIX_EPOCH.with_year(10_000).unwrap(); 195 | assert!(Timestamp::new(out_of_range_year).is_err()); 196 | 197 | let parseable_timestamp: Result = "0000-01-01T00:00:00Z".parse(); 198 | assert!(parseable_timestamp.is_ok()) 199 | } 200 | 201 | #[test] 202 | fn parse_accepts_any_timezone() { 203 | assert_eq!( 204 | "0000-01-01T00:00:00Z".parse::().unwrap(), 205 | "0000-01-01T01:00:00+01:00".parse::().unwrap() 206 | ); 207 | } 208 | } 209 | -------------------------------------------------------------------------------- /src/transaction/crdt_value.rs: -------------------------------------------------------------------------------- 1 | use super::ConflictedValue; 2 | use crate::crdts::TypeVariantValue; 3 | use crate::{ExtensionType, MvReg, OrArray, OrMap}; 4 | use std::{fmt, hash::Hash}; 5 | 6 | /// Result of reading a value from a transaction. 7 | /// 8 | /// DSON preserves type conflicts, so reads must handle multiple possibilities. 
9 | /// This enum forces explicit handling of: 10 | /// - Map 11 | /// - Array 12 | /// - Register 13 | /// - Concurrent type conflicts 14 | /// - Missing key 15 | /// 16 | /// # Example 17 | /// 18 | /// ```no_run 19 | /// # use dson::transaction::{MapTransaction, CrdtValue}; 20 | /// # let tx: MapTransaction = todo!(); 21 | /// match tx.get(&"user".to_string()) { 22 | /// Some(CrdtValue::Map(map)) => { /* work with map */ } 23 | /// Some(CrdtValue::Conflicted(conflicts)) => { /* resolve conflict */ } 24 | /// None => { /* key doesn't exist */ } 25 | /// _ => { /* other types */ } 26 | /// } 27 | /// ``` 28 | #[derive(Debug)] 29 | pub enum CrdtValue<'tx, K, C = crate::crdts::NoExtensionTypes> 30 | where 31 | K: Hash + Eq + fmt::Debug + Clone, 32 | C: ExtensionType, 33 | { 34 | /// The value is a map (no type conflict). 35 | Map(&'tx OrMap), 36 | 37 | /// The value is an array (no type conflict). 38 | Array(&'tx OrArray), 39 | 40 | /// The value is a register (no type conflict). 41 | Register(&'tx MvReg), 42 | 43 | /// The value has concurrent type conflicts. 44 | Conflicted(ConflictedValue<'tx, K, C>), 45 | 46 | /// The key exists but all types are empty (bottom). 47 | Empty, 48 | } 49 | 50 | impl<'tx, K, C> CrdtValue<'tx, K, C> 51 | where 52 | K: Hash + Eq + fmt::Debug + Clone, 53 | C: ExtensionType, 54 | { 55 | /// Creates a CrdtValue by classifying a TypeVariantValue. 56 | /// 57 | /// Inspects which CRDT types are non-empty (non-bottom) and returns 58 | /// the appropriate variant: 59 | /// - If multiple types are present: `Conflicted` 60 | /// - If only one type is present: the specific variant (Map/Array/Register) 61 | /// - If all types are empty: `Empty` 62 | pub fn from_type_variant(value: &'tx TypeVariantValue) -> Self { 63 | use crate::dotstores::DotStore; 64 | 65 | // Check if there's a type conflict (multiple types are non-bottom) 66 | let has_multiple_types = { 67 | let mut count = 0; 68 | if !value.map.is_bottom() { 69 | count += 1; 70 | } 71 | if !value.array.is_bottom() { 72 | count += 1; 73 | } 74 | if !value.reg.is_bottom() { 75 | count += 1; 76 | } 77 | count > 1 78 | }; 79 | 80 | if has_multiple_types { 81 | CrdtValue::Conflicted(ConflictedValue::new(value)) 82 | } else if !value.reg.is_bottom() { 83 | CrdtValue::Register(&value.reg) 84 | } else if !value.map.is_bottom() { 85 | CrdtValue::Map(&value.map) 86 | } else if !value.array.is_bottom() { 87 | CrdtValue::Array(&value.array) 88 | } else { 89 | CrdtValue::Empty 90 | } 91 | } 92 | } 93 | 94 | #[cfg(test)] 95 | mod tests { 96 | use super::*; 97 | use crate::crdts::mvreg::MvRegValue; 98 | use crate::crdts::{NoExtensionTypes, TypeVariantValue}; 99 | use crate::dotstores::DotStore; 100 | use crate::sentinel::DummySentinel; 101 | use crate::{CausalDotStore, Identifier, OrMap}; 102 | 103 | #[test] 104 | fn from_type_variant_register_only() { 105 | // Create a TypeVariantValue with only register populated 106 | let store = CausalDotStore::>::default(); 107 | let id = Identifier::new(0, 0); 108 | 109 | let delta = store.store.apply_to_register( 110 | |reg, ctx, id| reg.write(MvRegValue::U64(42), ctx, id), 111 | "key".to_string(), 112 | &store.context, 113 | id, 114 | ); 115 | 116 | let type_variant = delta.store.get(&"key".to_string()).unwrap(); 117 | 118 | // Test from_type_variant 119 | let value: CrdtValue<'_, String> = CrdtValue::from_type_variant(type_variant); 120 | 121 | match value { 122 | CrdtValue::Register(reg) => { 123 | use crate::crdts::snapshot::ToValue; 124 | assert_eq!(reg.value().unwrap(), 
&MvRegValue::U64(42)); 125 | } 126 | _ => panic!("Expected Register variant"), 127 | } 128 | } 129 | 130 | #[test] 131 | fn from_type_variant_empty() { 132 | // Empty TypeVariantValue (all fields are bottom) 133 | let type_variant = TypeVariantValue::::default(); 134 | let value: CrdtValue<'_, String> = CrdtValue::from_type_variant(&type_variant); 135 | 136 | match value { 137 | CrdtValue::Empty => { /* expected */ } 138 | _ => panic!("Expected Empty variant"), 139 | } 140 | } 141 | 142 | #[test] 143 | fn from_type_variant_map_only() { 144 | // Create a TypeVariantValue with only map populated 145 | let store = CausalDotStore::>::default(); 146 | let id = Identifier::new(0, 0); 147 | 148 | // Create nested map 149 | let delta = store.store.apply_to_map( 150 | |map, ctx, id| { 151 | map.apply_to_register( 152 | |reg, ctx, id| reg.write(MvRegValue::String("test".to_string()), ctx, id), 153 | "field".to_string(), 154 | ctx, 155 | id, 156 | ) 157 | }, 158 | "key".to_string(), 159 | &store.context, 160 | id, 161 | ); 162 | 163 | let type_variant = delta.store.get(&"key".to_string()).unwrap(); 164 | let value: CrdtValue<'_, String> = CrdtValue::from_type_variant(type_variant); 165 | 166 | match value { 167 | CrdtValue::Map(map) => { 168 | assert!(!map.is_bottom()); 169 | } 170 | _ => panic!("Expected Map variant"), 171 | } 172 | } 173 | 174 | #[test] 175 | fn from_type_variant_array_only() { 176 | // Create a TypeVariantValue with only array populated 177 | let store = CausalDotStore::>::default(); 178 | let id = Identifier::new(0, 0); 179 | 180 | // Create array with one element 181 | let delta = store.store.apply_to_array( 182 | |array, ctx, id| array.insert_idx_register(0, MvRegValue::U64(1), ctx, id), 183 | "key".to_string(), 184 | &store.context, 185 | id, 186 | ); 187 | 188 | let type_variant = delta.store.get(&"key".to_string()).unwrap(); 189 | let value: CrdtValue<'_, String> = CrdtValue::from_type_variant(type_variant); 190 | 191 | match value { 192 | CrdtValue::Array(array) => { 193 | assert_eq!(array.len(), 1); 194 | } 195 | _ => panic!("Expected Array variant"), 196 | } 197 | } 198 | 199 | #[test] 200 | fn from_type_variant_conflicted() { 201 | // Create a TypeVariantValue with multiple types (type conflict) 202 | let store = CausalDotStore::>::default(); 203 | let id1 = Identifier::new(0, 0); 204 | let id2 = Identifier::new(1, 0); 205 | 206 | // Replica 1 writes register 207 | let delta1 = store.store.apply_to_register( 208 | |reg, ctx, id| reg.write(MvRegValue::U64(42), ctx, id), 209 | "key".to_string(), 210 | &store.context, 211 | id1, 212 | ); 213 | 214 | // Replica 2 writes array (concurrent with delta1) 215 | let delta2 = store.store.apply_to_array( 216 | |array, ctx, id| { 217 | array.insert_idx_register(0, MvRegValue::String("conflict".to_string()), ctx, id) 218 | }, 219 | "key".to_string(), 220 | &store.context, 221 | id2, 222 | ); 223 | 224 | // Join both deltas to create conflict 225 | let combined = delta1.join(delta2, &mut DummySentinel).unwrap(); 226 | let type_variant = combined.store.get(&"key".to_string()).unwrap(); 227 | 228 | let value: CrdtValue<'_, String> = CrdtValue::from_type_variant(type_variant); 229 | 230 | match value { 231 | CrdtValue::Conflicted(conflicts) => { 232 | assert!(conflicts.has_register()); 233 | assert!(conflicts.has_array()); 234 | assert_eq!(conflicts.conflict_count(), 2); 235 | } 236 | _ => panic!("Expected Conflicted variant, got {value:?}"), 237 | } 238 | } 239 | } 240 | 
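A minimal end-to-end sketch of the classification above, mirroring the register-only test: the `CausalDotStore<OrMap<String>>` setup, the `"key"` name, and the written value are illustrative choices, not part of the module's API.

```
use dson::transaction::CrdtValue;
use dson::crdts::mvreg::MvRegValue;
use dson::{CausalDotStore, Identifier, OrMap};

let store = CausalDotStore::<OrMap<String>>::default();
let id = Identifier::new(0, 0);

// Produce a delta that writes a register under "key".
let delta = store.store.apply_to_register(
    |reg, ctx, id| reg.write(MvRegValue::U64(42), ctx, id),
    "key".to_string(),
    &store.context,
    id,
);

// Classify whatever now lives under "key".
let value: CrdtValue<'_, String> =
    CrdtValue::from_type_variant(delta.store.get(&"key".to_string()).unwrap());
match value {
    CrdtValue::Register(_) => { /* a single register, no type conflict */ }
    CrdtValue::Conflicted(conflicts) => { /* inspect conflicts.conflict_count() etc. */ }
    CrdtValue::Empty => { /* the key exists but every type is bottom */ }
    _ => { /* map or array */ }
}
```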
-------------------------------------------------------------------------------- /benches/iai.rs: -------------------------------------------------------------------------------- 1 | // (c) Copyright 2025 Helsing GmbH. All rights reserved. 2 | #![cfg_attr(not(target_os = "linux"), allow(dead_code, unused_imports))] 3 | 4 | use dson::{ 5 | CausalContext, CausalDotStore, Dot, Identifier, MvReg, OrArray, OrMap, api, 6 | crdts::{NoExtensionTypes, mvreg::MvRegValue}, 7 | sentinel::DummySentinel, 8 | }; 9 | use iai_callgrind::{library_benchmark, library_benchmark_group, main}; 10 | use std::hint::black_box; 11 | 12 | include!(concat!(env!("OUT_DIR"), "/random_dots.rs")); 13 | 14 | fn setup_array(n: usize) -> (Identifier, CausalDotStore>) { 15 | dson::enable_determinism(); 16 | 17 | let omni_id = Identifier::new(1, 0); 18 | let mut omni = CausalDotStore::>::default(); 19 | for i in 0..n { 20 | let add = api::array::insert_register( 21 | |cc, id| MvReg::default().write(MvRegValue::Bool(true), cc, id), 22 | i, 23 | )(&omni.store, &omni.context, omni_id); 24 | omni.consume(add, &mut DummySentinel).unwrap(); 25 | } 26 | (omni_id, omni) 27 | } 28 | 29 | #[library_benchmark] 30 | #[bench::medium(setup_array(255))] 31 | fn array_unshift((id, omni): (Identifier, CausalDotStore>)) { 32 | let omni = black_box(omni); 33 | let insert = api::array::insert_register( 34 | |cc, id| MvReg::default().write(MvRegValue::Bool(true), cc, id), 35 | 0, 36 | )(&omni.store, &omni.context, id); 37 | black_box(insert); 38 | } 39 | 40 | #[library_benchmark] 41 | #[bench::medium(setup_array(255))] 42 | fn array_delete((id, omni): (Identifier, CausalDotStore>)) { 43 | let omni = black_box(omni); 44 | let delete = api::array::delete(128)(&omni.store, &omni.context, id); 45 | black_box(delete); 46 | } 47 | 48 | #[library_benchmark] 49 | #[bench::medium(setup_array(255))] 50 | fn array_update((id, omni): (Identifier, CausalDotStore>)) { 51 | let omni = black_box(omni); 52 | let update = api::array::apply_to_register( 53 | |old, cc, id| old.write(MvRegValue::Bool(false), cc, id), 54 | 128, 55 | )(&omni.store, &omni.context, id); 56 | black_box(update); 57 | } 58 | 59 | #[library_benchmark] 60 | #[bench::medium(setup_array(255))] 61 | fn array_insert((id, omni): (Identifier, CausalDotStore>)) { 62 | let omni = black_box(omni); 63 | let insert = api::array::insert_register( 64 | |cc, id| MvReg::default().write(MvRegValue::Bool(false), cc, id), 65 | 128, 66 | )(&omni.store, &omni.context, id); 67 | black_box(insert); 68 | } 69 | 70 | #[library_benchmark] 71 | #[bench::medium(setup_array(255))] 72 | fn array_push((id, omni): (Identifier, CausalDotStore>)) { 73 | let omni = black_box(omni); 74 | let insert = api::array::insert_register( 75 | |cc, id| MvReg::default().write(MvRegValue::Bool(true), cc, id), 76 | omni.store.len(), 77 | )(&omni.store, &omni.context, id); 78 | black_box(insert); 79 | } 80 | 81 | fn setup_map(n: usize) -> (Identifier, CausalDotStore>) { 82 | dson::enable_determinism(); 83 | 84 | let omni_id = Identifier::new(1, 0); 85 | let mut omni = CausalDotStore::>::default(); 86 | for i in 0..n { 87 | let add = api::map::apply_to_register( 88 | |_, cc, id| MvReg::default().write(MvRegValue::Bool(true), cc, id), 89 | i.to_string(), 90 | )(&omni.store, &omni.context, omni_id); 91 | omni.consume(add, &mut DummySentinel).unwrap(); 92 | } 93 | (omni_id, omni) 94 | } 95 | 96 | fn setup_direct_crdt_map( 97 | n: usize, 98 | ) -> (Identifier, CausalDotStore>) { 99 | dson::enable_determinism(); 100 | 101 | let omni_id = 
Identifier::new(1, 0); 102 | let mut omni = CausalDotStore::>::default(); 103 | for i in 0..n { 104 | let delta = omni.store.apply_to_register( 105 | |reg, ctx, id| reg.write(MvRegValue::Bool(true), ctx, id), 106 | i.to_string(), 107 | &omni.context, 108 | omni_id, 109 | ); 110 | omni.consume(delta, &mut DummySentinel).unwrap(); 111 | } 112 | (omni_id, omni) 113 | } 114 | 115 | #[library_benchmark] 116 | #[bench::medium(setup_map(255))] 117 | fn map_insert((id, omni): (Identifier, CausalDotStore>)) { 118 | let omni = black_box(omni); 119 | let insert = api::map::apply_to_register( 120 | |_, cc, id| MvReg::default().write(MvRegValue::Bool(true), cc, id), 121 | "duck".into(), 122 | )(&omni.store, &omni.context, id); 123 | black_box(insert); 124 | } 125 | 126 | #[library_benchmark] 127 | #[bench::medium(setup_map(255))] 128 | fn map_remove((id, omni): (Identifier, CausalDotStore>)) { 129 | let omni = black_box(omni); 130 | let remove = api::map::remove("128")(&omni.store, &omni.context, id); 131 | black_box(remove); 132 | } 133 | 134 | #[library_benchmark] 135 | #[bench::medium(setup_map(255))] 136 | fn map_update((id, omni): (Identifier, CausalDotStore>)) { 137 | let omni = black_box(omni); 138 | let update = api::map::apply_to_register( 139 | |old, cc, id| old.write(MvRegValue::Bool(true), cc, id), 140 | "128".into(), 141 | )(&omni.store, &omni.context, id); 142 | black_box(update); 143 | } 144 | 145 | #[library_benchmark] 146 | #[bench::medium(setup_direct_crdt_map(255))] 147 | fn direct_crdt_map_insert( 148 | (id, omni): (Identifier, CausalDotStore>), 149 | ) { 150 | let omni = black_box(omni); 151 | let insert = omni.store.apply_to_register( 152 | |reg, ctx, id| reg.write(MvRegValue::Bool(true), ctx, id), 153 | "duck".to_string(), 154 | &omni.context, 155 | id, 156 | ); 157 | black_box(insert); 158 | } 159 | 160 | #[library_benchmark] 161 | #[bench::medium(setup_direct_crdt_map(255))] 162 | fn direct_crdt_map_remove( 163 | (id, omni): (Identifier, CausalDotStore>), 164 | ) { 165 | let omni = black_box(omni); 166 | let remove = omni.store.remove(&"128".to_string(), &omni.context, id); 167 | black_box(remove); 168 | } 169 | 170 | #[library_benchmark] 171 | #[bench::medium(setup_direct_crdt_map(255))] 172 | fn direct_crdt_map_update( 173 | (id, omni): (Identifier, CausalDotStore>), 174 | ) { 175 | let omni = black_box(omni); 176 | let update = omni.store.apply_to_register( 177 | |reg, ctx, id| reg.write(MvRegValue::Bool(true), ctx, id), 178 | "128".to_string(), 179 | &omni.context, 180 | id, 181 | ); 182 | black_box(update); 183 | } 184 | 185 | fn setup_register() -> (Identifier, CausalDotStore) { 186 | dson::enable_determinism(); 187 | 188 | let omni_id = Identifier::new(1, 0); 189 | let omni = CausalDotStore::::default(); 190 | let write = api::register::write(MvRegValue::Bool(false))(&omni.store, &omni.context, omni_id); 191 | (omni_id, omni.join(write, &mut DummySentinel).unwrap()) 192 | } 193 | 194 | #[library_benchmark] 195 | #[bench::bool(setup_register())] 196 | fn register_write((id, omni): (Identifier, CausalDotStore)) { 197 | let omni = black_box(omni); 198 | let write = api::register::write(MvRegValue::Bool(true))(&omni.store, &omni.context, id); 199 | black_box(write); 200 | } 201 | 202 | #[library_benchmark] 203 | #[bench::bool(setup_register())] 204 | fn register_clear((id, omni): (Identifier, CausalDotStore)) { 205 | let omni = black_box(omni); 206 | let clear = api::register::clear()(&omni.store, &omni.context, id); 207 | black_box(clear); 208 | } 209 | 210 | struct Ccs { 
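    // Causal contexts built once in `setup_cc` from the generated BIG*/SMALL* dot sets; the
    // cc_join_* benchmarks below union them pairwise (small2 is currently unused, hence the allow).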
211 | big1: CausalContext, 212 | big2: CausalContext, 213 | small1: CausalContext, 214 | #[allow(dead_code)] 215 | small2: CausalContext, 216 | } 217 | 218 | fn setup_cc() -> Ccs { 219 | dson::enable_determinism(); 220 | 221 | let big1 = CausalContext::from_iter(BIG1.iter().copied()); 222 | let big2 = CausalContext::from_iter(BIG2.iter().copied()); 223 | let small1 = CausalContext::from_iter(SMALL1.iter().copied()); 224 | let small2 = CausalContext::from_iter(SMALL2.iter().copied()); 225 | Ccs { 226 | big1, 227 | big2, 228 | small1, 229 | small2, 230 | } 231 | } 232 | 233 | #[library_benchmark] 234 | #[bench::id(setup_cc())] 235 | fn cc_join_big_small(ccs: Ccs) { 236 | let mut ccs = black_box(ccs); 237 | ccs.big1.union(&ccs.small1); 238 | black_box(ccs); 239 | } 240 | 241 | #[library_benchmark] 242 | #[bench::id(setup_cc())] 243 | fn cc_join_big_big(ccs: Ccs) { 244 | let mut ccs = black_box(ccs); 245 | ccs.big1.union(&ccs.big2); 246 | black_box(ccs); 247 | } 248 | 249 | library_benchmark_group!( 250 | name = arrays; 251 | benchmarks = array_unshift, array_delete, array_update, array_insert 252 | ); 253 | library_benchmark_group!( 254 | name = maps; 255 | benchmarks = map_insert, map_remove, map_update 256 | ); 257 | library_benchmark_group!( 258 | name = direct_crdt_maps; 259 | benchmarks = direct_crdt_map_insert, direct_crdt_map_remove, direct_crdt_map_update 260 | ); 261 | library_benchmark_group!( 262 | name = registers; 263 | benchmarks = register_write, register_clear 264 | ); 265 | library_benchmark_group!( 266 | name = causal_contexts; 267 | benchmarks = cc_join_big_small, cc_join_big_big 268 | ); 269 | 270 | #[cfg(target_os = "linux")] 271 | main!( 272 | library_benchmark_groups = arrays, 273 | maps, 274 | direct_crdt_maps, 275 | registers, 276 | causal_contexts 277 | ); 278 | 279 | #[cfg(not(target_os = "linux"))] 280 | fn main() {} 281 | -------------------------------------------------------------------------------- /src/api/array.rs: -------------------------------------------------------------------------------- 1 | // (c) Copyright 2025 Helsing GmbH. All rights reserved. 2 | use crate::{ 3 | CausalContext, CausalDotStore, ExtensionType, Identifier, MvReg, OrArray, OrMap, 4 | crdts::{ 5 | TypeVariantValue, Value, 6 | orarray::{Position, Uid}, 7 | snapshot::{self, ToValue}, 8 | }, 9 | }; 10 | use std::{convert::Infallible, fmt}; 11 | 12 | /* 13 | /// insert(𝑖𝑑𝑥, 𝑜𝛿 𝑖 ) – given an index 𝑖𝑑𝑥 and a method 𝑜𝛿 14 | /// 𝑖 from 15 | /// the API of some CRDT of type 𝑉 , The method assigns a 16 | /// unique id 𝑢𝑖𝑑, assigns a stable position identifier 𝑝 such that 17 | /// the new element in the sorted array appears at index 𝑖𝑑𝑥, 18 | /// and invokes apply(𝑢𝑖𝑑, 𝑜𝛿 19 | /// 𝑖 , 𝑝). 20 | update(𝑖𝑑𝑥, 𝑜𝛿 21 | 𝑖 ) – given an index 𝑖𝑑𝑥 and a method 𝑜𝛿 22 | 𝑖 of 23 | some CRDT type 𝑉 , The method finds the 𝑢𝑖𝑑 corresponding 24 | to the element at index 𝑖𝑑𝑥, finds the position 𝑝, and invokes 25 | apply(𝑢𝑖𝑑, 𝑜𝛿 26 | 𝑖 , 𝑝). 27 | move(𝑜𝑙𝑑_𝑖𝑑𝑥, 𝑛𝑒𝑤_𝑖𝑑𝑥) – given two indexes, finds the ele- 28 | ment 𝑢𝑖𝑑 corresponding to the element at index 𝑜𝑙𝑑_𝑖𝑑𝑥, 29 | calculates the stable position identifier 𝑝 such that the el- 30 | ement in the sorted array will be at index 𝑛𝑒𝑤_𝑖𝑑𝑥, and 31 | invokes move(𝑢𝑖𝑑, 𝑝). 32 | delete(𝑖𝑑𝑥) – given an index 𝑖𝑑𝑥, finds the element 𝑢𝑖𝑑 corre- 33 | sponding to the element at index 𝑖𝑑𝑥, and invokes delete(𝑢𝑖𝑑). 34 | get(𝑖𝑑𝑥) – given an index 𝑖𝑑𝑥, finds the element 𝑢𝑖𝑑 corre- 35 | sponding to the element at index 𝑖𝑑𝑥, and invokes get(𝑢𝑖𝑑). 
36 | */ 37 | 38 | /// Returns the values of this array without collapsing conflicts. 39 | pub fn values(m: &OrArray) -> snapshot::OrArray>> 40 | where 41 | C: ExtensionType, 42 | { 43 | m.values() 44 | } 45 | 46 | /// Returns the values of this array assuming (and asserting) no conflicts on element values. 47 | // NOTE: A type alias won't help much here :melt:. 48 | #[allow(clippy::type_complexity)] 49 | pub fn value( 50 | m: &OrArray, 51 | ) -> Result< 52 | snapshot::OrArray>>, 53 | Box as ToValue>::LeafValue>>, 54 | > 55 | where 56 | C: ExtensionType, 57 | { 58 | m.value() 59 | } 60 | 61 | /// Creates a new array. 62 | pub fn create() -> impl Fn(&OrArray, &CausalContext, Identifier) -> CausalDotStore> 63 | where 64 | C: ExtensionType + fmt::Debug + PartialEq, 65 | { 66 | move |m, cc, id| m.create(cc, id) 67 | } 68 | 69 | /// Inserts a new element at the given index. 70 | pub fn insert( 71 | o: O, 72 | idx: usize, 73 | ) -> impl FnOnce(&OrArray, &CausalContext, Identifier) -> CausalDotStore> 74 | where 75 | O: FnOnce(&CausalContext, Identifier) -> CausalDotStore>, 76 | C: ExtensionType + fmt::Debug + PartialEq, 77 | { 78 | move |m, cc, id| { 79 | let uid = cc.next_dot_for(id).into(); 80 | let p = create_position_for_index(m, idx); 81 | m.insert(uid, o, p, cc, id) 82 | } 83 | } 84 | 85 | /// Inserts a new map at the given index. 86 | pub fn insert_map( 87 | o: O, 88 | idx: usize, 89 | ) -> impl FnOnce(&OrArray, &CausalContext, Identifier) -> CausalDotStore> 90 | where 91 | O: FnOnce(&CausalContext, Identifier) -> CausalDotStore>, 92 | C: ExtensionType + fmt::Debug + PartialEq, 93 | { 94 | insert(move |cc, id| (o)(cc, id).map_store(Value::Map), idx) 95 | } 96 | 97 | /// Inserts a new array at the given index. 98 | pub fn insert_array( 99 | o: O, 100 | idx: usize, 101 | ) -> impl FnOnce(&OrArray, &CausalContext, Identifier) -> CausalDotStore> 102 | where 103 | O: FnOnce(&CausalContext, Identifier) -> CausalDotStore>, 104 | C: ExtensionType + fmt::Debug + PartialEq, 105 | { 106 | insert(move |cc, id| (o)(cc, id).map_store(Value::Array), idx) 107 | } 108 | 109 | /// Inserts a new register at the given index. 110 | pub fn insert_register( 111 | o: O, 112 | idx: usize, 113 | ) -> impl FnOnce(&OrArray, &CausalContext, Identifier) -> CausalDotStore> 114 | where 115 | O: FnOnce(&CausalContext, Identifier) -> CausalDotStore, 116 | C: ExtensionType + fmt::Debug + PartialEq, 117 | { 118 | insert(move |cc, id| (o)(cc, id).map_store(Value::Register), idx) 119 | } 120 | 121 | /// Applies a function to the element at the given index. 122 | pub fn apply( 123 | o: O, 124 | idx: usize, 125 | ) -> impl FnOnce(&OrArray, &CausalContext, Identifier) -> CausalDotStore> 126 | where 127 | O: FnOnce(&TypeVariantValue, &CausalContext, Identifier) -> CausalDotStore>, 128 | C: ExtensionType + fmt::Debug + PartialEq, 129 | { 130 | move |m, cc, id| { 131 | let uid = uid_from_index(m, idx); 132 | assert_ne!(idx, m.len(), "index out of bounds"); 133 | let p = create_position_for_index(m, idx); 134 | m.apply(uid, o, p, cc, id) 135 | } 136 | } 137 | 138 | /// Applies a function to the map at the given index. 
139 | pub fn apply_to_map( 140 | o: O, 141 | idx: usize, 142 | ) -> impl FnOnce(&OrArray, &CausalContext, Identifier) -> CausalDotStore> 143 | where 144 | O: FnOnce(&OrMap, &CausalContext, Identifier) -> CausalDotStore>, 145 | C: ExtensionType + fmt::Debug + PartialEq, 146 | { 147 | apply( 148 | move |m, cc, id| (o)(&m.map, cc, id).map_store(Value::Map), 149 | idx, 150 | ) 151 | } 152 | 153 | /// Applies a function to the array at the given index. 154 | pub fn apply_to_array( 155 | o: O, 156 | idx: usize, 157 | ) -> impl FnOnce(&OrArray, &CausalContext, Identifier) -> CausalDotStore> 158 | where 159 | O: FnOnce(&OrArray, &CausalContext, Identifier) -> CausalDotStore>, 160 | C: ExtensionType + fmt::Debug + PartialEq, 161 | { 162 | apply( 163 | move |m, cc, id| (o)(&m.array, cc, id).map_store(Value::Array), 164 | idx, 165 | ) 166 | } 167 | 168 | /// Applies a function to the register at the given index. 169 | pub fn apply_to_register( 170 | o: O, 171 | idx: usize, 172 | ) -> impl FnOnce(&OrArray, &CausalContext, Identifier) -> CausalDotStore> 173 | where 174 | O: FnOnce(&MvReg, &CausalContext, Identifier) -> CausalDotStore, 175 | C: ExtensionType + fmt::Debug + PartialEq, 176 | { 177 | apply( 178 | move |m, cc, id| (o)(&m.reg, cc, id).map_store(Value::Register), 179 | idx, 180 | ) 181 | } 182 | 183 | /// Moves an element from one index to another. 184 | pub fn mv( 185 | from: usize, 186 | to: usize, 187 | ) -> impl Fn(&OrArray, &CausalContext, Identifier) -> CausalDotStore> 188 | where 189 | C: ExtensionType + fmt::Debug + PartialEq, 190 | { 191 | move |m, cc, id| { 192 | let uid = uid_from_index(m, from); 193 | let p = create_position_for_index(m, to); 194 | m.mv(uid, p, cc, id) 195 | } 196 | } 197 | 198 | /// Deletes an element at the given index. 199 | pub fn delete<'s, C>( 200 | idx: usize, 201 | ) -> impl Fn(&OrArray, &CausalContext, Identifier) -> CausalDotStore> + 's 202 | where 203 | C: ExtensionType + fmt::Debug + PartialEq, 204 | { 205 | move |m, cc, id| { 206 | let uid = uid_from_index(m, idx); 207 | m.delete(uid, cc, id) 208 | } 209 | } 210 | 211 | /// Clears the array. 212 | pub fn clear() -> impl Fn(&OrArray, &CausalContext, Identifier) -> CausalDotStore> 213 | where 214 | C: ExtensionType + fmt::Debug + PartialEq, 215 | { 216 | move |m, cc, id| m.clear(cc, id) 217 | } 218 | 219 | fn ids(m: &OrArray) -> Vec<((), Uid, Position)> { 220 | // TODO(https://github.com/rust-lang/rust/issues/61695): use into_ok 221 | m.with_list(|_, _, _| Ok::<_, Infallible>(Some(()))) 222 | .unwrap() 223 | } 224 | 225 | /// Computes the [`Position`] a new element should have to end up at `[idx]`. 226 | /// 227 | /// Inserting a new element with the given [`Position`] will end up shifting all later elements to 228 | /// the rigth by one. For example, inserting an element with position `create_position_for_index(_, 229 | /// 0)` will make the current `[0]` be at `[1]`, the current `[1]` at `[2]`, and so on. 230 | fn create_position_for_index(m: &OrArray, idx: usize) -> Position { 231 | // NOTE: the original code passes cc.id() to the Position::between calls here, but that 232 | // argument is ignored, so it's removed in our implementation; 233 | 234 | // we don't have to sort all the items to resolve the first/last position. 235 | // not doing the sort saves us from the `.collect` in `with_list`, which would result in a 236 | // `Vec` that gets pretty much immediately thrown away afterwards. 237 | // TODO: cache min/max Position inside OrArray maybe? 
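    // For illustration (hypothetical positions): with existing elements at positions p0 < p1 < p2,
    // an insert at idx == 1 resolves to Position::between(Some(p0), Some(p1)), which sorts after p0
    // and before p1, shifting the old [1] and [2] one slot to the right.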
238 | if idx == 0 { 239 | let min_p = m.iter_as_is().map(|(_, _, p)| p).min(); 240 | return Position::between(None, min_p); 241 | } 242 | if idx == m.len() { 243 | let max_p = m.iter_as_is().map(|(_, _, p)| p).max(); 244 | return Position::between(max_p, None); 245 | } 246 | 247 | assert!( 248 | idx < m.len(), 249 | "index out of bounds ({idx} when length is {})", 250 | m.len() 251 | ); 252 | // NOTE: we know here that !m.is_empty(), otherwise we'd either hit idx == 0 or the asset. 253 | 254 | let ids = ids(m); 255 | let pos_at_index = ids.get(idx).map(|(_, _, p)| *p); 256 | let pos_at_previous_index = if idx == 0 { 257 | None 258 | } else { 259 | Some( 260 | ids.get(idx - 1) 261 | .expect("we check for out-of-bounds above") 262 | .2, 263 | ) 264 | }; 265 | Position::between(pos_at_previous_index, pos_at_index) 266 | } 267 | 268 | fn uid_from_index(m: &OrArray, idx: usize) -> Uid { 269 | ids(m)[idx].1 270 | } 271 | -------------------------------------------------------------------------------- /examples/conflicts.rs: -------------------------------------------------------------------------------- 1 | // (c) Copyright 2025 Helsing GmbH. All rights reserved. 2 | //! This example demonstrates how dson handles concurrent edits and resolves conflicts. 3 | //! We simulate two replicas of a user profile, make conflicting changes to the same fields, 4 | //! and then merge them to observe the final, converged state. 5 | use dson::{ 6 | CausalDotStore, Identifier, MvReg, OrMap, 7 | crdts::{ 8 | NoExtensionTypes, Value, 9 | mvreg::MvRegValue, 10 | snapshot::{AllValues, ToValue}, 11 | }, 12 | sentinel::DummySentinel, 13 | }; 14 | use std::error::Error; 15 | 16 | // The data model for our user profile is a map with string keys. 17 | // - "name": A Multi-Value Register (MvReg) for the user's name. Concurrent writes will be preserved as conflicts. 18 | // - "tags": An Observed-Remove Array (OrArray) of MvReg for tags. 19 | // - "settings": A nested Observed-Remove Map (OrMap) for user settings. 20 | 21 | fn main() -> Result<(), Box> { 22 | // SETUP: TWO REPLICAS 23 | // We create two replicas, A and B, each with a unique identifier. 24 | // Both start with an empty CausalDotStore, which will hold our OrMap-based user profile. 25 | let replica_a_id = Identifier::new(0, 0); 26 | let mut replica_a_state = CausalDotStore::>::default(); 27 | 28 | let replica_b_id = Identifier::new(1, 0); 29 | let mut replica_b_state = CausalDotStore::>::default(); 30 | 31 | // INITIAL STATE on Replica A 32 | println!("1. Replica A creates an initial user profile."); 33 | // We create a "user" map and set the "name" field to "Alice". 34 | // This operation generates a delta (`delta_a1`) representing the change. 35 | let delta_a1 = dson::api::map::apply_to_map::<_, NoExtensionTypes, _>( 36 | |map, ctx, id| { 37 | // Set name in the user map 38 | dson::api::map::apply_to_register( 39 | |reg, ctx, id| reg.write("Alice".to_string().into(), ctx, id), 40 | "name".to_string(), 41 | )(map, ctx, id) 42 | }, 43 | "user".to_string(), 44 | )( 45 | &replica_a_state.store, 46 | &replica_a_state.context, 47 | replica_a_id, 48 | ); 49 | 50 | // Apply the delta to Replica A's state. 51 | replica_a_state.join_or_replace_with(delta_a1.store.clone(), &delta_a1.context); 52 | 53 | // SYNC: REPLICA B GETS INITIAL STATE 54 | println!("2. Replica B syncs with Replica A."); 55 | // Replica B applies the delta from Replica A to get the initial state. 56 | // After this, both replicas are in sync. 
57 | replica_b_state.join_or_replace_with(delta_a1.store, &delta_a1.context); 58 | assert_eq!(replica_a_state, replica_b_state); 59 | println!(" Initial state synced: {replica_a_state:?}"); 60 | 61 | // CONCURRENT EDITS 62 | println!("\n3. Replicas A and B make concurrent edits without syncing."); 63 | 64 | // On Replica A: Change the name to "Alice B." and add a "rust" tag. 65 | // These changes are based on the initial state. 66 | let delta_a2 = dson::api::map::apply_to_map::<_, NoExtensionTypes, _>( 67 | |map, ctx, id| { 68 | // 1. Change the name 69 | let map_after_name_change = dson::api::map::apply_to_register( 70 | |reg, ctx, id| reg.write("Alice B.".to_string().into(), ctx, id), 71 | "name".to_string(), 72 | )(map, ctx, id); 73 | 74 | // 2. Add a tag to the 'tags' array 75 | let map_after_tag_add = dson::api::map::apply_to_array( 76 | |array, ctx, id| { 77 | dson::api::array::insert( 78 | // Each element in the array is a register for the tag string 79 | |ctx, _id| { 80 | MvReg::default() 81 | .write("rust".to_string().into(), ctx, id) 82 | .map_store(Value::Register) 83 | }, 84 | array.len(), // Insert at the end 85 | )(array, ctx, id) 86 | }, 87 | "tags".to_string(), 88 | )( 89 | &map_after_name_change.store, 90 | &map_after_name_change.context, 91 | id, 92 | ); 93 | 94 | // Join the two operations into a single delta 95 | map_after_name_change 96 | .join(map_after_tag_add, &mut DummySentinel) 97 | .expect("DummySentinel is infallible") 98 | }, 99 | "user".to_string(), 100 | )( 101 | &replica_a_state.store, 102 | &replica_a_state.context, 103 | replica_a_id, 104 | ); 105 | // Apply the changes locally to Replica A. 106 | replica_a_state.join_or_replace_with(delta_a2.store.clone(), &delta_a2.context); 107 | println!(" Replica A: Changed name to 'Alice B.', added 'rust' tag."); 108 | 109 | // On Replica B: Change name to "Alice C." (a direct conflict with Replica A's change), 110 | // add a "crdt" tag, and add a new "dark_mode" setting. 111 | let delta_b1 = dson::api::map::apply_to_map::<_, NoExtensionTypes, _>( 112 | |map, ctx, id| { 113 | // 1. Change the name, creating a conflict with Replica A's edit. 114 | let map_after_name_change = dson::api::map::apply_to_register( 115 | |reg, ctx, id| reg.write("Alice C.".to_string().into(), ctx, id), 116 | "name".to_string(), 117 | )(map, ctx, id); 118 | 119 | // 2. Add a "crdt" tag. 120 | let map_after_tag_add = dson::api::map::apply_to_array( 121 | |array, ctx, id| { 122 | dson::api::array::insert( 123 | |ctx, id| { 124 | MvReg::default() 125 | .write("crdt".to_string().into(), ctx, id) 126 | .map_store(Value::Register) 127 | }, 128 | array.len(), 129 | )(array, ctx, id) 130 | }, 131 | "tags".to_string(), 132 | )( 133 | &map_after_name_change.store, 134 | &map_after_name_change.context, 135 | id, 136 | ); 137 | 138 | // Join the name and tag changes 139 | let delta_with_name_and_tag = map_after_name_change 140 | .join(map_after_tag_add, &mut DummySentinel) 141 | .expect("DummySentinel is infallible"); 142 | 143 | // 3. Add a "dark_mode" setting in a nested map. 144 | let delta_with_settings = dson::api::map::apply_to_map( 145 | |settings_map, ctx, id| { 146 | dson::api::map::apply_to_register( 147 | |reg, ctx, id| reg.write(true.into(), ctx, id), 148 | "dark_mode".to_string(), 149 | )(settings_map, ctx, id) 150 | }, 151 | "settings".to_string(), 152 | )( 153 | &delta_with_name_and_tag.store, 154 | &delta_with_name_and_tag.context, 155 | id, 156 | ); 157 | 158 | // Join all changes for Replica B into a final delta. 
159 | delta_with_name_and_tag 160 | .join(delta_with_settings, &mut DummySentinel) 161 | .expect("DummySentinel is infallible") 162 | }, 163 | "user".to_string(), 164 | )( 165 | &replica_b_state.store, 166 | &replica_b_state.context, 167 | replica_b_id, 168 | ); 169 | // Apply the changes locally to Replica B. 170 | replica_b_state.join_or_replace_with(delta_b1.store.clone(), &delta_b1.context); 171 | println!(" Replica B: Changed name to 'Alice C.', added 'crdt' tag, enabled dark_mode."); 172 | 173 | // MERGE 174 | println!("\n4. Merging the concurrent changes."); 175 | // Replica A merges the delta from Replica B. 176 | replica_a_state.join_or_replace_with(delta_b1.store, &delta_b1.context); 177 | // Replica B merges the delta from Replica A. 178 | replica_b_state.join_or_replace_with(delta_a2.store, &delta_a2.context); 179 | // After merging, both replicas should have an identical state, demonstrating convergence. 180 | 181 | // VERIFICATION 182 | println!("\n5. Verifying the converged state."); 183 | assert_eq!(replica_a_state, replica_b_state); 184 | println!(" Replicas have converged to the same state."); 185 | println!(" Final state: {replica_a_state:?}"); 186 | 187 | // Now, let's inspect the converged data structure to see how conflicts were handled. 188 | let user_profile = replica_a_state 189 | .store 190 | .get("user") 191 | .expect("key 'user' should be present"); 192 | 193 | // --- Verify Name Conflict --- 194 | // The concurrent writes to the "name" field result in a conflict. 195 | // The MvReg preserves both values. The application can then decide how to resolve this. 196 | let name_values = user_profile 197 | .map 198 | .get("name") 199 | .unwrap() 200 | .reg 201 | .values() 202 | .into_iter() 203 | .cloned() 204 | .collect::>(); 205 | 206 | assert_eq!( 207 | name_values.len(), 208 | 2, 209 | "Name should have two conflicting values" 210 | ); 211 | assert!(name_values.contains(&MvRegValue::String("Alice B.".to_string()))); 212 | assert!(name_values.contains(&MvRegValue::String("Alice C.".to_string()))); 213 | println!(" SUCCESS: Name field correctly shows conflicting values: {name_values:?}"); 214 | 215 | // --- Verify Tags Array --- 216 | // The 'tags' array should contain both "rust" and "crdt", as they were added concurrently. 217 | let tags = user_profile 218 | .map 219 | .get("tags") 220 | .expect("key 'tags' should be present"); 221 | let tag_values = tags 222 | .array 223 | .values() 224 | .iter() 225 | .map(|v| { 226 | let AllValues::Register(r) = v else { 227 | unreachable!() 228 | }; 229 | // No conflicts are expected within the tags themselves. 230 | assert_eq!(r.len(), 1); 231 | let MvRegValue::String(s) = r.get(0).unwrap() else { 232 | unreachable!() 233 | }; 234 | s.to_owned() 235 | }) 236 | .collect::>(); 237 | 238 | assert_eq!(tag_values.len(), 2, "Tags array should have two elements"); 239 | assert!(tag_values.contains(&"rust".to_string())); 240 | assert!(tag_values.contains(&"crdt".to_string())); 241 | println!(" SUCCESS: Tags array correctly contains: {tag_values:?}"); 242 | 243 | // --- Verify Settings Map --- 244 | // The 'settings' map was only modified by Replica B, so it should exist with the 'dark_mode' key. 245 | let settings = user_profile 246 | .map 247 | .get("settings") 248 | .expect("key 'settings' should be present"); 249 | let dark_mode = settings 250 | .map 251 | .get("dark_mode") 252 | .expect("key 'dark_mode' should be present") 253 | .reg 254 | .value() // We expect a single value since there were no concurrent edits. 
255 | .expect("should be no conflict in dark_mode setting"); 256 | 257 | assert_eq!(*dark_mode, MvRegValue::Bool(true)); 258 | println!(" SUCCESS: Settings map correctly contains: dark_mode -> {dark_mode:?}"); 259 | 260 | Ok(()) 261 | } 262 | -------------------------------------------------------------------------------- /src/sentinel.rs: -------------------------------------------------------------------------------- 1 | // (c) Copyright 2025 Helsing GmbH. All rights reserved. 2 | //! Observe and validate changes to a CRDT. 3 | //! 4 | //! Sentinels are types that can be used to inspect the changes being applied to a CRDT. They are 5 | //! useful for validating that the changes conform to a schema, or simply to observe the changes 6 | //! for any other purpose (for example, logging, metrics, etc). 7 | //! 8 | //! The main entry point for this module is the [`Sentinel`] trait, which is composed of more 9 | //! specialized traits that can be implemented to observe different kinds of changes. 10 | //! 11 | //! For a testing-oriented example, see the `recording_sentinel` module. 12 | 13 | use crate::crdts::ValueType; 14 | use std::convert::Infallible; 15 | 16 | /// Observes and optionally stops a change being applied to a CRDT. 17 | /// 18 | /// This is the base trait that all Sentinels should implement. Different Sentinels may observe 19 | /// different changes, and different data structures may produce different changes, so we've split 20 | /// the actual behaviour into several specialized traits. 21 | /// 22 | /// This trait should normally be paired with [`Visit`] so that the Sentinel can keep track of 23 | /// which node is currently being modified in the document tree. 24 | /// 25 | /// If Error = Infallible, the Sentinel is referred to as an Observer. If it can produce an error, it 26 | /// may be referred to as a Validator. 27 | pub trait Sentinel { 28 | type Error; 29 | } 30 | 31 | /// Observe when a key is added or removed from the document tree. 32 | /// 33 | /// This is how Sentinels can track when container nodes are created. Register values changes can 34 | /// be tracked via the [`ValueSentinel`] trait. 35 | pub trait KeySentinel: Sentinel { 36 | /// Observe and validate the creation of a new entry under the current path. 37 | /// 38 | /// This method may be called _after_ [`ValueSentinel::set`] for a given entry. 39 | fn create_key(&mut self) -> Result<(), Self::Error> { 40 | Ok(()) 41 | } 42 | 43 | /// Observe and validate the deletion of the entry under the current path. 44 | fn delete_key(&mut self) -> Result<(), Self::Error> { 45 | Ok(()) 46 | } 47 | } 48 | 49 | /// Observes when a value's type changes. 50 | /// 51 | /// This is useful for tracking changes involving container values - particularly, when 52 | /// transitioning a value from a container to register or vice-versa, or when creating empty 53 | /// containers. The first case is because updates that switch to/from register values only produce 54 | /// one [`ValueSentinel`] event, as there is no set/unset counterpart for the container value. 55 | /// This leads to incorrectly interpreting the change as an addition or removal. The second case 56 | /// is because no [`ValueSentinel`] events are produced at all, which leaves the container type 57 | /// ambiguous. In either case these type change events are the only way to get the complete picture. 58 | #[expect(unused_variables)] 59 | pub trait TypeSentinel: Sentinel { 60 | /// Observe and validate setting a type at the current path. 
61 | fn set_type(&mut self, value_type: ValueType) -> Result<(), Self::Error> { 62 | Ok(()) 63 | } 64 | 65 | /// Observe and validate unsetting a type at the current path. 66 | fn unset_type(&mut self, value_type: ValueType) -> Result<(), Self::Error> { 67 | Ok(()) 68 | } 69 | } 70 | 71 | /// Observe when values are set or unset at the current path. 72 | /// 73 | /// Updates are represented as a value unset and another one set. There are no ordering 74 | /// guarantees between the calls. 75 | #[expect(unused_variables)] 76 | pub trait ValueSentinel: Sentinel { 77 | /// Observe and validate setting a new value under the current path. 78 | fn set(&mut self, value: &V) -> Result<(), Self::Error> { 79 | Ok(()) 80 | } 81 | 82 | /// Observe and validate unsetting the value under the current path. 83 | fn unset(&mut self, value: V) -> Result<(), Self::Error> { 84 | Ok(()) 85 | } 86 | } 87 | 88 | /// Enables a Sentinel to keep track of document traversal. 89 | /// 90 | /// During a document mutation (typically via [`DotStoreJoin::join`](crate::DotStoreJoin)), the 91 | /// document tree is traversed in a depth-first manner and each map field or array element visited 92 | /// is reported via this interface, so that the Sentinel can update its internal pointer. 93 | /// 94 | /// Typically, you want to implement this for [`String`] (to visit [`OrMap`](crate::OrMap) values) 95 | /// and [`Uid`](crate::crdts::orarray::Uid) (to visit [`OrArray`](crate::OrArray)), for example. 96 | /// 97 | /// NOTE: any nodes in the document tree may be visited, regardless of whether they contain a change. 98 | /// Additionally, nodes that are visited may not exist in the final tree. 99 | #[expect(unused_variables)] 100 | pub trait Visit: Sentinel { 101 | /// Descend into a map field or array element. 102 | fn enter(&mut self, key: &K) -> Result<(), Self::Error> { 103 | Ok(()) 104 | } 105 | /// Backtrack to the parent container. 106 | /// 107 | /// NOTE: may not be called if the Sentinel produces an Err. 108 | fn exit(&mut self) -> Result<(), Self::Error> { 109 | Ok(()) 110 | } 111 | } 112 | 113 | /// A Sentinel that does nothing. 114 | /// 115 | /// This is useful when the join doesn't need any introspection. Using it helps the compiler 116 | /// optimise some code away. 117 | pub struct DummySentinel; 118 | 119 | impl Sentinel for DummySentinel { 120 | type Error = Infallible; 121 | } 122 | 123 | impl KeySentinel for DummySentinel {} 124 | 125 | impl TypeSentinel for DummySentinel {} 126 | 127 | impl Visit for DummySentinel {} 128 | 129 | impl ValueSentinel for DummySentinel {} 130 | 131 | #[cfg(test)] 132 | pub(crate) mod test { 133 | use super::*; 134 | use std::{collections::BTreeMap, fmt}; 135 | 136 | /// A Sentinel that always rejects changes. 
137 | pub struct NoChangeValidator; 138 | 139 | impl Sentinel for NoChangeValidator { 140 | type Error = (); 141 | } 142 | 143 | impl Visit for NoChangeValidator {} 144 | 145 | impl KeySentinel for NoChangeValidator { 146 | fn create_key(&mut self) -> Result<(), Self::Error> { 147 | Err(()) 148 | } 149 | 150 | fn delete_key(&mut self) -> Result<(), Self::Error> { 151 | Err(()) 152 | } 153 | } 154 | 155 | impl TypeSentinel for NoChangeValidator { 156 | fn set_type(&mut self, _value_type: ValueType) -> Result<(), Self::Error> { 157 | Err(()) 158 | } 159 | 160 | fn unset_type(&mut self, _value_type: ValueType) -> Result<(), Self::Error> { 161 | Err(()) 162 | } 163 | } 164 | 165 | impl ValueSentinel for NoChangeValidator { 166 | fn set(&mut self, _value: &V) -> Result<(), Self::Error> { 167 | Err(()) 168 | } 169 | 170 | fn unset(&mut self, _value: V) -> Result<(), Self::Error> { 171 | Err(()) 172 | } 173 | } 174 | 175 | /// A Sentinel that counts keys added or removed and rejects other changes. 176 | #[derive(Debug, Default)] 177 | pub struct KeyCountingValidator { 178 | pub added: usize, 179 | pub removed: usize, 180 | } 181 | impl Sentinel for KeyCountingValidator { 182 | type Error = (); 183 | } 184 | impl Visit for KeyCountingValidator {} 185 | impl KeySentinel for KeyCountingValidator { 186 | fn create_key(&mut self) -> Result<(), Self::Error> { 187 | self.added += 1; 188 | Ok(()) 189 | } 190 | 191 | fn delete_key(&mut self) -> Result<(), Self::Error> { 192 | self.removed += 1; 193 | Ok(()) 194 | } 195 | } 196 | impl TypeSentinel for KeyCountingValidator { 197 | fn set_type(&mut self, _value_type: crate::crdts::ValueType) -> Result<(), Self::Error> { 198 | Err(()) 199 | } 200 | 201 | fn unset_type( 202 | &mut self, 203 | _value_type: crate::crdts::ValueType, 204 | ) -> Result<(), Self::Error> { 205 | Err(()) 206 | } 207 | } 208 | impl ValueSentinel for KeyCountingValidator {} 209 | 210 | /// A Sentinel that counts changes to values and rejects other changes. 211 | /// 212 | /// Setting `permissive` to true disables erroring on key and type changes. 
213 | #[derive(Debug)] 214 | pub struct ValueCountingValidator { 215 | pub added: BTreeMap, 216 | pub removed: BTreeMap, 217 | path: Vec, 218 | permissive: bool, 219 | } 220 | 221 | impl Default for ValueCountingValidator { 222 | fn default() -> Self { 223 | Self { 224 | added: Default::default(), 225 | removed: Default::default(), 226 | path: Default::default(), 227 | permissive: false, 228 | } 229 | } 230 | } 231 | 232 | impl ValueCountingValidator { 233 | pub fn new(permissive: bool) -> Self { 234 | Self { 235 | permissive, 236 | ..Default::default() 237 | } 238 | } 239 | } 240 | 241 | impl Sentinel for ValueCountingValidator { 242 | type Error = String; 243 | } 244 | 245 | impl Visit for ValueCountingValidator 246 | where 247 | K: std::fmt::Debug, 248 | { 249 | fn enter(&mut self, key: &K) -> Result<(), Self::Error> { 250 | self.path.push(format!("{key:?}")); 251 | Ok(()) 252 | } 253 | 254 | fn exit(&mut self) -> Result<(), Self::Error> { 255 | self.path.pop(); 256 | Ok(()) 257 | } 258 | } 259 | 260 | impl KeySentinel for ValueCountingValidator { 261 | fn create_key(&mut self) -> Result<(), Self::Error> { 262 | self.permissive 263 | .then_some(()) 264 | .ok_or(format!("create_key at {}", self.path.join("/"))) 265 | } 266 | 267 | fn delete_key(&mut self) -> Result<(), Self::Error> { 268 | self.permissive 269 | .then_some(()) 270 | .ok_or(format!("delete_key at {}", self.path.join("/"))) 271 | } 272 | } 273 | 274 | impl TypeSentinel for ValueCountingValidator 275 | where 276 | C: fmt::Debug, 277 | { 278 | fn set_type(&mut self, value_type: crate::crdts::ValueType) -> Result<(), Self::Error> { 279 | self.permissive.then_some(()).ok_or(format!( 280 | "set_type: {value_type:?} at {}", 281 | self.path.join("/") 282 | )) 283 | } 284 | 285 | fn unset_type( 286 | &mut self, 287 | value_type: crate::crdts::ValueType, 288 | ) -> Result<(), Self::Error> { 289 | self.permissive.then_some(()).ok_or(format!( 290 | "unset_type: {value_type:?} at {}", 291 | self.path.join("/") 292 | )) 293 | } 294 | } 295 | 296 | impl ValueSentinel for ValueCountingValidator 297 | where 298 | V: std::fmt::Debug + Ord + Clone, 299 | { 300 | fn set(&mut self, value: &V) -> Result<(), Self::Error> { 301 | *self.added.entry(value.clone()).or_default() += 1; 302 | Ok(()) 303 | } 304 | 305 | fn unset(&mut self, value: V) -> Result<(), Self::Error> { 306 | *self.removed.entry(value).or_default() += 1; 307 | Ok(()) 308 | } 309 | } 310 | } 311 | -------------------------------------------------------------------------------- /LICENSE-APACHE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. 
For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 
134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 
193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. -------------------------------------------------------------------------------- /benches/tango.rs: -------------------------------------------------------------------------------- 1 | // (c) Copyright 2025 Helsing GmbH. All rights reserved. 2 | // because we need this below to retain 'static on the borrows of omni 3 | #![allow(clippy::borrow_deref_ref)] 4 | 5 | use dson::{ 6 | CausalContext, CausalDotStore, Dot, Identifier, MvReg, OrArray, OrMap, api, 7 | crdts::{NoExtensionTypes, mvreg::MvRegValue}, 8 | sentinel::DummySentinel, 9 | }; 10 | use std::hint::black_box; 11 | use tango_bench::{IntoBenchmarks, benchmark_fn, tango_benchmarks, tango_main}; 12 | 13 | include!(concat!(env!("OUT_DIR"), "/random_dots.rs")); 14 | 15 | fn array_benchmarks() -> impl IntoBenchmarks { 16 | dson::enable_determinism(); 17 | 18 | let omni_id = Identifier::new(1, 0); 19 | let mut omni = CausalDotStore::>::default(); 20 | for i in 0..255 { 21 | let add = api::array::insert_register( 22 | |cc, id| MvReg::default().write(MvRegValue::Bool(true), cc, id), 23 | i, 24 | )(&omni.store, &omni.context, omni_id); 25 | omni.consume(add, &mut DummySentinel).unwrap(); 26 | } 27 | 28 | let omni: &'static _ = Box::leak(Box::new(omni)); 29 | [ 30 | benchmark_fn("array::unshift", move |b| { 31 | b.iter(move || { 32 | let omni = black_box(&*omni); 33 | api::array::insert_register( 34 | |cc, id| MvReg::default().write(MvRegValue::Bool(true), cc, id), 35 | 0, 36 | )(&omni.store, &omni.context, omni_id) 37 | }) 38 | }), 39 | benchmark_fn("array::delete", move |b| { 40 | b.iter(move || { 41 | let omni = black_box(&*omni); 42 | api::array::delete(128)(&omni.store, &omni.context, omni_id) 43 | }) 44 | }), 45 | benchmark_fn("array::update", move |b| { 46 | b.iter(move || { 47 | let omni = black_box(&*omni); 48 | api::array::apply_to_register( 49 | |old, cc, id| old.write(MvRegValue::Bool(false), cc, id), 50 | 128, 51 | )(&omni.store, &omni.context, omni_id) 52 | }) 53 | }), 54 | benchmark_fn("array::insert", move |b| { 55 | b.iter(move || { 56 | let omni = black_box(&*omni); 57 | api::array::insert_register( 58 | |cc, id| MvReg::default().write(MvRegValue::Bool(false), cc, id), 59 | 128, 60 | )(&omni.store, &omni.context, omni_id) 61 | }) 62 | }), 63 | benchmark_fn("array::push", move |b| { 64 | b.iter(move || { 65 | let omni = black_box(&*omni); 66 | api::array::insert_register( 67 | |cc, id| MvReg::default().write(MvRegValue::Bool(true), cc, id), 68 | omni.store.len(), 69 | )(&omni.store, &omni.context, omni_id) 70 | }) 71 | }), 72 | ] 73 | } 74 | 75 | fn map_benchmarks() -> impl IntoBenchmarks { 76 | dson::enable_determinism(); 77 | 78 | let omni_id = Identifier::new(1, 0); 79 | let mut omni = CausalDotStore::>::default(); 80 | for i in 0..255 { 81 | let add = api::map::apply_to_register( 82 | |_, cc, id| MvReg::default().write(MvRegValue::Bool(true), cc, id), 83 | i.to_string(), 84 | )(&omni.store, &omni.context, omni_id); 85 | omni.consume(add, &mut DummySentinel).unwrap(); 86 | } 87 | 88 | let omni: &'static _ = Box::leak(Box::new(omni)); 89 | [ 90 | benchmark_fn("map::insert", move |b| { 91 | 
b.iter(move || { 92 | let omni = black_box(&*omni); 93 | api::map::apply_to_register( 94 | |_, cc, id| MvReg::default().write(MvRegValue::Bool(true), cc, id), 95 | "duck".into(), 96 | )(&omni.store, &omni.context, omni_id) 97 | }) 98 | }), 99 | benchmark_fn("map::remove", move |b| { 100 | b.iter(move || { 101 | let omni = black_box(&*omni); 102 | api::map::remove("128")(&omni.store, &omni.context, omni_id) 103 | }) 104 | }), 105 | benchmark_fn("map::update", move |b| { 106 | b.iter(move || { 107 | let omni = black_box(&*omni); 108 | api::map::apply_to_register( 109 | |old, cc, id| old.write(MvRegValue::Bool(true), cc, id), 110 | "128".into(), 111 | )(&omni.store, &omni.context, omni_id) 112 | }) 113 | }), 114 | ] 115 | } 116 | 117 | fn direct_crdt_map_benchmarks() -> impl IntoBenchmarks { 118 | dson::enable_determinism(); 119 | 120 | let omni_id = Identifier::new(1, 0); 121 | let mut omni = CausalDotStore::>::default(); 122 | for i in 0..255 { 123 | let delta = omni.store.apply_to_register( 124 | |reg, ctx, id| reg.write(MvRegValue::Bool(true), ctx, id), 125 | i.to_string(), 126 | &omni.context, 127 | omni_id, 128 | ); 129 | omni.consume(delta, &mut DummySentinel).unwrap(); 130 | } 131 | 132 | let omni: &'static _ = Box::leak(Box::new(omni)); 133 | [ 134 | benchmark_fn("direct-crdt::map::insert", move |b| { 135 | b.iter(move || { 136 | let omni = black_box(&*omni); 137 | omni.store.apply_to_register( 138 | |reg, ctx, id| reg.write(MvRegValue::Bool(true), ctx, id), 139 | "duck".to_string(), 140 | &omni.context, 141 | omni_id, 142 | ) 143 | }) 144 | }), 145 | benchmark_fn("direct-crdt::map::remove", move |b| { 146 | b.iter(move || { 147 | let omni = black_box(&*omni); 148 | omni.store 149 | .remove(&"128".to_string(), &omni.context, omni_id) 150 | }) 151 | }), 152 | benchmark_fn("direct-crdt::map::update", move |b| { 153 | b.iter(move || { 154 | let omni = black_box(&*omni); 155 | omni.store.apply_to_register( 156 | |reg, ctx, id| reg.write(MvRegValue::Bool(true), ctx, id), 157 | "128".to_string(), 158 | &omni.context, 159 | omni_id, 160 | ) 161 | }) 162 | }), 163 | ] 164 | } 165 | 166 | fn register_benchmarks() -> impl IntoBenchmarks { 167 | dson::enable_determinism(); 168 | 169 | let omni_id = Identifier::new(1, 0); 170 | let mut omni = CausalDotStore::::default(); 171 | let write = api::register::write(MvRegValue::Bool(false))(&omni.store, &omni.context, omni_id); 172 | omni.consume(write, &mut DummySentinel).unwrap(); 173 | 174 | let omni: &'static _ = Box::leak(Box::new(omni)); 175 | [ 176 | benchmark_fn("register::write", move |b| { 177 | b.iter(move || { 178 | let omni = black_box(&*omni); 179 | api::register::write(MvRegValue::Bool(true))(&omni.store, &omni.context, omni_id) 180 | }) 181 | }), 182 | benchmark_fn("register::clear", move |b| { 183 | b.iter(move || { 184 | let omni = black_box(&*omni); 185 | api::register::clear()(&omni.store, &omni.context, omni_id) 186 | }) 187 | }), 188 | ] 189 | } 190 | 191 | fn transaction_map_benchmarks() -> impl IntoBenchmarks { 192 | dson::enable_determinism(); 193 | let omni_id = Identifier::new(1, 0); 194 | 195 | // Setup for single-op benchmarks (no pre-population, isolate transaction overhead) 196 | [ 197 | benchmark_fn("transaction::map::insert-empty", move |b| { 198 | b.iter(move || { 199 | let mut omni = CausalDotStore::>::default(); 200 | let mut tx = black_box(&mut omni).transact(omni_id); 201 | tx.write_register("duck".to_string(), MvRegValue::Bool(true)); 202 | black_box(tx.commit()) 203 | }) 204 | }), 205 | 
benchmark_fn("transaction::map::insert-with-setup", move |b| { 206 | b.iter(move || { 207 | let mut omni = CausalDotStore::>::default(); 208 | for i in 0..255 { 209 | let delta = omni.store.apply_to_register( 210 | |reg, ctx, id| reg.write(MvRegValue::Bool(true), ctx, id), 211 | i.to_string(), 212 | &omni.context, 213 | omni_id, 214 | ); 215 | omni.consume(delta, &mut DummySentinel).unwrap(); 216 | } 217 | let mut tx = black_box(&mut omni).transact(omni_id); 218 | tx.write_register("duck".to_string(), MvRegValue::Bool(true)); 219 | black_box(tx.commit()) 220 | }) 221 | }), 222 | benchmark_fn("transaction::map::remove-empty", move |b| { 223 | b.iter(move || { 224 | let mut omni = CausalDotStore::>::default(); 225 | let mut tx = black_box(&mut omni).transact(omni_id); 226 | tx.remove("128".to_string()); 227 | black_box(tx.commit()) 228 | }) 229 | }), 230 | benchmark_fn("transaction::map::update-empty", move |b| { 231 | b.iter(move || { 232 | let mut omni = CausalDotStore::>::default(); 233 | let mut tx = black_box(&mut omni).transact(omni_id); 234 | tx.write_register("128".to_string(), MvRegValue::Bool(true)); 235 | black_box(tx.commit()) 236 | }) 237 | }), 238 | ] 239 | } 240 | 241 | fn cc_benchmarks() -> impl IntoBenchmarks { 242 | dson::enable_determinism(); 243 | 244 | let big1 = CausalContext::from_iter(BIG1.iter().copied()); 245 | let big2 = CausalContext::from_iter(BIG2.iter().copied()); 246 | let small1 = CausalContext::from_iter(SMALL1.iter().copied()); 247 | let small2 = CausalContext::from_iter(SMALL2.iter().copied()); 248 | 249 | let big1: &'static _ = Box::leak(Box::new(big1)); 250 | let big2: &'static _ = Box::leak(Box::new(big2)); 251 | let small1: &'static _ = Box::leak(Box::new(small1)); 252 | let small2: &'static _ = Box::leak(Box::new(small2)); 253 | [ 254 | benchmark_fn("causal-context::join::both_same_small", move |b| { 255 | b.iter(|| { 256 | let mut left = black_box(small1.clone()); 257 | let right = black_box(&*small1); 258 | left.union(right) 259 | }) 260 | }), 261 | benchmark_fn("causal-context::join::both_small", move |b| { 262 | b.iter(|| { 263 | let mut left = black_box(small1.clone()); 264 | let right = black_box(&*small2); 265 | left.union(right) 266 | }) 267 | }), 268 | benchmark_fn("causal-context::join::left_big", move |b| { 269 | b.iter(|| { 270 | let mut left = black_box(big1.clone()); 271 | let right = black_box(&*small1); 272 | left.union(right) 273 | }) 274 | }), 275 | benchmark_fn("causal-context::join::right_big", move |b| { 276 | b.iter(|| { 277 | let mut left = black_box(small1.clone()); 278 | let right = black_box(&*big1); 279 | left.union(right) 280 | }) 281 | }), 282 | benchmark_fn("causal-context::join::both_big", move |b| { 283 | b.iter(|| { 284 | let mut left = black_box(big1.clone()); 285 | let right = black_box(&*big2); 286 | left.union(right) 287 | }) 288 | }), 289 | benchmark_fn("causal-context::join::both_same_big", move |b| { 290 | b.iter(|| { 291 | let mut left = black_box(big1.clone()); 292 | let right = black_box(&*big1); 293 | left.union(right) 294 | }) 295 | }), 296 | ] 297 | } 298 | 299 | tango_benchmarks!( 300 | array_benchmarks(), 301 | map_benchmarks(), 302 | direct_crdt_map_benchmarks(), 303 | transaction_map_benchmarks(), 304 | register_benchmarks(), 305 | cc_benchmarks() 306 | ); 307 | tango_main!(); 308 | -------------------------------------------------------------------------------- /tests/transaction_rollback.rs: -------------------------------------------------------------------------------- 1 | //! 
Tests for transaction rollback behavior. 2 | //! 3 | //! When a transaction is dropped without calling commit(), all changes 4 | //! should be rolled back, leaving the original store unchanged. 5 | 6 | use dson::{ 7 | CausalDotStore, Identifier, OrArray, OrMap, 8 | crdts::mvreg::MvRegValue, 9 | transaction::{ArrayTransaction, CrdtValue, MapTransaction}, 10 | }; 11 | 12 | #[test] 13 | fn map_transaction_rollback_register() { 14 | let mut store = CausalDotStore::>::default(); 15 | let id = Identifier::new(0, 0); 16 | 17 | // Create initial state 18 | { 19 | let mut tx = MapTransaction::new(&mut store, id); 20 | tx.write_register("name", MvRegValue::String("Alice".to_string())); 21 | let _delta = tx.commit(); 22 | } 23 | 24 | // Clone store to compare later 25 | let original_store = store.clone(); 26 | 27 | // Start transaction and make changes but DON'T commit 28 | { 29 | let mut tx = MapTransaction::new(&mut store, id); 30 | tx.write_register("name", MvRegValue::String("Bob".to_string())); 31 | tx.write_register("age", MvRegValue::U64(30)); 32 | // Drop tx without calling commit() - should rollback 33 | } 34 | 35 | // Store should be unchanged 36 | assert_eq!(store, original_store); 37 | 38 | // Verify original value still present 39 | let tx = MapTransaction::new(&mut store, id); 40 | match tx.get(&"name".to_string()) { 41 | Some(CrdtValue::Register(reg)) => { 42 | use dson::crdts::snapshot::ToValue; 43 | assert_eq!( 44 | reg.value().unwrap(), 45 | &MvRegValue::String("Alice".to_string()) 46 | ); 47 | } 48 | _ => panic!("Expected register with original value"), 49 | } 50 | 51 | // Verify new key was NOT added 52 | assert!(tx.get(&"age".to_string()).is_none()); 53 | } 54 | 55 | #[test] 56 | fn map_transaction_rollback_nested_map() { 57 | let mut store = CausalDotStore::>::default(); 58 | let id = Identifier::new(0, 0); 59 | 60 | // Create initial state with nested map 61 | { 62 | let mut tx = MapTransaction::new(&mut store, id); 63 | tx.in_map("config", |cfg_tx| { 64 | cfg_tx.write_register("version", MvRegValue::U64(1)); 65 | }); 66 | let _delta = tx.commit(); 67 | } 68 | 69 | let original_store = store.clone(); 70 | 71 | // Modify nested map but don't commit 72 | { 73 | let mut tx = MapTransaction::new(&mut store, id); 74 | tx.in_map("config", |cfg_tx| { 75 | cfg_tx.write_register("version", MvRegValue::U64(2)); 76 | cfg_tx.write_register("debug", MvRegValue::Bool(true)); 77 | }); 78 | // Drop without commit 79 | } 80 | 81 | // Store should be unchanged 82 | assert_eq!(store, original_store); 83 | 84 | // Verify original nested value 85 | use dson::crdts::snapshot::ToValue; 86 | let config = store.store.get(&"config".to_string()).unwrap(); 87 | let version = config.map.get(&"version".to_string()).unwrap(); 88 | assert_eq!(version.reg.value().unwrap(), &MvRegValue::U64(1)); 89 | assert!(config.map.get(&"debug".to_string()).is_none()); 90 | } 91 | 92 | #[test] 93 | fn map_transaction_rollback_array() { 94 | let mut store = CausalDotStore::>::default(); 95 | let id = Identifier::new(0, 0); 96 | 97 | // Create initial state with array 98 | { 99 | let mut tx = MapTransaction::new(&mut store, id); 100 | tx.in_array("items", |arr_tx| { 101 | arr_tx.insert_register(0, MvRegValue::String("first".to_string())); 102 | }); 103 | let _delta = tx.commit(); 104 | } 105 | 106 | let original_store = store.clone(); 107 | 108 | // Modify array but don't commit 109 | { 110 | let mut tx = MapTransaction::new(&mut store, id); 111 | tx.in_array("items", |arr_tx| { 112 | arr_tx.insert_register(1, 
MvRegValue::String("second".to_string())); 113 | arr_tx.insert_register(2, MvRegValue::String("third".to_string())); 114 | }); 115 | // Drop without commit 116 | } 117 | 118 | // Store should be unchanged 119 | assert_eq!(store, original_store); 120 | 121 | // Verify array still has only one element 122 | use dson::crdts::snapshot::ToValue; 123 | let items = store.store.get(&"items".to_string()).unwrap(); 124 | assert_eq!(items.array.len(), 1); 125 | assert_eq!( 126 | items.array.get(0).unwrap().reg.value().unwrap(), 127 | &MvRegValue::String("first".to_string()) 128 | ); 129 | } 130 | 131 | #[test] 132 | fn array_transaction_rollback_register() { 133 | let mut store = CausalDotStore::::default(); 134 | let id = Identifier::new(0, 0); 135 | 136 | // Create initial state 137 | { 138 | let mut tx = ArrayTransaction::new(&mut store, id); 139 | tx.insert_register(0, MvRegValue::U64(1)); 140 | tx.insert_register(1, MvRegValue::U64(2)); 141 | let _delta = tx.commit(); 142 | } 143 | 144 | let original_store = store.clone(); 145 | 146 | // Modify array but don't commit 147 | { 148 | let mut tx = ArrayTransaction::new(&mut store, id); 149 | tx.insert_register(2, MvRegValue::U64(3)); 150 | tx.insert_register(3, MvRegValue::U64(4)); 151 | // Drop without commit 152 | } 153 | 154 | // Store should be unchanged 155 | assert_eq!(store, original_store); 156 | 157 | // Verify array still has only 2 elements 158 | use dson::crdts::snapshot::ToValue; 159 | assert_eq!(store.store.len(), 2); 160 | assert_eq!( 161 | store.store.get(0).unwrap().reg.value().unwrap(), 162 | &MvRegValue::U64(1) 163 | ); 164 | assert_eq!( 165 | store.store.get(1).unwrap().reg.value().unwrap(), 166 | &MvRegValue::U64(2) 167 | ); 168 | } 169 | 170 | #[test] 171 | fn array_transaction_rollback_nested_array() { 172 | let mut store = CausalDotStore::::default(); 173 | let id = Identifier::new(0, 0); 174 | 175 | // Create initial state with nested array 176 | { 177 | let mut tx = ArrayTransaction::new(&mut store, id); 178 | tx.insert_array(0, |inner_tx| { 179 | inner_tx.insert_register(0, MvRegValue::U64(1)); 180 | }); 181 | let _delta = tx.commit(); 182 | } 183 | 184 | let original_store = store.clone(); 185 | 186 | // Modify nested array but don't commit 187 | { 188 | let mut tx = ArrayTransaction::new(&mut store, id); 189 | tx.insert_array(1, |inner_tx| { 190 | inner_tx.insert_register(0, MvRegValue::U64(2)); 191 | }); 192 | // Drop without commit 193 | } 194 | 195 | // Store should be unchanged 196 | assert_eq!(store, original_store); 197 | 198 | // Verify only original nested array exists 199 | use dson::crdts::snapshot::ToValue; 200 | assert_eq!(store.store.len(), 1); 201 | let nested = &store.store.get(0).unwrap().array; 202 | assert_eq!(nested.len(), 1); 203 | assert_eq!( 204 | nested.get(0).unwrap().reg.value().unwrap(), 205 | &MvRegValue::U64(1) 206 | ); 207 | } 208 | 209 | #[test] 210 | fn array_transaction_rollback_map() { 211 | let mut store = CausalDotStore::::default(); 212 | let id = Identifier::new(0, 0); 213 | 214 | // Create initial state with map 215 | { 216 | let mut tx = ArrayTransaction::new(&mut store, id); 217 | tx.insert_map(0, |map_tx| { 218 | map_tx.write_register("id", MvRegValue::U64(1)); 219 | }); 220 | let _delta = tx.commit(); 221 | } 222 | 223 | let original_store = store.clone(); 224 | 225 | // Modify map but don't commit 226 | { 227 | let mut tx = ArrayTransaction::new(&mut store, id); 228 | tx.insert_map(1, |map_tx| { 229 | map_tx.write_register("id", MvRegValue::U64(2)); 230 | }); 231 | // Drop 
without commit 232 | } 233 | 234 | // Store should be unchanged 235 | assert_eq!(store, original_store); 236 | 237 | // Verify only original map exists 238 | use dson::crdts::snapshot::ToValue; 239 | assert_eq!(store.store.len(), 1); 240 | let map = &store.store.get(0).unwrap().map; 241 | let id_val = map.get(&"id".to_string()).unwrap(); 242 | assert_eq!(id_val.reg.value().unwrap(), &MvRegValue::U64(1)); 243 | } 244 | 245 | #[test] 246 | fn map_transaction_commit_after_rollback() { 247 | // Ensure that after a rollback, a new transaction can still commit successfully 248 | let mut store = CausalDotStore::>::default(); 249 | let id = Identifier::new(0, 0); 250 | 251 | // First transaction: commit 252 | { 253 | let mut tx = MapTransaction::new(&mut store, id); 254 | tx.write_register("count", MvRegValue::U64(1)); 255 | let _delta = tx.commit(); 256 | } 257 | 258 | // Second transaction: rollback 259 | { 260 | let mut tx = MapTransaction::new(&mut store, id); 261 | tx.write_register("count", MvRegValue::U64(999)); 262 | // Drop without commit 263 | } 264 | 265 | // Third transaction: commit 266 | { 267 | let mut tx = MapTransaction::new(&mut store, id); 268 | tx.write_register("count", MvRegValue::U64(2)); 269 | let _delta = tx.commit(); 270 | } 271 | 272 | // Verify final value is from third transaction 273 | let tx = MapTransaction::new(&mut store, id); 274 | match tx.get(&"count".to_string()) { 275 | Some(CrdtValue::Register(reg)) => { 276 | use dson::crdts::snapshot::ToValue; 277 | assert_eq!(reg.value().unwrap(), &MvRegValue::U64(2)); 278 | } 279 | _ => panic!("Expected register"), 280 | } 281 | } 282 | 283 | #[test] 284 | fn array_transaction_commit_after_rollback() { 285 | // Ensure that after a rollback, a new transaction can still commit successfully 286 | let mut store = CausalDotStore::::default(); 287 | let id = Identifier::new(0, 0); 288 | 289 | // First transaction: commit 290 | { 291 | let mut tx = ArrayTransaction::new(&mut store, id); 292 | tx.insert_register(0, MvRegValue::U64(1)); 293 | let _delta = tx.commit(); 294 | } 295 | 296 | // Second transaction: rollback 297 | { 298 | let mut tx = ArrayTransaction::new(&mut store, id); 299 | tx.insert_register(1, MvRegValue::U64(999)); 300 | // Drop without commit 301 | } 302 | 303 | // Third transaction: commit 304 | { 305 | let mut tx = ArrayTransaction::new(&mut store, id); 306 | tx.insert_register(1, MvRegValue::U64(2)); 307 | let _delta = tx.commit(); 308 | } 309 | 310 | // Verify array has both committed values 311 | use dson::crdts::snapshot::ToValue; 312 | assert_eq!(store.store.len(), 2); 313 | assert_eq!( 314 | store.store.get(0).unwrap().reg.value().unwrap(), 315 | &MvRegValue::U64(1) 316 | ); 317 | assert_eq!( 318 | store.store.get(1).unwrap().reg.value().unwrap(), 319 | &MvRegValue::U64(2) 320 | ); 321 | } 322 | 323 | #[test] 324 | fn nested_transaction_panic_safety() { 325 | use dson::{ 326 | CausalDotStore, Identifier, OrMap, crdts::mvreg::MvRegValue, transaction::MapTransaction, 327 | }; 328 | 329 | // Verify that if a nested transaction panics, the parent transaction 330 | // is not corrupted and can still be rolled back cleanly 331 | let mut store = CausalDotStore::>::default(); 332 | let id = Identifier::new(0, 0); 333 | 334 | // Create initial state 335 | { 336 | let mut tx = MapTransaction::new(&mut store, id); 337 | tx.write_register("root", MvRegValue::U64(1)); 338 | let _delta = tx.commit(); 339 | } 340 | 341 | let original_store = store.clone(); 342 | 343 | // Transaction with nested panic 344 | let result 
= std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| { 345 | let mut tx = MapTransaction::new(&mut store, id); 346 | tx.write_register("root", MvRegValue::U64(2)); 347 | 348 | tx.in_map("nested", |nested_tx| { 349 | nested_tx.write_register("field", MvRegValue::String("test".to_string())); 350 | panic!("Simulated panic in nested transaction"); 351 | }); 352 | 353 | #[allow(unreachable_code)] 354 | tx.commit() 355 | })); 356 | 357 | // Verify panic occurred 358 | assert!(result.is_err()); 359 | 360 | // Store should be unchanged - automatic rollback 361 | assert_eq!(store, original_store); 362 | 363 | // Verify original value still present 364 | use dson::crdts::snapshot::ToValue; 365 | let val = store.store.get(&"root".to_string()).unwrap(); 366 | assert_eq!(val.reg.value().unwrap(), &MvRegValue::U64(1)); 367 | 368 | // Verify nested map was not created 369 | assert!(store.store.get(&"nested".to_string()).is_none()); 370 | } 371 | -------------------------------------------------------------------------------- /src/crdts/test_util/arbitrary_delta_impls/orarray.rs: -------------------------------------------------------------------------------- 1 | // (c) Copyright 2025 Helsing GmbH. All rights reserved. 2 | use super::ValueDelta; 3 | use crate::{ 4 | CausalContext, CausalDotStore, DotStore, Identifier, MvReg, OrArray, OrMap, 5 | crdts::{ 6 | NoExtensionTypes, Value, 7 | orarray::Position, 8 | test_util::{ArbitraryDelta, Delta, KeyTracker}, 9 | }, 10 | }; 11 | use quickcheck::{Arbitrary, Gen}; 12 | use std::{fmt, ops::RangeBounds}; 13 | 14 | // NOTE: Box is needed here to allow arbitrary nesting, otherwise the type isn't Sized. 15 | // This is because `ValueDelta` itself contains `ArrayOp`. 16 | #[cfg_attr(feature = "serde", derive(::serde::Deserialize, ::serde::Serialize))] 17 | #[derive(Debug, Clone)] 18 | pub(crate) enum ArrayOp { 19 | Insert(usize, Position, Box<ValueDelta>), 20 | Update(usize, Position, Box<ValueDelta>), 21 | Delete(usize), 22 | Move(usize, Position), 23 | Clear, 24 | } 25 | 26 | impl fmt::Display for ArrayOp { 27 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 28 | match self { 29 | Self::Insert(keyi, _, _) => write!(f, "inserts key #{keyi}"), 30 | Self::Update(keyi, _, _) => write!(f, "updates key #{keyi}"), 31 | Self::Delete(keyi) => write!(f, "deletes key #{keyi}"), 32 | Self::Move(keyi, _) => write!(f, "moves key #{keyi}"), 33 | Self::Clear => write!(f, "clears the array"), 34 | } 35 | } 36 | } 37 | 38 | impl Delta for ArrayOp { 39 | type DS = OrArray; 40 | 41 | fn depends_on_keyi_in<R: RangeBounds<usize>>(&self, range: R) -> bool { 42 | match *self { 43 | Self::Insert(keyi, _, _) 44 | | Self::Update(keyi, _, _) 45 | | Self::Delete(keyi) 46 | | Self::Move(keyi, _) => range.contains(&keyi), 47 | Self::Clear => false, 48 | } 49 | } 50 | 51 | fn into_crdt( 52 | self, 53 | ds: &Self::DS, 54 | cc: &CausalContext, 55 | id: Identifier, 56 | keys: &mut KeyTracker, 57 | ) -> CausalDotStore<Self::DS> { 58 | match self { 59 | Self::Insert(expected_keyi, p, v) => { 60 | assert_eq!(expected_keyi, keys.len()); 61 | let keyi = expected_keyi; 62 | let cc = cc.clone(); 63 | let uid = cc.next_dot_for(id).into(); 64 | let mut inner_keys = KeyTracker::default(); 65 | let crdt = ds.insert( 66 | uid, 67 | |cc, id| match *v { 68 | ValueDelta::Map(m) => m 69 | .into_crdt(&<_>::default(), cc, id, &mut inner_keys) 70 | .map_store(Value::Map), 71 | ValueDelta::Array(a) => a 72 | .into_crdt(&<_>::default(), cc, id, &mut inner_keys) 73 | .map_store(Value::Array), 74 | ValueDelta::Register(r) => r 75 | .into_crdt(&<_>::default(),
cc, id, &mut inner_keys) 76 | .map_store(Value::Register), 77 | }, 78 | p, 79 | &cc, 80 | id, 81 | ); 82 | keys.inner_keys.push(inner_keys); 83 | keys.array_keys.insert(uid, keyi); 84 | crdt 85 | } 86 | Self::Update(keyi, p, v) => { 87 | let inner_keys = &mut keys.inner_keys[keyi]; 88 | let uid = *keys.array_keys.get_by_right(&keyi).unwrap(); 89 | ds.apply( 90 | uid, 91 | |old, cc, id| match *v { 92 | ValueDelta::Map(m) => m 93 | .into_crdt(&old.map, cc, id, inner_keys) 94 | .map_store(Value::Map), 95 | ValueDelta::Array(a) => a 96 | .into_crdt(&old.array, cc, id, inner_keys) 97 | .map_store(Value::Array), 98 | ValueDelta::Register(r) => r 99 | .into_crdt(&old.reg, cc, id, inner_keys) 100 | .map_store(Value::Register), 101 | }, 102 | p, 103 | cc, 104 | id, 105 | ) 106 | } 107 | Self::Delete(keyi) => { 108 | let uid = *keys.array_keys.get_by_right(&keyi).unwrap(); 109 | ds.delete(uid, cc, id) 110 | } 111 | Self::Move(keyi, p) => { 112 | let uid = *keys.array_keys.get_by_right(&keyi).unwrap(); 113 | ds.mv(uid, p, cc, id) 114 | } 115 | Self::Clear => ds.clear(cc, id), 116 | } 117 | } 118 | } 119 | 120 | impl ArbitraryDelta for OrArray { 121 | type Delta = ArrayOp; 122 | 123 | fn arbitrary_delta( 124 | &self, 125 | cc: &CausalContext, 126 | id: Identifier, 127 | keys: &mut KeyTracker, 128 | g: &mut Gen, 129 | depth: usize, 130 | ) -> (Self::Delta, CausalDotStore) { 131 | // NOTE: see the outer_remove_vs_inner_mv test for why we need this 132 | let valid_keys: Vec<_> = self 133 | .0 134 | .iter() 135 | .filter_map(|(k, v)| (!v.value.is_bottom()).then_some(k)) 136 | .collect(); 137 | 138 | let op = if valid_keys.is_empty() && self.0.is_empty() { 139 | g.choose(&["insert", "clear"]) 140 | } else if valid_keys.is_empty() { 141 | g.choose(&["insert", "delete", "clear"]) 142 | } else { 143 | g.choose(&["insert", "update", "delete", "move", "clear"]) 144 | }; 145 | let indent = " ".repeat(depth); 146 | 147 | match op.copied().unwrap() { 148 | "insert" => { 149 | let uid = cc.next_dot_for(id).into(); 150 | let kind = if g.size() <= 1 { 151 | "register" 152 | } else { 153 | g.choose(&["map", "array", "register"]).copied().unwrap() 154 | }; 155 | let keyi = keys.add_array_key(uid); 156 | eprintln!("{indent} -> inserting #{keyi} ({uid:?})"); 157 | let inner_keys = &mut keys.inner_keys[keyi]; 158 | let p = Position::arbitrary(g); 159 | let mut value_delta = None; 160 | let crdt = self.insert( 161 | uid, 162 | |cc, id| { 163 | eprintln!("{indent} -> generating inner {kind} operation"); 164 | let (vd, value_crdt) = match kind { 165 | "map" => { 166 | let mut g = Gen::new(g.size() / 2); 167 | let g = &mut g; 168 | let (delta, crdt) = OrMap::arbitrary_delta( 169 | &<_>::default(), 170 | cc, 171 | id, 172 | inner_keys, 173 | g, 174 | depth + 1, 175 | ); 176 | (ValueDelta::Map(delta), crdt.map_store(Value::Map)) 177 | } 178 | "array" => { 179 | let mut g = Gen::new(g.size() / 2); 180 | let g = &mut g; 181 | let (delta, crdt) = OrArray::arbitrary_delta( 182 | &<_>::default(), 183 | cc, 184 | id, 185 | inner_keys, 186 | g, 187 | depth + 1, 188 | ); 189 | (ValueDelta::Array(delta), crdt.map_store(Value::Array)) 190 | } 191 | "register" => { 192 | let (delta, crdt) = MvReg::arbitrary_delta( 193 | &<_>::default(), 194 | cc, 195 | id, 196 | inner_keys, 197 | g, 198 | depth + 1, 199 | ); 200 | (ValueDelta::Register(delta), crdt.map_store(Value::Register)) 201 | } 202 | kind => unreachable!("need match arm for '{kind}'"), 203 | }; 204 | value_delta = Some(vd); 205 | value_crdt 206 | }, 207 | p, 208 | cc, 209 | id, 210 
| ); 211 | ( 212 | ArrayOp::Insert( 213 | keyi, 214 | p, 215 | Box::new(value_delta.expect("insert closure is always called")), 216 | ), 217 | crdt, 218 | ) 219 | } 220 | "update" => { 221 | let uid = **g 222 | .choose(&valid_keys) 223 | .expect("this arm is only taken if non-empty"); 224 | // TODO: how should this handle the case of concurrent inserts of the same 225 | // key, which will imply that a single key has _multiple_ keyi. 226 | let keyi = *keys.array_keys.get_by_left(&uid).unwrap(); 227 | eprintln!("{indent} -> updating #{keyi} ({uid:?})"); 228 | let inner_keys = &mut keys.inner_keys[keyi]; 229 | let p = Position::arbitrary(g); 230 | 231 | // NOTE: this _may_ change the type -- that is intentional! test thoroughly. 232 | let mut value_delta = None; 233 | let crdt = self.apply( 234 | uid, 235 | |old, cc, id| { 236 | let kind = if g.size() <= 1 { 237 | "register" 238 | } else { 239 | g.choose(&["map", "array", "register"]).copied().unwrap() 240 | }; 241 | eprintln!("{indent} -> generating inner {kind} operation"); 242 | let (vd, value_crdt) = match kind { 243 | "map" => { 244 | let mut g = Gen::new(g.size() / 2); 245 | let g = &mut g; 246 | let (delta, crdt) = OrMap::arbitrary_delta( 247 | &old.map, 248 | cc, 249 | id, 250 | inner_keys, 251 | g, 252 | depth + 1, 253 | ); 254 | (ValueDelta::Map(delta), crdt.map_store(Value::Map)) 255 | } 256 | "array" => { 257 | let mut g = Gen::new(g.size() / 2); 258 | let g = &mut g; 259 | let (delta, crdt) = OrArray::arbitrary_delta( 260 | &old.array, 261 | cc, 262 | id, 263 | inner_keys, 264 | g, 265 | depth + 1, 266 | ); 267 | (ValueDelta::Array(delta), crdt.map_store(Value::Array)) 268 | } 269 | "register" => { 270 | let (delta, crdt) = MvReg::arbitrary_delta( 271 | &old.reg, 272 | cc, 273 | id, 274 | inner_keys, 275 | g, 276 | depth + 1, 277 | ); 278 | (ValueDelta::Register(delta), crdt.map_store(Value::Register)) 279 | } 280 | kind => unreachable!("need match arm for '{kind}'"), 281 | }; 282 | value_delta = Some(vd); 283 | value_crdt 284 | }, 285 | p, 286 | cc, 287 | id, 288 | ); 289 | ( 290 | ArrayOp::Update( 291 | keyi, 292 | p, 293 | Box::new(value_delta.expect("apply closure is always called")), 294 | ), 295 | crdt, 296 | ) 297 | } 298 | "delete" => { 299 | // NOTE: we specifically use the whole range of keys here, not just 300 | // "valid_keys", since we want to test what happens if a bottom-value element is 301 | // deleted. 302 | let mut uids = self.0.keys(); 303 | let uidi = usize::arbitrary(g) % uids.len(); 304 | let uid = *uids 305 | .nth(uidi) 306 | .expect("this arm is only taken if non-empty, and n is % len"); 307 | let keyi = *keys.array_keys.get_by_left(&uid).unwrap(); 308 | eprintln!("{indent} -> deleting #{keyi} ({uid:?})"); 309 | (ArrayOp::Delete(keyi), self.delete(uid, cc, id)) 310 | } 311 | "move" => { 312 | let uid = **g 313 | .choose(&valid_keys) 314 | .expect("this arm is only taken if non-empty"); 315 | let keyi = *keys.array_keys.get_by_left(&uid).unwrap(); 316 | eprintln!("{indent} -> moving #{keyi} ({uid:?})"); 317 | let p = Position::arbitrary(g); 318 | (ArrayOp::Move(keyi, p), self.mv(uid, p, cc, id)) 319 | } 320 | "clear" => { 321 | eprintln!("{indent} -> clearing array"); 322 | (ArrayOp::Clear, self.clear(cc, id)) 323 | } 324 | op => unreachable!("need match arm for '{op}'"), 325 | } 326 | } 327 | } 328 | --------------------------------------------------------------------------------