├── .gitignore
├── docs
│   ├── static
│   │   ├── .nojekyll
│   │   └── img
│   │       ├── favicon.ico
│   │       ├── docusaurus.png
│   │       └── docusaurus-social-card.jpg
│   ├── babel.config.js
│   ├── src
│   │   ├── components
│   │   │   └── HomepageFeatures
│   │   │       ├── styles.module.css
│   │   │       └── index.js
│   │   └── css
│   │       └── custom.css
│   ├── flake.nix
│   ├── .gitignore
│   ├── sidebars.js
│   ├── README.md
│   ├── flake.lock
│   ├── package.json
│   ├── docs
│   │   ├── intro.md
│   │   ├── developer_journey
│   │   │   ├── event_storming.md
│   │   │   ├── developer_journey.md
│   │   │   ├── cqrs_and_event_sourcing.md
│   │   │   ├── handle_concurrency_conflicts.md
│   │   │   └── add_new_feature.md
│   │   ├── stream_query.md
│   │   ├── decision.md
│   │   ├── faq.md
│   │   └── postgres.md
│   └── docusaurus.config.js
├── disintegrate
│   ├── LICENSE
│   ├── README.md
│   ├── src
│   │   ├── listener.rs
│   │   ├── lib.rs
│   │   ├── domain_identifier.rs
│   │   ├── event_store.rs
│   │   ├── event.rs
│   │   └── testing.rs
│   └── Cargo.toml
├── .env
├── examples
│   ├── cart
│   │   ├── .env
│   │   ├── Cargo.toml
│   │   └── src
│   │       ├── main.rs
│   │       ├── event.rs
│   │       └── cart.rs
│   ├── banking
│   │   ├── .env
│   │   ├── Cargo.toml
│   │   ├── tests
│   │   │   └── validation_query_test.js
│   │   └── src
│   │       └── main.rs
│   └── courses
│       ├── .env
│       ├── src
│       │   ├── lib.rs
│       │   ├── application
│       │   │   ├── queries.rs
│       │   │   └── commands.rs
│       │   ├── application.rs
│       │   ├── domain.rs
│       │   ├── main.rs
│       │   ├── domain
│       │   │   ├── unsubscription.rs
│       │   │   ├── student.rs
│       │   │   └── course.rs
│       │   ├── serde.rs
│       │   ├── read_model.rs
│       │   └── grpc.rs
│       ├── build.rs
│       ├── proto
│       │   ├── event.proto
│       │   └── api.proto
│       ├── Cargo.toml
│       ├── README.md
│       └── tests
│           └── subscriptions_concurrency_test.js
├── disintegrate-postgres
│   ├── src
│   │   ├── event_store
│   │   │   └── sql
│   │   │       ├── idx_event_type.sql
│   │   │       ├── idx_event_sequence_committed.sql
│   │   │       ├── idx_event_sequence_type.sql
│   │   │       ├── table_event.sql
│   │   │       ├── table_event_sequence.sql
│   │   │       ├── fn_event_store_begin_epoch.sql
│   │   │       └── fn_event_store_current_epoch.sql
│   │   ├── listener
│   │   │   ├── sql
│   │   │   │   ├── trigger_notify_event_listener.sql
│   │   │   │   ├── table_event_listener.sql
│   │   │   │   └── fn_notify_event_listener.sql
│   │   │   └── id_indexer.rs
│   │   ├── snapshotter
│   │   │   ├── sql
│   │   │   │   └── table_snapshot.sql
│   │   │   └── tests.rs
│   │   ├── error.rs
│   │   ├── lib.rs
│   │   └── snapshotter.rs
│   └── Cargo.toml
├── .github
│   ├── dependabot.yml
│   └── workflows
│       ├── test-docs.yml
│       ├── deploy-docs.yml
│       └── ci.yml
├── disintegrate-serde
│   ├── src
│   │   ├── lib.rs
│   │   ├── serde.rs
│   │   └── serde
│   │       ├── protobuf.rs
│   │       ├── json.rs
│   │       ├── messagepack.rs
│   │       ├── prost.rs
│   │       └── avro.rs
│   └── Cargo.toml
├── docker-compose.yml
├── Cargo.toml
├── disintegrate-macros
│   ├── Cargo.toml
│   ├── src
│   │   ├── symbol.rs
│   │   ├── lib.rs
│   │   ├── state_query.rs
│   │   └── event
│   │       └── stream.rs
│   └── tests
│       ├── state_query.rs
│       └── event.rs
├── LICENSE
└── CONTRIBUTING.md
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | target/
2 | 
--------------------------------------------------------------------------------
/docs/static/.nojekyll:
--------------------------------------------------------------------------------
1 | 
--------------------------------------------------------------------------------
/disintegrate/LICENSE:
--------------------------------------------------------------------------------
1 | ../LICENSE
--------------------------------------------------------------------------------
/disintegrate/README.md:
--------------------------------------------------------------------------------
1 | ../README.md
--------------------------------------------------------------------------------
/.env:
--------------------------------------------------------------------------------
1 | 
DATABASE_URL=postgres://postgres:postgres@localhost:5432/event_store
2 | 
--------------------------------------------------------------------------------
/docs/static/img/favicon.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/disintegrate-es/disintegrate/HEAD/docs/static/img/favicon.ico
--------------------------------------------------------------------------------
/docs/babel.config.js:
--------------------------------------------------------------------------------
1 | module.exports = {
2 |   presets: [require.resolve('@docusaurus/core/lib/babel/preset')],
3 | };
4 | 
--------------------------------------------------------------------------------
/examples/cart/.env:
--------------------------------------------------------------------------------
1 | PGHOST=localhost
2 | PGPORT=5432
3 | PGUSER=postgres
4 | PGPASSWORD=postgres
5 | PGSSLMODE=disable
6 | 
--------------------------------------------------------------------------------
/docs/static/img/docusaurus.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/disintegrate-es/disintegrate/HEAD/docs/static/img/docusaurus.png
--------------------------------------------------------------------------------
/examples/banking/.env:
--------------------------------------------------------------------------------
1 | PGHOST=localhost
2 | PGPORT=5432
3 | PGUSER=postgres
4 | PGPASSWORD=postgres
5 | PGSSLMODE=disable
6 | 
--------------------------------------------------------------------------------
/examples/courses/.env:
--------------------------------------------------------------------------------
1 | PGHOST=localhost
2 | PGPORT=5432
3 | PGUSER=postgres
4 | PGPASSWORD=postgres
5 | PGSSLMODE=disable
6 | 
--------------------------------------------------------------------------------
/disintegrate-postgres/src/event_store/sql/idx_event_type.sql:
--------------------------------------------------------------------------------
1 | CREATE INDEX IF NOT EXISTS idx_event_type_btree ON event USING btree (event_type);
2 | 
--------------------------------------------------------------------------------
/docs/static/img/docusaurus-social-card.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/disintegrate-es/disintegrate/HEAD/docs/static/img/docusaurus-social-card.jpg
--------------------------------------------------------------------------------
/.github/dependabot.yml:
--------------------------------------------------------------------------------
1 | version: 2
2 | updates:
3 |   - package-ecosystem: "cargo"
4 |     directory: "/"
5 |     schedule:
6 |       interval: "weekly"
7 | 
--------------------------------------------------------------------------------
/disintegrate-postgres/src/event_store/sql/idx_event_sequence_committed.sql:
--------------------------------------------------------------------------------
1 | CREATE INDEX IF NOT EXISTS idx_event_sequence_committed ON event_sequence(committed);
2 | 
--------------------------------------------------------------------------------
/disintegrate-postgres/src/event_store/sql/idx_event_sequence_type.sql:
--------------------------------------------------------------------------------
1 | CREATE INDEX IF NOT EXISTS idx_event_sequence_type ON event_sequence USING HASH (event_type);
2 | 
--------------------------------------------------------------------------------
/disintegrate-postgres/src/listener/sql/trigger_notify_event_listener.sql:
--------------------------------------------------------------------------------
1 | CREATE OR REPLACE TRIGGER event_insert_trigger
2 | AFTER INSERT ON event
3 | FOR EACH ROW
4 | EXECUTE FUNCTION notify_event_listener();
5 | 
--------------------------------------------------------------------------------
/disintegrate-postgres/src/listener/sql/table_event_listener.sql:
--------------------------------------------------------------------------------
1 | CREATE TABLE IF NOT EXISTS event_listener (
2 |     id TEXT PRIMARY KEY,
3 |     last_processed_event_id BIGINT,
4 |     updated_at TIMESTAMP DEFAULT now()
5 | );
6 | 
--------------------------------------------------------------------------------
/disintegrate-postgres/src/event_store/sql/table_event.sql:
--------------------------------------------------------------------------------
1 | CREATE TABLE IF NOT EXISTS event (
2 |     event_id bigint PRIMARY KEY,
3 |     event_type varchar(255),
4 |     payload bytea,
5 |     inserted_at TIMESTAMP DEFAULT now()
6 | );
7 | 
--------------------------------------------------------------------------------
/docs/src/components/HomepageFeatures/styles.module.css:
--------------------------------------------------------------------------------
1 | .features {
2 |   display: flex;
3 |   align-items: center;
4 |   padding: 2rem 0;
5 |   width: 100%;
6 | }
7 | 
8 | .featureSvg {
9 |   height: 200px;
10 |   width: 200px;
11 | }
12 | 
--------------------------------------------------------------------------------
/disintegrate-postgres/src/snapshotter/sql/table_snapshot.sql:
--------------------------------------------------------------------------------
1 | CREATE TABLE IF NOT EXISTS snapshot (
2 |     id uuid PRIMARY KEY,
3 |     name text,
4 |     query text,
5 |     version bigint,
6 |     payload text,
7 |     inserted_at TIMESTAMP DEFAULT now()
8 | );
9 | 
--------------------------------------------------------------------------------
/disintegrate-postgres/src/listener/sql/fn_notify_event_listener.sql:
--------------------------------------------------------------------------------
1 | CREATE OR REPLACE FUNCTION notify_event_listener()
2 | RETURNS TRIGGER AS $$
3 | BEGIN
4 |     PERFORM pg_notify('new_events', NEW.event_type);
5 |     RETURN NEW;
6 | END;
7 | $$ LANGUAGE plpgsql;
8 | 
9 | 
--------------------------------------------------------------------------------
/disintegrate-postgres/src/event_store/sql/table_event_sequence.sql:
--------------------------------------------------------------------------------
1 | CREATE TABLE IF NOT EXISTS event_sequence (
2 |     event_id bigint primary key generated always as identity,
3 |     event_type varchar(255),
4 |     consumed smallint DEFAULT 0 check (consumed <= 1),
5 |     committed boolean DEFAULT false,
6 |     inserted_at TIMESTAMP DEFAULT now()
7 | );
8 | 
--------------------------------------------------------------------------------
/examples/courses/src/lib.rs:
--------------------------------------------------------------------------------
1 | pub mod application;
2 | pub mod domain;
3 | pub mod grpc;
4 | pub mod read_model;
5 | pub mod serde;
6 | 
7 | pub mod proto {
8 |     tonic::include_proto!("event");
9 |     tonic::include_proto!("api");
10 | 
11 |     pub const FILE_DESCRIPTOR_SET: &[u8] =
12 |         tonic::include_file_descriptor_set!("courses_descriptor");
13 | }
14 | 
--------------------------------------------------------------------------------
/disintegrate-serde/src/lib.rs:
--------------------------------------------------------------------------------
1 | //! 
# Event Store Serialization and Deserialization Library
2 | //!
3 | //! This library provides traits and implementations for serializing and deserializing events for the Disintegrate Event Store.
4 | //! It includes implementations for common formats such as Avro, JSON, and Protocol Buffers (Prost).
5 | pub mod serde;
6 | pub use crate::serde::{Deserializer, Error, Serde, Serializer};
7 | 
--------------------------------------------------------------------------------
/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '3.9'
2 | 
3 | services:
4 |   pgsql:
5 |     image: postgres:16
6 |     restart: always
7 |     environment:
8 |       - POSTGRES_USER=postgres
9 |       - POSTGRES_PASSWORD=postgres
10 |       - POSTGRES_DB=event_store
11 |     ports:
12 |       - '5432:5432'
13 |     volumes:
14 |       - db:/var/lib/postgresql/data
15 | 
16 | volumes:
17 |   db:
18 |     driver: local
19 | 
--------------------------------------------------------------------------------
/examples/courses/src/application/queries.rs:
--------------------------------------------------------------------------------
1 | use super::Application;
2 | use crate::domain::CourseId;
3 | use crate::read_model;
4 | use anyhow::Result;
5 | use tracing::instrument;
6 | 
7 | impl Application {
8 |     #[instrument(skip(self))]
9 |     pub async fn course_by_id(&self, course_id: CourseId) -> Result<Option<read_model::Course>> {
10 |         Ok(self.read_model.course_by_id(course_id).await?)
11 |     }
12 | }
13 | 
--------------------------------------------------------------------------------
/examples/courses/build.rs:
--------------------------------------------------------------------------------
1 | use std::{env, path::PathBuf};
2 | 
3 | fn main() {
4 |     let out_dir = PathBuf::from(env::var("OUT_DIR").unwrap());
5 | 
6 |     tonic_build::configure()
7 |         .file_descriptor_set_path(out_dir.join("courses_descriptor.bin"))
8 |         .build_server(true)
9 |         .build_client(false)
10 |         .compile_protos(&["proto/api.proto", "proto/event.proto"], &["proto"])
11 |         .unwrap();
12 | }
13 | 
--------------------------------------------------------------------------------
/docs/flake.nix:
--------------------------------------------------------------------------------
1 | {
2 |   inputs = {
3 |     systems.url = "github:nix-systems/default";
4 |   };
5 | 
6 |   outputs = {
7 |     systems,
8 |     nixpkgs,
9 |     ...
10 | } @ inputs: let
11 |   eachSystem = f:
12 |     nixpkgs.lib.genAttrs (import systems) (
13 |       system:
14 |         f nixpkgs.legacyPackages.${system}
15 |     );
16 | in {
17 |   devShells = eachSystem (pkgs: {
18 |     default = pkgs.mkShell {
19 |       buildInputs = [
20 |         pkgs.nodejs
21 |         pkgs.yarn
22 |       ];
23 |     };
24 |   });
25 | };
26 | }
27 | 
--------------------------------------------------------------------------------
/disintegrate-postgres/src/event_store/sql/fn_event_store_begin_epoch.sql:
--------------------------------------------------------------------------------
1 | CREATE OR REPLACE FUNCTION event_store_begin_epoch()
2 | RETURNS void AS $$
3 | DECLARE
4 |     id BIGINT;
5 |     db_id INT;
6 | BEGIN
7 |     -- Fetch the maximum event id, default to 0 if no events exist
8 |     SELECT COALESCE(MAX(event_id), 0) INTO id FROM event;
9 |     SELECT oid INTO db_id FROM pg_database WHERE datname = current_database();
10 | 
11 |     PERFORM pg_try_advisory_xact_lock_shared(db_id, 0);
12 |     PERFORM pg_try_advisory_xact_lock_shared(1, (id & 0xFFFFFFFF)::bit(32)::integer);
13 |     PERFORM pg_try_advisory_xact_lock_shared(2, (id >> 32)::bit(32)::integer);
14 | END;
15 | $$ LANGUAGE plpgsql;
--------------------------------------------------------------------------------
/.github/workflows/test-docs.yml:
--------------------------------------------------------------------------------
1 | name: Test Docs
2 | 
3 | on:
4 |   pull_request:
5 |     branches:
6 |       - main
7 |     paths:
8 |       - 'docs/**'
9 | 
10 | defaults:
11 |   run:
12 |     working-directory: ./docs
13 | 
14 | jobs:
15 |   test-docs:
16 |     name: Test docs
17 |     runs-on: ubuntu-latest
18 |     steps:
19 |       - uses: actions/checkout@v4
20 |         with:
21 |           fetch-depth: 0
22 |       - uses: actions/setup-node@v4
23 |         with:
24 |           node-version: 18
25 |           cache: yarn
26 |           cache-dependency-path: './docs'
27 | 
28 |       - name: Install dependencies
29 |         run: yarn install --frozen-lockfile
30 |       - name: Test build website
31 |         run: yarn build
--------------------------------------------------------------------------------
/examples/banking/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "banking"
3 | version = "0.1.0"
4 | publish = false
5 | license.workspace = true
6 | edition.workspace = true
7 | authors.workspace = true
8 | repository.workspace = true
9 | 
10 | [dependencies]
11 | disintegrate = { version = "2.1.0", path = "../../disintegrate", features = ["macros", "serde-json"] }
12 | disintegrate-postgres = { version = "2.1.0", path = "../../disintegrate-postgres" }
13 | tokio = { version = "1.47.1", features = ["macros", "rt-multi-thread", "signal"] }
14 | serde = { version = "1.0.217", features = ["derive"] }
15 | thiserror = "2.0.11"
16 | anyhow = "1.0.94"
17 | dotenv = "0.15.0"
18 | sqlx = { version = "0.8.6", features = [ "runtime-tokio-rustls" , "postgres" ] }
19 | actix-web = "4.11.0"
20 | 
--------------------------------------------------------------------------------
/examples/courses/src/application.rs:
--------------------------------------------------------------------------------
1 | mod commands;
2 | mod queries;
3 | 
4 | use crate::{domain::DomainEvent, proto, read_model};
5 | use disintegrate::serde::prost::Prost;
6 | use disintegrate_postgres::{PgDecisionMaker, WithPgSnapshot};
7 | 
8 | pub type DecisionMaker =
9 |     PgDecisionMaker<DomainEvent, Prost<DomainEvent, proto::Event>, WithPgSnapshot>;
10 | 
11 | #[derive(Clone)]
12 | pub struct Application {
13 |     decision_maker: DecisionMaker,
14 |     read_model: read_model::Repository,
15 | }
16 | 
17 | impl Application {
18 |     pub fn new(decision_maker: 
DecisionMaker, read_model: read_model::Repository) -> Self {
19 |         Self {
20 |             decision_maker,
21 |             read_model,
22 |         }
23 |     }
24 | }
25 | 
--------------------------------------------------------------------------------
/Cargo.toml:
--------------------------------------------------------------------------------
1 | [workspace.package]
2 | edition = "2021"
3 | license = "MIT"
4 | repository = "https://github.com/disintegrate-es/disintegrate"
5 | keywords = ["event-sourcing", "domain-driven-design", "ddd", "event-driven-architecture", "cqrs", "aggregate", "event-store"]
6 | categories = ["development-tools", "software-libraries", "event-sourcing", "domain-driven-design"]
7 | authors = [
8 |     "Luca Iachini ",
9 |     "Valerio Iachini ",
10 | ]
11 | readme = "README.md"
12 | 
13 | [workspace]
14 | resolver = "2"
15 | members = [
16 |     ".",
17 |     "disintegrate",
18 |     "disintegrate-macros",
19 |     "disintegrate-postgres",
20 |     "disintegrate-serde",
21 |     "examples/cart",
22 |     "examples/courses",
23 |     "examples/banking"
24 | ]
25 | 
26 | 
--------------------------------------------------------------------------------
/disintegrate-macros/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "disintegrate-macros"
3 | description = "Disintegrate macros. Not for direct use. Refer to the `disintegrate` crate for details."
4 | version = "2.1.0"
5 | license.workspace = true
6 | edition.workspace = true
7 | authors.workspace = true
8 | repository.workspace = true
9 | readme.workspace = true
10 | 
11 | [lib]
12 | proc-macro = true
13 | 
14 | [features]
15 | never = []
16 | 
17 | [dependencies]
18 | heck = "0.5.0"
19 | proc-macro2 = "1.0.86"
20 | quote = "1.0.38"
21 | syn = { version = "2.0.65", features = ["full"] }
22 | 
23 | [dev-dependencies]
24 | disintegrate = { version = "2.1.0", path = "../disintegrate", features = ["macros"] }
25 | 
26 | [package.metadata.docs.rs]
27 | all-features = true
28 | 
--------------------------------------------------------------------------------
/examples/cart/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "cart"
3 | version = "0.1.0"
4 | publish = false
5 | license.workspace = true
6 | edition.workspace = true
7 | authors.workspace = true
8 | repository.workspace = true
9 | 
10 | [dependencies]
11 | disintegrate = { version = "2.1.0", path = "../../disintegrate", features = ["macros", "serde-json"] }
12 | disintegrate-postgres = { version = "2.1.0", path = "../../disintegrate-postgres" }
13 | sqlx = { version = "0.8.6", features = [ "runtime-tokio-rustls" , "postgres" ] }
14 | tokio = { version = "1.47.1", features = ["macros", "rt-multi-thread", "signal"] }
15 | anyhow = "1.0.94"
16 | dotenv = "0.15.0"
17 | serde = { version = "1.0.217", features = ["derive"] }
18 | thiserror = "2.0.11"
19 | 
20 | [build-dependencies]
21 | tonic-build = { version = "0.13.1", features = ["prost"] }
22 | 
--------------------------------------------------------------------------------
/disintegrate-postgres/src/event_store/sql/fn_event_store_current_epoch.sql:
--------------------------------------------------------------------------------
1 | CREATE OR REPLACE FUNCTION event_store_current_epoch()
2 | RETURNS BIGINT AS $$
3 | DECLARE
4 |     persisted_event_id BIGINT;
5 |     pending_event_id BIGINT;
6 |     db_id INT;
7 | BEGIN
8 |     SELECT COALESCE(MAX(event_id), 0) INTO persisted_event_id FROM event;
9 |     SELECT oid INTO db_id FROM pg_database WHERE datname = current_database();
10 | 
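    -- Recover the ids of appends still in flight from the advisory locks taken
    -- in event_store_begin_epoch: classid 1 carries the low 32 bits of a
    -- writer's event id and classid 2 the high 32 bits, so joining the locks
    -- held by the same pid reassembles the full 64-bit id.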
11 |     SELECT MIN((l3.objid::bigint << 32) + l2.objid::bigint)
12 |     INTO pending_event_id
13 |     FROM pg_locks l1
14 |     INNER JOIN pg_locks l2 ON l1.pid = l2.pid
15 |     INNER JOIN pg_locks l3 ON l1.pid = l3.pid
16 |     WHERE
17 |         l1.classid = db_id
18 |         AND l2.classid = 1
19 |         AND l3.classid = 2
20 |         AND l1.locktype = 'advisory';
21 | 
22 |     RETURN COALESCE(pending_event_id, persisted_event_id);
23 | END;
24 | $$ LANGUAGE plpgsql;
25 | 
--------------------------------------------------------------------------------
/docs/.gitignore:
--------------------------------------------------------------------------------
1 | .DS_Store
2 | .vscode/*
3 | !.vscode/extensions.json
4 | .idea
5 | *.iml
6 | *.code-workspace
7 | .changelog
8 | .history
9 | .envrc
10 | .direnv/
11 | 
12 | node_modules
13 | .yarn
14 | package-lock.json
15 | 
16 | .eslintcache
17 | 
18 | yarn-error.log
19 | build
20 | coverage
21 | .docusaurus
22 | .cache-loader
23 | types
24 | test-website
25 | test-website-in-workspace
26 | 
27 | packages/create-docusaurus/lib/
28 | packages/lqip-loader/lib/
29 | packages/docusaurus/lib/
30 | packages/docusaurus-*/lib/*
31 | packages/eslint-plugin/lib/
32 | packages/stylelint-copyright/lib/
33 | 
34 | website/netlifyDeployPreview/*
35 | website/changelog
36 | !website/netlifyDeployPreview/index.html
37 | !website/netlifyDeployPreview/_redirects
38 | 
39 | website/_dogfooding/_swizzle_theme_tests
40 | 
41 | CrowdinTranslations_*.zip
42 | 
43 | website/i18n/**/*
44 | #!website/i18n/fr
45 | #!website/i18n/fr/**/*
46 | 
47 | .netlify
48 | 
--------------------------------------------------------------------------------
/docs/sidebars.js:
--------------------------------------------------------------------------------
1 | /**
2 |  * Creating a sidebar enables you to:
3 |  - create an ordered group of docs
4 |  - render a sidebar for each doc of that group
5 |  - provide next/previous navigation
6 | 
7 |  The sidebars can be generated from the filesystem, or explicitly defined here.
8 | 
9 |  Create as many sidebars as you want.
10 |  */
11 | 
12 | // @ts-check
13 | 
14 | /** @type {import('@docusaurus/plugin-content-docs').SidebarsConfig} */
15 | const sidebars = {
16 |   // By default, Docusaurus generates a sidebar from the docs folder structure
17 |   tutorialSidebar: [{type: 'autogenerated', dirName: '.'}],
18 | 
19 |   // But you can create a sidebar manually
20 |   /*
21 |   tutorialSidebar: [
22 |     'intro',
23 |     'hello',
24 |     {
25 |       type: 'category',
26 |       label: 'Tutorial',
27 |       items: ['tutorial-basics/create-a-document'],
28 |     },
29 |   ],
30 |    */
31 | };
32 | 
33 | export default sidebars;
34 | 
--------------------------------------------------------------------------------
/docs/README.md:
--------------------------------------------------------------------------------
1 | # Website
2 | 
3 | This website is built using [Docusaurus](https://docusaurus.io/), a modern static website generator.
4 | 
5 | ### Installation
6 | 
7 | ```
8 | $ yarn
9 | ```
10 | 
11 | ### Local Development
12 | 
13 | ```
14 | $ yarn start
15 | ```
16 | 
17 | This command starts a local development server and opens up a browser window. Most changes are reflected live without having to restart the server.
18 | 
19 | ### Build
20 | 
21 | ```
22 | $ yarn build
23 | ```
24 | 
25 | This command generates static content into the `build` directory and can be served using any static content hosting service.
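
To preview the production build locally, you can use the `serve` script defined in `package.json`:

```
$ yarn serve
```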
26 | 
27 | ### Deployment
28 | 
29 | Using SSH:
30 | 
31 | ```
32 | $ USE_SSH=true yarn deploy
33 | ```
34 | 
35 | Not using SSH:
36 | 
37 | ```
38 | $ GIT_USER=<Your GitHub username> yarn deploy
39 | ```
40 | 
41 | If you are using GitHub pages for hosting, this command is a convenient way to build the website and push to the `gh-pages` branch.
42 | 
--------------------------------------------------------------------------------
/disintegrate-macros/src/symbol.rs:
--------------------------------------------------------------------------------
1 | use std::fmt::{self, Display};
2 | use syn::{Ident, Path};
3 | 
4 | #[derive(Copy, Clone)]
5 | pub struct Symbol(&'static str);
6 | 
7 | pub const RENAME: Symbol = Symbol("rename");
8 | pub const STATE_QUERY: Symbol = Symbol("state_query");
9 | pub const ID: Symbol = Symbol("id");
10 | 
11 | impl PartialEq<Symbol> for Ident {
12 |     fn eq(&self, word: &Symbol) -> bool {
13 |         self == word.0
14 |     }
15 | }
16 | 
17 | impl PartialEq<Symbol> for &Ident {
18 |     fn eq(&self, word: &Symbol) -> bool {
19 |         *self == word.0
20 |     }
21 | }
22 | 
23 | impl PartialEq<Symbol> for Path {
24 |     fn eq(&self, word: &Symbol) -> bool {
25 |         self.is_ident(word.0)
26 |     }
27 | }
28 | 
29 | impl PartialEq<Symbol> for &Path {
30 |     fn eq(&self, word: &Symbol) -> bool {
31 |         self.is_ident(word.0)
32 |     }
33 | }
34 | 
35 | impl Display for Symbol {
36 |     fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
37 |         formatter.write_str(self.0)
38 |     }
39 | }
40 | 
--------------------------------------------------------------------------------
/disintegrate-serde/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "disintegrate-serde"
3 | description = "Serialization and deserialization library for Disintegrate event store. Not for direct use. Refer to the `disintegrate` crate for details."
4 | version = "2.1.0" 5 | license.workspace = true 6 | edition.workspace = true 7 | authors.workspace = true 8 | repository.workspace = true 9 | readme.workspace = true 10 | 11 | [features] 12 | default = [] 13 | json = ["dep:serde_json"] 14 | protobuf = ["dep:protobuf"] 15 | prost = ["dep:prost"] 16 | avro = ["dep:apache-avro"] 17 | messagepack = ["dep:rmp-serde"] 18 | full = ["json", "protobuf", "avro", "prost"] 19 | 20 | [dependencies] 21 | thiserror = "2.0.11" 22 | serde_json = { version = "1.0.140", optional = true } 23 | serde = { version = "1.0.217", features = ["derive"] } 24 | protobuf = { version = "3.7.1", optional = true } 25 | apache-avro = { version = "0.17.0", optional = true } 26 | prost = {version = "0.13.5", optional = true} 27 | rmp-serde = { version = "1.3.0", optional = true } 28 | 29 | 30 | [package.metadata.docs.rs] 31 | all-features = true 32 | -------------------------------------------------------------------------------- /examples/courses/proto/event.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package event; 4 | 5 | message Event { 6 | message CourseCreated { 7 | string course_id = 1; 8 | string name = 2; 9 | uint32 seats = 3; 10 | }; 11 | message CourseClosed { 12 | string course_id = 1; 13 | }; 14 | message CourseRenamed { 15 | string course_id = 1; 16 | string name = 2; 17 | }; 18 | message StudentRegistered { 19 | string student_id = 1; 20 | string name = 2; 21 | }; 22 | message StudentSubscribed { 23 | string student_id = 1; 24 | string course_id = 2; 25 | }; 26 | message StudentUnsubscribed { 27 | string student_id = 1; 28 | string course_id = 2; 29 | }; 30 | oneof event { 31 | CourseCreated course_created = 1; 32 | CourseClosed course_closed = 2; 33 | CourseRenamed course_renamed = 3; 34 | StudentRegistered student_registered = 4; 35 | StudentSubscribed student_subscribed = 5; 36 | StudentUnsubscribed student_unsubscribed = 6; 37 | }; 38 | }; 39 | -------------------------------------------------------------------------------- /docs/flake.lock: -------------------------------------------------------------------------------- 1 | { 2 | "nodes": { 3 | "nixpkgs": { 4 | "locked": { 5 | "lastModified": 1711231723, 6 | "narHash": "sha256-dARJQ8AJOv6U+sdRePkbcVyVbXJTi1tReCrkkOeusiA=", 7 | "owner": "NixOS", 8 | "repo": "nixpkgs", 9 | "rev": "e1d501922fd7351da4200e1275dfcf5faaad1220", 10 | "type": "github" 11 | }, 12 | "original": { 13 | "id": "nixpkgs", 14 | "type": "indirect" 15 | } 16 | }, 17 | "root": { 18 | "inputs": { 19 | "nixpkgs": "nixpkgs", 20 | "systems": "systems" 21 | } 22 | }, 23 | "systems": { 24 | "locked": { 25 | "lastModified": 1681028828, 26 | "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", 27 | "owner": "nix-systems", 28 | "repo": "default", 29 | "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", 30 | "type": "github" 31 | }, 32 | "original": { 33 | "owner": "nix-systems", 34 | "repo": "default", 35 | "type": "github" 36 | } 37 | } 38 | }, 39 | "root": "root", 40 | "version": 7 41 | } 42 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 Luca Iachini, Valerio Iachini 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without 
limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 | 
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 | 
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 | 
--------------------------------------------------------------------------------
/docs/src/css/custom.css:
--------------------------------------------------------------------------------
1 | /**
2 |  * Any CSS included here will be global. The classic template
3 |  * bundles Infima by default. Infima is a CSS framework designed to
4 |  * work well for content-centric websites.
5 |  */
6 | 
7 | /* You can override the default Infima variables here. */
8 | :root {
9 |   --ifm-color-primary: #2e8555;
10 |   --ifm-color-primary-dark: #29784c;
11 |   --ifm-color-primary-darker: #277148;
12 |   --ifm-color-primary-darkest: #205d3b;
13 |   --ifm-color-primary-light: #33925d;
14 |   --ifm-color-primary-lighter: #359962;
15 |   --ifm-color-primary-lightest: #3cad6e;
16 |   --ifm-code-font-size: 95%;
17 |   --docusaurus-highlighted-code-line-bg: rgba(0, 0, 0, 0.1);
18 | }
19 | 
20 | /* For readability concerns, you should choose a lighter palette in dark mode. */
21 | [data-theme='dark'] {
22 |   --ifm-color-primary: #25c2a0;
23 |   --ifm-color-primary-dark: #21af90;
24 |   --ifm-color-primary-darker: #1fa588;
25 |   --ifm-color-primary-darkest: #1a8870;
26 |   --ifm-color-primary-light: #29d5b0;
27 |   --ifm-color-primary-lighter: #32d8b4;
28 |   --ifm-color-primary-lightest: #4fddbf;
29 |   --docusaurus-highlighted-code-line-bg: rgba(0, 0, 0, 0.3);
30 | }
31 | 
--------------------------------------------------------------------------------
/examples/cart/src/main.rs:
--------------------------------------------------------------------------------
1 | mod cart;
2 | mod event;
3 | 
4 | use cart::AddItem;
5 | use event::DomainEvent;
6 | 
7 | use anyhow::{Ok, Result};
8 | use disintegrate::{serde::json::Json, NoSnapshot};
9 | use disintegrate_postgres::PgEventStore;
10 | use sqlx::{postgres::PgConnectOptions, PgPool};
11 | 
12 | #[tokio::main]
13 | async fn main() -> Result<()> {
14 |     dotenv::dotenv().unwrap();
15 | 
16 |     // Create a PostgreSQL pool
17 |     let connect_options = PgConnectOptions::new();
18 |     let pool = PgPool::connect_with(connect_options).await?;
19 | 
20 |     // Create a serde to serialize and deserialize events
21 |     let serde = Json::<DomainEvent>::default();
22 | 
23 |     // Create a PostgreSQL event store
24 |     let event_store = PgEventStore::new(pool, serde).await?;
25 | 
26 |     // Create a Postgres DecisionMaker
27 |     let decision_maker = disintegrate_postgres::decision_maker(event_store, NoSnapshot);
28 | 
29 |     // Make the decision. 
This performs the business decision and persists the changes into the
30 |     // event store
31 |     decision_maker
32 |         .make(AddItem::new("user-1".to_string(), "item-1".to_string(), 4))
33 |         .await?;
34 |     Ok(())
35 | }
36 | 
--------------------------------------------------------------------------------
/examples/cart/src/event.rs:
--------------------------------------------------------------------------------
1 | #![allow(clippy::enum_variant_names)]
2 | use disintegrate::Event;
3 | use serde::{Deserialize, Serialize};
4 | 
5 | #[derive(Debug, Clone, PartialEq, Eq, Event, Serialize, Deserialize)]
6 | #[stream(UserEvent, [UserCreated])]
7 | #[stream(CartEvent, [ItemAdded, ItemRemoved, ItemUpdated, CouponApplied])]
8 | #[stream(CouponEvent, [CouponEmitted, CouponApplied])]
9 | pub enum DomainEvent {
10 |     UserCreated {
11 |         #[id]
12 |         user_id: String,
13 |         name: String,
14 |     },
15 |     ItemAdded {
16 |         #[id]
17 |         user_id: String,
18 |         #[id]
19 |         item_id: String,
20 |         quantity: u32,
21 |     },
22 |     ItemRemoved {
23 |         #[id]
24 |         user_id: String,
25 |         #[id]
26 |         item_id: String,
27 |     },
28 |     ItemUpdated {
29 |         #[id]
30 |         user_id: String,
31 |         #[id]
32 |         item_id: String,
33 |         new_quantity: u32,
34 |     },
35 |     CouponEmitted {
36 |         #[id]
37 |         coupon_id: String,
38 |         quantity: u32,
39 |     },
40 |     CouponApplied {
41 |         #[id]
42 |         coupon_id: String,
43 |         #[id]
44 |         user_id: String,
45 |     },
46 | }
47 | 
--------------------------------------------------------------------------------
/examples/courses/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "courses"
3 | version = "0.1.0"
4 | publish = false
5 | license.workspace = true
6 | edition.workspace = true
7 | authors.workspace = true
8 | repository.workspace = true
9 | 
10 | [dependencies]
11 | disintegrate = { version = "2.1.0", path = "../../disintegrate", features = [
12 |     "macros",
13 |     "serde-prost",
14 | ] }
15 | disintegrate-postgres = { version = "2.1.0", path = "../../disintegrate-postgres", features = [
16 |     "listener",
17 | ] }
18 | anyhow = "1.0.94"
19 | async-trait = "0.1.88"
20 | rust_decimal = "1.37.1"
21 | sqlx = { version = "0.8.6", features = ["runtime-tokio-rustls", "postgres"] }
22 | thiserror = "2.0.11"
23 | tokio = { version = "1.47.1", features = [
24 |     "macros",
25 |     "rt-multi-thread",
26 |     "signal",
27 | ] }
28 | tonic = { version = "0.13.1", features = ["gzip", "transport"] }
29 | tonic-health = "0.13.1"
30 | tonic-reflection = "0.13.1"
31 | tower = "0.5.2"
32 | tower-http = { version = "0.6.6", features = ["trace"] }
33 | prost = { version = "0.13.5" }
34 | dotenv = "0.15.0"
35 | serde = "1.0.217"
36 | tracing = "0.1.41"
37 | tracing-subscriber = "0.3.19"
38 | 
39 | [build-dependencies]
40 | tonic-build = { version = "0.13.1", features = ["prost"] }
41 | 
--------------------------------------------------------------------------------
/disintegrate/src/listener.rs:
--------------------------------------------------------------------------------
1 | //! Event listeners handle events that are emitted.
2 | use async_trait::async_trait;
3 | 
4 | use crate::{
5 |     event::{Event, EventId, PersistedEvent},
6 |     stream_query::StreamQuery,
7 | };
8 | 
9 | /// Represents an event listener, which handles persisted events.
10 | #[async_trait]
11 | pub trait EventListener<ID: EventId, E: Event>: Send + Sync {
12 |     /// The type of error that may occur while handling an event.
13 |     type Error;
14 | 
15 |     /// Returns the unique identifier of the event listener. 
16 |     ///
17 |     /// It is typically a string or identifier that helps identify and distinguish the event handler.
18 |     fn id(&self) -> &'static str;
19 | 
20 |     /// Returns the stream query used by the event listener.
21 |     ///
22 |     /// The query specifies the criteria for the events that the event listener can handle.
23 |     fn query(&self) -> &StreamQuery<ID, E>;
24 | 
25 |     /// Handles an event.
26 |     ///
27 |     /// This method handles an event coming from the event stream.
28 |     /// It returns a result indicating success, or an error that occurred while handling the event.
29 |     async fn handle(&self, event: PersistedEvent<ID, E>) -> Result<(), Self::Error>;
30 | }
31 | 
--------------------------------------------------------------------------------
/docs/package.json:
--------------------------------------------------------------------------------
1 | {
2 |   "name": "disintegrate",
3 |   "version": "0.0.0",
4 |   "private": true,
5 |   "scripts": {
6 |     "docusaurus": "docusaurus",
7 |     "start": "docusaurus start",
8 |     "build": "docusaurus build",
9 |     "swizzle": "docusaurus swizzle",
10 |     "deploy": "docusaurus deploy",
11 |     "clear": "docusaurus clear",
12 |     "serve": "docusaurus serve",
13 |     "write-translations": "docusaurus write-translations",
14 |     "write-heading-ids": "docusaurus write-heading-ids"
15 |   },
16 |   "dependencies": {
17 |     "@docusaurus/core": "^3.2.0",
18 |     "@docusaurus/preset-classic": "^3.2.0",
19 |     "@docusaurus/theme-mermaid": "^3.2.0",
20 |     "@mdx-js/react": "^3.0.0",
21 |     "clsx": "^2.0.0",
22 |     "prism-react-renderer": "^2.3.0",
23 |     "react": "^18.0.0",
24 |     "react-dom": "^18.0.0"
25 |   },
26 |   "devDependencies": {
27 |     "@docusaurus/module-type-aliases": "^3.2.0",
28 |     "@docusaurus/types": "^3.2.0"
29 |   },
30 |   "browserslist": {
31 |     "production": [
32 |       ">0.5%",
33 |       "not dead",
34 |       "not op_mini all"
35 |     ],
36 |     "development": [
37 |       "last 3 chrome version",
38 |       "last 3 firefox version",
39 |       "last 5 safari version"
40 |     ]
41 |   },
42 |   "engines": {
43 |     "node": ">=18.0"
44 |   }
45 | }
46 | 
--------------------------------------------------------------------------------
/disintegrate-postgres/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "disintegrate-postgres"
3 | description = "Disintegrate PostgreSQL implementation. Not for direct use. Refer to the `disintegrate` crate for details."
4 | version = "2.1.0" 5 | license.workspace = true 6 | edition.workspace = true 7 | authors.workspace = true 8 | repository.workspace = true 9 | readme.workspace = true 10 | 11 | [features] 12 | default = [] 13 | listener = ["dep:tokio-util"] 14 | 15 | [dependencies] 16 | disintegrate = { version = "2.1.0", path = "../disintegrate" } 17 | disintegrate-serde = { version = "2.1.0", path = "../disintegrate-serde" } 18 | disintegrate-macros = { version = "2.1.0", path = "../disintegrate-macros" } 19 | serde = "1.0.217" 20 | serde_json = "1.0.140" 21 | sqlx = { version = "0.8.6", features = ["postgres", "runtime-tokio-rustls", "uuid"] } 22 | async-trait = "0.1.88" 23 | futures = "0.3.30" 24 | async-stream = "0.3.5" 25 | thiserror = "2.0.11" 26 | tokio = {version = "1.47.1", features = ["macros"]} 27 | tokio-util = {version = "0.7.16", optional = true} 28 | uuid = { version = "1.16.0", features = ["v3"] } 29 | md-5 = "0.10.6" 30 | paste = "1.0.14" 31 | 32 | [dev-dependencies] 33 | disintegrate-serde = { version = "2.1.0", path = "../disintegrate-serde", features = ["json"] } 34 | 35 | [package.metadata.docs.rs] 36 | all-features = true 37 | -------------------------------------------------------------------------------- /disintegrate-postgres/src/error.rs: -------------------------------------------------------------------------------- 1 | use std::error::Error as StdError; 2 | use thiserror::Error; 3 | 4 | /// Represents all the ways a method can fail within Disintegrate Postgres. 5 | #[derive(Error, Debug)] 6 | pub enum Error { 7 | /// Error returned from the database. 8 | #[error(transparent)] 9 | Database(#[from] sqlx::Error), 10 | /// An error occurred while deserializing an event payload. 11 | #[error(transparent)] 12 | Deserialization(#[from] disintegrate_serde::Error), 13 | /// An error occurred while acquiring an append permit. 14 | #[error(transparent)] 15 | AppendPermit(#[from] tokio::sync::AcquireError), 16 | /// An error occurred while mapping the event store event to the query event 17 | #[error("unable to map the event store event to the query event: {0}")] 18 | QueryEventMapping(#[source] Box), 19 | // An error occurred while attempting to persist events using an outdated version of the event set. 20 | /// 21 | /// This error indicates that another process has inserted a new event that was not included in the event stream query 22 | /// used to make the current business decision. The event store's state has changed, potentially affecting the decision-making process. 
23 | #[error("concurrent modification error")] 24 | Concurrency, 25 | } 26 | -------------------------------------------------------------------------------- /.github/workflows/deploy-docs.yml: -------------------------------------------------------------------------------- 1 | name: Deploy Docs to GitHub Pages 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | paths: 8 | - 'docs/**' 9 | 10 | defaults: 11 | run: 12 | working-directory: ./docs 13 | 14 | jobs: 15 | build: 16 | name: Build Docs 17 | runs-on: ubuntu-latest 18 | steps: 19 | - uses: actions/checkout@v4 20 | with: 21 | fetch-depth: 0 22 | - uses: actions/setup-node@v4 23 | with: 24 | node-version: 18 25 | cache: yarn 26 | cache-dependency-path: './docs' 27 | 28 | - name: Install dependencies 29 | run: yarn install --frozen-lockfile 30 | - name: Build website 31 | run: yarn build 32 | 33 | - name: Upload Build Artifact 34 | uses: actions/upload-pages-artifact@v3 35 | with: 36 | path: ./docs/build 37 | 38 | deploy: 39 | name: Deploy Docs to GitHub Pages 40 | needs: build 41 | 42 | permissions: 43 | pages: write 44 | id-token: write 45 | 46 | environment: 47 | name: github-pages 48 | url: ${{ steps.deployment.outputs.page_url }} 49 | 50 | runs-on: ubuntu-latest 51 | steps: 52 | - name: Deploy to GitHub Pages 53 | id: deployment 54 | uses: actions/deploy-pages@v4 55 | -------------------------------------------------------------------------------- /disintegrate/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "disintegrate" 3 | description = "Disintegrate is a Rust library to build event-sourced applications." 4 | version = "2.1.0" 5 | edition.workspace = true 6 | authors.workspace = true 7 | repository.workspace = true 8 | readme = "README.md" 9 | license.workspace = true 10 | 11 | [features] 12 | macros = ["disintegrate-macros"] 13 | serde = ["disintegrate-serde"] 14 | serde-avro = ["serde", "disintegrate-serde/avro"] 15 | serde-json = ["serde", "disintegrate-serde/json"] 16 | serde-messagepack = ["serde", "disintegrate-serde/messagepack"] 17 | serde-prost = ["serde", "disintegrate-serde/prost"] 18 | serde-protobuf = ["serde", "disintegrate-serde/protobuf"] 19 | 20 | [dependencies] 21 | disintegrate-serde = { version = "2.1.0", path = "../disintegrate-serde", optional = true } 22 | disintegrate-macros = { version = "2.1.0", path = "../disintegrate-macros", optional = true } 23 | async-trait = "0.1.88" 24 | futures = "0.3.30" 25 | lazy_static = "1.4.0" 26 | regex = "1.11.1" 27 | serde = { version = "1.0.217", features = ["derive"] } 28 | thiserror = "2.0.11" 29 | mockall = "0.13.1" 30 | paste = "1.0.14" 31 | uuid = { version = "1.16.0", features = ["serde"] } 32 | async-stream = "0.3.5" 33 | 34 | [dev-dependencies] 35 | assert2 = "0.3.14" 36 | uuid = { version = "1.16.0", features = ["v4"] } 37 | tokio = {version = "1.47.1", features = ["macros", "rt-multi-thread"]} 38 | 39 | [package.metadata.docs.rs] 40 | all-features = true 41 | -------------------------------------------------------------------------------- /examples/courses/src/application/commands.rs: -------------------------------------------------------------------------------- 1 | use super::Application; 2 | use crate::domain::{ 3 | CloseCourse, CreateCourse, RegisterStudent, RenameCourse, SubscribeStudent, UnsubscribeStudent, 4 | }; 5 | use anyhow::Result; 6 | use tracing::instrument; 7 | 8 | impl Application { 9 | #[instrument(skip(self))] 10 | pub async fn create_course(&self, command: CreateCourse) -> 
Result<()> {
11 |         self.decision_maker.make(command).await?;
12 |         Ok(())
13 |     }
14 | 
15 |     #[instrument(skip(self))]
16 |     pub async fn close_course(&self, command: CloseCourse) -> Result<()> {
17 |         self.decision_maker.make(command).await?;
18 |         Ok(())
19 |     }
20 | 
21 |     #[instrument(skip(self))]
22 |     pub async fn rename_course(&self, command: RenameCourse) -> Result<()> {
23 |         self.decision_maker.make(command).await?;
24 |         Ok(())
25 |     }
26 | 
27 |     #[instrument(skip(self))]
28 |     pub async fn register_student(&self, command: RegisterStudent) -> Result<()> {
29 |         self.decision_maker.make(command).await?;
30 |         Ok(())
31 |     }
32 | 
33 |     #[instrument(skip(self))]
34 |     pub async fn subscribe_student(&self, command: SubscribeStudent) -> Result<()> {
35 |         self.decision_maker.make(command).await?;
36 |         Ok(())
37 |     }
38 | 
39 |     #[instrument(skip(self))]
40 |     pub async fn unsubscribe_student(&self, command: UnsubscribeStudent) -> Result<()> {
41 |         self.decision_maker.make(command).await?;
42 |         Ok(())
43 |     }
44 | }
45 | 
--------------------------------------------------------------------------------
/disintegrate-macros/tests/state_query.rs:
--------------------------------------------------------------------------------
1 | use disintegrate::{query, Event, StateQuery};
2 | 
3 | #[allow(dead_code)]
4 | #[derive(Event, Debug, PartialEq, Eq, Clone)]
5 | enum DomainEvent {
6 |     UserCreated {
7 |         #[id]
8 |         user_id: i64,
9 |         name: String,
10 |         email: String,
11 |     },
12 |     OrderCreated {
13 |         #[id]
14 |         user_id: i64,
15 |         #[id]
16 |         order_id: String,
17 |         amount: u32,
18 |     },
19 | }
20 | 
21 | #[derive(StateQuery, Debug, PartialEq, Eq, Clone)]
22 | #[state_query(DomainEvent)]
23 | struct UserOrders {
24 |     #[id]
25 |     user_id: i64,
26 | }
27 | 
28 | #[derive(StateQuery, Debug, PartialEq, Eq, Clone)]
29 | #[state_query(DomainEvent, rename = "UserOrderData")]
30 | struct UserOrder {
31 |     #[id]
32 |     user_id: i64,
33 |     #[id]
34 |     order_id: String,
35 | }
36 | 
37 | #[test]
38 | fn it_sets_the_name_of_a_state_query() {
39 |     assert_eq!(UserOrders::NAME, "UserOrders");
40 | }
41 | 
42 | #[test]
43 | fn it_renames_a_state_query() {
44 |     assert_eq!(UserOrder::NAME, "UserOrderData");
45 | }
46 | 
47 | #[test]
48 | fn it_builds_the_stream_query() {
49 |     let user_orders = UserOrders { user_id: 1 };
50 |     assert_eq!(
51 |         user_orders.query::<i64>(),
52 |         query!(DomainEvent; user_id == 1)
53 |     );
54 | 
55 |     let user_order = UserOrder {
56 |         user_id: 2,
57 |         order_id: "order1".to_string(),
58 |     };
59 |     assert_eq!(
60 |         user_order.query::<i64>(),
61 |         query!(DomainEvent; user_id == 2, order_id == "order1")
62 |     );
63 | }
64 | 
--------------------------------------------------------------------------------
/examples/courses/src/domain.rs:
--------------------------------------------------------------------------------
1 | mod course;
2 | mod student;
3 | mod subscription;
4 | mod unsubscription;
5 | pub use course::{CloseCourse, CourseError, CourseId, CreateCourse, RenameCourse};
6 | use disintegrate::Event;
7 | pub use student::{RegisterStudent, StudentError, StudentId};
8 | pub use subscription::{SubscribeStudent, SubscriptionError};
9 | pub use unsubscription::{UnsubscribeStudent, UnsubscriptionError};
10 | 
11 | #[derive(Debug, Clone, PartialEq, Eq, Event)]
12 | #[stream(CourseSubscriptionEvent, [CourseCreated, CourseClosed, StudentSubscribed, StudentUnsubscribed])]
13 | #[stream(StudentSubscriptionEvent, [StudentSubscribed, StudentUnsubscribed, StudentRegistered])]
14 | #[stream(UnsubscriptionEvent, [StudentSubscribed, StudentUnsubscribed])]
15 | 
#[stream(CourseEvent, [CourseCreated, CourseClosed, CourseRenamed])]
16 | #[stream(StudentEvent, [StudentRegistered])]
17 | pub enum DomainEvent {
18 |     CourseCreated {
19 |         #[id]
20 |         course_id: CourseId,
21 |         name: String,
22 |         seats: u32,
23 |     },
24 |     CourseClosed {
25 |         #[id]
26 |         course_id: CourseId,
27 |     },
28 |     CourseRenamed {
29 |         #[id]
30 |         course_id: CourseId,
31 |         name: String,
32 |     },
33 |     StudentRegistered {
34 |         #[id]
35 |         student_id: StudentId,
36 |         name: String,
37 |     },
38 |     StudentSubscribed {
39 |         #[id]
40 |         student_id: StudentId,
41 |         #[id]
42 |         course_id: CourseId,
43 |     },
44 |     StudentUnsubscribed {
45 |         #[id]
46 |         student_id: StudentId,
47 |         #[id]
48 |         course_id: CourseId,
49 |     },
50 | }
51 | 
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contributing Guidelines
2 | 
3 | Thank you for considering contributing to the Disintegrate project! We welcome contributions from everyone, and this document outlines the guidelines and steps to contribute to the project.
4 | 
5 | ## Getting Started
6 | 
7 | To get started with contributing, please follow these steps:
8 | 
9 | 1. Fork the repository on GitHub.
10 | 2. Clone the forked repository to your local machine.
11 | 3. Create a new branch for your contribution.
12 | 4. Make your changes and ensure that the tests pass.
13 | 5. Commit your changes with a descriptive commit message.
14 | 6. Push your changes to your forked repository.
15 | 7. Submit a pull request to the main repository.
16 | 
17 | ## Code Style
18 | 
19 | We follow the Rust code style guidelines to maintain a consistent and readable codebase. Please make sure that your code adheres to these guidelines before submitting a pull request. You can use tools like `rustfmt` to automatically format your code.
20 | 
21 | ## Issue Tracker
22 | 
23 | If you encounter any issues, have questions, or want to propose new features, please use the issue tracker on GitHub. We appreciate detailed bug reports and well-defined feature requests.
24 | 
25 | ## Code of Conduct
26 | 
27 | We expect all contributors to follow the project's Code of Conduct. Please be respectful and considerate towards others when participating in discussions or contributing code.
28 | 
29 | ## License
30 | 
31 | By contributing to the Disintegrate project, you agree that your contributions will be licensed under the project's [MIT License](LICENSE).
32 | 
33 | We appreciate your contributions and thank you for helping improve the Disintegrate project!
34 | 
35 | 
--------------------------------------------------------------------------------
/disintegrate-postgres/src/lib.rs:
--------------------------------------------------------------------------------
1 | //! # PostgreSQL Disintegrate Backend Library
2 | mod error;
3 | mod event_store;
4 | #[cfg(feature = "listener")]
5 | mod listener;
6 | mod snapshotter;
7 | 
8 | pub use crate::event_store::PgEventStore;
9 | #[cfg(feature = "listener")]
10 | pub use crate::listener::{
11 |     id_indexer::{Error as PgIdIndexerError, PgIdIndexer},
12 |     PgEventListener, PgEventListenerConfig,
13 | };
14 | pub use crate::snapshotter::PgSnapshotter;
15 | use disintegrate::{DecisionMaker, Event, EventSourcedStateStore, SnapshotConfig, WithSnapshot};
16 | use disintegrate_serde::Serde;
17 | pub use error::Error;
18 | 
19 | pub type PgEventId = i64;
20 | 
21 | /// An alias for [`DecisionMaker`], specialized for Postgres.
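/// `E` is the domain event type, `S` the serde used to serialize and
/// deserialize events, and `SN` the snapshot configuration (see
/// [`decision_maker`] below).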
22 | pub type PgDecisionMaker<E, S, SN> =
23 |     DecisionMaker<EventSourcedStateStore<PgEventId, E, PgEventStore<E, S>, SN>>;
24 | 
25 | /// An alias for [`WithSnapshot`], specialized for Postgres.
26 | pub type WithPgSnapshot = WithSnapshot<PgSnapshotter>;
27 | 
28 | /// Creates a decision maker specialized for PostgreSQL.
29 | ///
30 | /// # Arguments
31 | ///
32 | /// - `event_store`: An instance of `PgEventStore`.
33 | /// - `snapshot_config`: The `SnapshotConfig` to be used for the snapshotting.
34 | ///
35 | /// # Returns
36 | ///
37 | /// A `PgDecisionMaker` with snapshotting configured according to the provided `snapshot_config`.
38 | pub fn decision_maker<
39 |     E: Event + Send + Sync + Clone,
40 |     S: Serde<E> + Clone + Sync + Send,
41 |     SN: SnapshotConfig + Clone,
42 | >(
43 |     event_store: PgEventStore<E, S>,
44 |     snapshot_config: SN,
45 | ) -> PgDecisionMaker<E, S, SN> {
46 |     DecisionMaker::new(EventSourcedStateStore::new(event_store, snapshot_config))
47 | }
48 | 
--------------------------------------------------------------------------------
/examples/banking/tests/validation_query_test.js:
--------------------------------------------------------------------------------
1 | import http from 'k6/http';
2 | import { check, sleep } from 'k6';
3 | import exec from 'k6/execution';
4 | 
5 | const serverUrl = 'http://localhost:8080';
6 | const USERS = 200;
7 | const CONFIG = {headers: { 'Content-Type': 'application/json' }};
8 | 
9 | export let options = {
10 |     vus: USERS,
11 |     duration: '10s',
12 | };
13 | 
14 | function buildAmountPayload(amount){
15 |     return JSON.stringify({
16 |         amount: amount
17 |     });
18 | }
19 | 
20 | function requestOpenAccount(id) {
21 |     return http.post(`${serverUrl}/account/${id}/open`, null, CONFIG);
22 | }
23 | 
24 | 
25 | function requestDeposit(id, amount) {
26 |     return http.post(`${serverUrl}/account/${id}/deposit`, buildAmountPayload(amount), CONFIG);
27 | }
28 | 
29 | function requestWithdraw(id, amount) {
30 |     return http.post(`${serverUrl}/account/${id}/withdraw`, buildAmountPayload(amount), CONFIG);
31 | }
32 | 
33 | function requestTransfer(id, beneficiary_id, amount) {
34 |     return http.post(`${serverUrl}/account/${id}/transfer/${beneficiary_id}`, buildAmountPayload(amount), CONFIG);
35 | }
36 | 
37 | export function setup() {
38 |     // Register accounts
39 |     for(let i = 1; i <= USERS; i ++){
40 |         requestOpenAccount(i);
41 |         requestDeposit(i, 100);
42 |     }
43 | }
44 | 
45 | export default function (data) {
46 |     let id = exec.vu.idInTest;
47 | 
48 |     let res;
49 |     if(id === 1){
50 |         res = requestWithdraw(id, 1);
51 |     }else{
52 |         res = requestTransfer(id, 1, 20);
53 |     }
54 | 
55 |     check(res, {
56 |         'OK': (r) => r.status === 200,
57 |         'ERR Bad': (r) => r.status === 400,
58 |         'ERR Conflict': (r) => r.status === 500,
59 |     });
60 | }
61 | 
--------------------------------------------------------------------------------
/docs/docs/intro.md:
--------------------------------------------------------------------------------
1 | ---
2 | slug: /
3 | sidebar_position: 1
4 | ---
5 | 
6 | # Disintegrate Docs
7 | **Welcome to Disintegrate!**
8 | 
9 | **Disintegrate** was inspired by Sara Pellegrini's talk, ["Kill the Aggregate,"](https://www.youtube.com/watch?v=DhhxKoOpJe0) which pointed out the challenge of identifying the right aggregates at the beginning of a project. This resonated with us because, from our experience with aggregate and event sourcing applications, we've learned that new features often require accessing information from multiple aggregates.
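
As a sketch of what this enables (adapted from the cart example in this repository), a single event type can be declared as part of several streams, so a decision can read across what would otherwise be separate aggregate boundaries. Here, `CouponApplied` belongs to both the cart stream and the coupon stream:

```rust
use disintegrate::Event;
use serde::{Deserialize, Serialize};

// One shared domain event enum; each #[stream] attribute names an event
// stream and lists the variants it contains. CouponApplied appears in both
// streams, so cart and coupon decisions can each observe it.
#[derive(Debug, Clone, PartialEq, Eq, Event, Serialize, Deserialize)]
#[stream(CartEvent, [ItemAdded, CouponApplied])]
#[stream(CouponEvent, [CouponEmitted, CouponApplied])]
pub enum DomainEvent {
    ItemAdded {
        #[id]
        user_id: String,
        #[id]
        item_id: String,
        quantity: u32,
    },
    CouponEmitted {
        #[id]
        coupon_id: String,
        quantity: u32,
    },
    CouponApplied {
        #[id]
        coupon_id: String,
        #[id]
        user_id: String,
    },
}
```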
This issue is also well explained in Gregory Young's book, ["Versioning in an Event Sourced System,"](https://leanpub.com/esversioning/read) especially in the chapter titled ["Stream Boundaries Are Wrong."](https://leanpub.com/esversioning/read#leanpub-auto-stream-boundaries-are-wrong) Young discusses the need to potentially **split or join streams** (a stream being a broader notion than an aggregate) to meet changing requirements. This splitting or joining can be done at the storage level, by duplicating events to create new streams (for split-stream scenarios) or combining streams (for join-stream scenarios), or dynamically, by allowing the application to read multiple streams, as explained in the book's section on ["Cheating."](https://leanpub.com/esversioning/read#leanpub-auto-cheating)
10 | 
11 | What **Disintegrate** provides is an implementation of the approach described in the ["Cheating"](https://leanpub.com/esversioning/read#leanpub-auto-cheating) chapter of Young's book: a way to query the event store so that multiple streams can be split or joined dynamically to build the states used for decision-making. We've developed a mechanism that simplifies writing applications, making it easy to extend them to accommodate new features without imposing strict boundaries on the streams.
12 | 
--------------------------------------------------------------------------------
/examples/courses/proto/api.proto:
--------------------------------------------------------------------------------
1 | syntax = "proto3";
2 | 
3 | package api;
4 | 
5 | service Course {
6 |     rpc Create(CreateCourseRequest) returns (CreateCourseResponse) {}
7 |     rpc Close(CloseCourseRequest) returns (CloseCourseResponse) {}
8 |     rpc Rename(RenameCourseRequest) returns (RenameCourseResponse) {}
9 |     rpc Get(GetCourseRequest) returns (GetCourseResponse) {}
10 | }
11 | 
12 | message CreateCourseRequest {
13 |     string course_id = 1;
14 |     string name = 2;
15 |     uint32 seats = 3;
16 | }
17 | 
18 | message CloseCourseRequest {
19 |     string course_id = 1;
20 | }
21 | 
22 | message RenameCourseRequest {
23 |     string course_id = 1;
24 |     string name = 2;
25 | }
26 | 
27 | message GetCourseRequest {
28 |     string course_id = 1;
29 | }
30 | 
31 | message CreateCourseResponse {}
32 | message CloseCourseResponse {}
33 | message RenameCourseResponse {}
34 | message GetCourseResponse {
35 |     string course_id = 1;
36 |     string name = 2;
37 |     int32 available_seats = 3;
38 |     bool closed = 4;
39 | }
40 | 
41 | 
42 | service Student {
43 |     rpc Register(RegisterStudentRequest) returns (RegisterStudentResponse) {}
44 | }
45 | 
46 | message RegisterStudentRequest {
47 |     string student_id = 1;
48 |     string name = 2;
49 | }
50 | 
51 | message RegisterStudentResponse {}
52 | 
53 | service Subscription {
54 |     rpc Subscribe(SubscribeStudentRequest) returns (SubscribeStudentResponse) {}
55 |     rpc Unsubscribe(UnsubscribeStudentRequest) returns (UnsubscribeStudentResponse) {}
56 | }
57 | 
58 | message SubscribeStudentRequest {
59 |     string course_id = 1;
60 |     string student_id = 2;
61 | }
62 | 
63 | message UnsubscribeStudentRequest {
64 |     string course_id = 1;
65 |     string student_id = 2;
66 | }
67 | 
68 | message SubscribeStudentResponse {}
69 | message UnsubscribeStudentResponse {}
70 | 
71 | 
72 | 
73 | 
--------------------------------------------------------------------------------
/disintegrate-serde/src/serde.rs:
--------------------------------------------------------------------------------
1 | #[cfg(feature = "avro")]
2 | pub mod avro;
3 | #[cfg(feature = "json")]
4 | pub mod json;
5 | 
6 | #[cfg(feature = "messagepack")]
7 | pub mod 
messagepack;
  8 | 
  9 | #[cfg(feature = "prost")]
 10 | pub mod prost;
 11 | #[cfg(feature = "protobuf")]
 12 | pub mod protobuf;
 13 | 
 14 | /// Serialization and deserialization error.
 15 | #[derive(Debug, thiserror::Error)]
 16 | pub enum Error {
 17 |     /// an error occurred during the deserialization of the data
 18 |     #[error("deserialization error: {0}")]
 19 |     Deserialization(#[source] Box<dyn std::error::Error + Send + Sync>),
 20 |     /// an error occurred while converting the persisted data to the application data
 21 |     #[error("conversion error")]
 22 |     Conversion,
 23 | }
 24 | 
 25 | /// Defines the behavior for serializing values of type `T`.
 26 | pub trait Serializer<T> {
 27 |     /// Serializes a value of type `T` into a byte vector.
 28 |     ///
 29 |     /// # Arguments
 30 |     ///
 31 |     /// * `value` - The value to be serialized.
 32 |     ///
 33 |     /// # Returns
 34 |     ///
 35 |     /// A byte vector containing the serialized representation of the value.
 36 |     fn serialize(&self, value: T) -> Vec<u8>;
 37 | }
 38 | 
 39 | /// Defines the behavior for deserializing values of type `T`.
 40 | pub trait Deserializer<T> {
 41 |     /// Deserializes a byte vector into a value of type `T`.
 42 |     ///
 43 |     /// # Arguments
 44 |     ///
 45 |     /// * `data` - The byte vector to be deserialized.
 46 |     ///
 47 |     /// # Returns
 48 |     ///
 49 |     /// A `Result` containing the deserialized value on success, or an error on failure.
 50 |     fn deserialize(&self, data: Vec<u8>) -> Result<T, Error>;
 51 | }
 52 | 
 53 | /// Combines the `Serializer` and `Deserializer` traits for convenience.
 54 | pub trait Serde<T>: Serializer<T> + Deserializer<T> {}
 55 | 
 56 | impl<T, K> Serde<T> for K where K: Serializer<T> + Deserializer<T> {}
 57 | 
--------------------------------------------------------------------------------
/docs/src/components/HomepageFeatures/index.js:
--------------------------------------------------------------------------------
 1 | import clsx from 'clsx';
 2 | import Heading from '@theme/Heading';
 3 | import styles from './styles.module.css';
 4 | 
 5 | const FeatureList = [
 6 |   {
 7 |     title: 'Easy to Use',
 8 |     Svg: require('@site/static/img/undraw_docusaurus_mountain.svg').default,
 9 |     description: (
10 |       <>
11 |         Docusaurus was designed from the ground up to be easily installed and
12 |         used to get your website up and running quickly.
13 |       </>
14 |     ),
15 |   },
16 |   {
17 |     title: 'Focus on What Matters',
18 |     Svg: require('@site/static/img/undraw_docusaurus_tree.svg').default,
19 |     description: (
20 |       <>
21 |         Docusaurus lets you focus on your docs, and we'll do the chores. Go
22 |         ahead and move your docs into the docs directory.
23 |       </>
24 |     ),
25 |   },
26 |   {
27 |     title: 'Powered by React',
28 |     Svg: require('@site/static/img/undraw_docusaurus_react.svg').default,
29 |     description: (
30 |       <>
31 |         Extend or customize your website layout by reusing React. Docusaurus can
32 |         be extended while reusing the same header and footer.
33 |       </>
34 |     ),
35 |   },
36 | ];
37 | 
38 | function Feature({Svg, title, description}) {
39 |   return (
40 |     <div className={clsx('col col--4')}>
41 |       <div className="text--center">
42 |         <Svg className={styles.featureSvg} role="img" />
43 |       </div>
44 |       <div className="text--center padding-horiz--md">
45 |         <Heading as="h3">{title}</Heading>
46 |         <p>{description}</p>
47 |       </div>
48 |     </div>
49 |   );
50 | }
51 | 
52 | export default function HomepageFeatures() {
53 |   return (
54 |     <section className={styles.features}>
55 |       <div className="container">
56 |         <div className="row">
57 |           {FeatureList.map((props, idx) => (
58 |             <Feature key={idx} {...props} />
59 |           ))}
60 |         </div>
61 |       </div>
62 |     </section>
63 | ); 64 | } 65 | -------------------------------------------------------------------------------- /disintegrate-serde/src/serde/protobuf.rs: -------------------------------------------------------------------------------- 1 | //! A Protobuf serialization and deserialization module. 2 | use std::marker::PhantomData; 3 | 4 | use super::Error; 5 | use protobuf::Message; 6 | 7 | use crate::serde::{Deserializer, Serializer}; 8 | 9 | /// A struct to serialize and deserialize Protobuf payloads. 10 | #[derive(Debug, Clone, Copy)] 11 | pub struct Protobuf(PhantomData, PhantomData) 12 | where 13 | O: Message; 14 | 15 | impl Default for Protobuf 16 | where 17 | O: Message, 18 | { 19 | fn default() -> Self { 20 | Self(PhantomData, PhantomData) 21 | } 22 | } 23 | 24 | impl Serializer for Protobuf 25 | where 26 | O: From + Message, 27 | { 28 | /// Serializes the given value to a byte vector. 29 | /// 30 | /// # Arguments 31 | /// 32 | /// * `value` - The value to be serialized. 33 | /// 34 | /// # Returns 35 | /// 36 | /// A byte vector containing the serialized data. 37 | /// 38 | /// # Panics 39 | /// 40 | /// Panics if the serialization from Rust type to Protobuf format fails. 41 | fn serialize(&self, value: I) -> Vec { 42 | let target = O::from(value); 43 | target 44 | .write_to_bytes() 45 | .expect("serialization from rust type to protobuf format should be successful") 46 | } 47 | } 48 | 49 | impl Deserializer for Protobuf 50 | where 51 | I: TryFrom, 52 | O: Message, 53 | { 54 | /// Deserializes the given byte vector to a target type. 55 | /// 56 | /// # Arguments 57 | /// 58 | /// * `data` - The byte vector to be deserialized. 59 | /// 60 | /// # Returns 61 | /// 62 | /// A `Result` containing the deserialized value on success, or an error on failure. 63 | fn deserialize(&self, data: Vec) -> Result { 64 | let target = O::parse_from_bytes(&data).map_err(|e| Error::Deserialization(Box::new(e)))?; 65 | I::try_from(target).map_err(|_| Error::Conversion) 66 | } 67 | } 68 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | 3 | on: 4 | push: 5 | paths-ignore: 6 | - 'docs/**' 7 | pull_request: 8 | paths-ignore: 9 | - 'docs/**' 10 | env: 11 | CARGO_TERM_COLOR: always 12 | RUST_BACKTRACE: 1 13 | 14 | jobs: 15 | test: 16 | name: Test 17 | runs-on: ubuntu-latest 18 | services: 19 | postgres: 20 | image: postgres 21 | env: 22 | POSTGRES_USER: postgres 23 | POSTGRES_PASSWORD: postgres 24 | POSTGRES_DB: event_store 25 | ports: 26 | - 5432:5432 27 | options: >- 28 | --health-cmd pg_isready 29 | --health-interval 10s 30 | --health-timeout 5s 31 | --health-retries 5 32 | steps: 33 | - name: Checkout repository 34 | uses: actions/checkout@v3 35 | - name: Cache dependencies 36 | id: cache-dependencies 37 | uses: actions/cache@v3 38 | with: 39 | path: | 40 | ~/.cargo/registry 41 | ~/.cargo/git 42 | target 43 | key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }} 44 | - name: Install stable toolchain 45 | uses: actions-rs/toolchain@v1 46 | with: 47 | profile: minimal 48 | toolchain: stable 49 | override: true 50 | - name: Install Protoc 51 | uses: 4w3official/setup-protoc-action@v1 52 | - name: Run cargo test 53 | run: cargo test --verbose --workspace --all-features 54 | 55 | fmt: 56 | name: Rustfmt 57 | runs-on: ubuntu-latest 58 | steps: 59 | - uses: actions/checkout@v3 60 | - name: Run cargo fmt 61 | run: cargo fmt --all -- --check 62 | 63 | clippy: 64 
| name: Clippy 65 | runs-on: ubuntu-latest 66 | steps: 67 | - name: Checkout repository 68 | uses: actions/checkout@v3 69 | - name: Install Protoc 70 | uses: 4w3official/setup-protoc-action@v1 71 | - name: Run clippy 72 | run: cargo clippy --workspace --all-features -- -D warnings 73 | -------------------------------------------------------------------------------- /docs/docs/developer_journey/event_storming.md: -------------------------------------------------------------------------------- 1 | --- 2 | sidebar_position: 1 3 | --- 4 | 5 | # Event Storming 6 | Event storming is a collaborative workshop technique used to explore and model complex business domains. Let's consider a shopping cart application as an example. During an event storming session for the shopping cart system, we identify various events, commands, and entities: 7 | * Events: `ItemAdded`, `ItemRemoved`, `CartCheckedOut`, ... 8 | * Commands: `AddItem`, `RemoveItem`, `CheckoutCart`, ... 9 | * Entities: `Products`, `Customers`, `Carts`, `Orders`, ... 10 | 11 | 12 | ## Example: 13 | During the event storming session, we map out the flow of events and commands within the shopping cart system. For instance, the `AddItem` command triggers the `ItemAdded` event, updating the contents of the user's shopping cart. Similarly, the `CheckoutCart` command results in the `CartCheckedOut` event, indicating the completion of a purchase. 14 | 15 | ### Events, Identifiers, and Commands 16 | In the context of the shopping cart system: 17 | * **Events**: Signify important actions within the shopping cart, such as adding items, removing items, applying coupons, and completing purchases. 18 | * **Identifiers**: Uniquely distinguish entities like products, customers, coupons, and orders, ensuring precise tracking and management. 19 | * **Commands**: User-initiated actions that alter the state of the shopping cart, including adding items, removing items, applying coupons, and initiating checkout processes. 20 | 21 | 22 | ## Example: 23 | 24 | When a customer adds an item to their shopping cart, the system generates an `ItemAdded` event, capturing details such as the item ID, quantity, and timestamp. This event triggers the updating of the shopping cart's contents, ensuring an accurate reflection of the user's selections. During an event storming session, you identify the aggregates that define transaction boundaries within the system. Each aggregate encapsulates related entities and actions, promoting consistency in data updates. 25 | In the shopping cart scenario, identifying aggregates may involve delineating entities like the shopping cart itself and items. Each aggregate's state should include only data pertinent to the commands it handles, minimizing unnecessary memory usage. 26 | 27 | -------------------------------------------------------------------------------- /disintegrate-serde/src/serde/json.rs: -------------------------------------------------------------------------------- 1 | //! A JSON serialization and deserialization module. 2 | use std::marker::PhantomData; 3 | 4 | use serde::{Deserialize, Serialize}; 5 | 6 | use super::Error; 7 | use crate::serde::{Deserializer, Serializer}; 8 | 9 | /// A struct to serialize and deserialize JSON payloads. 
 10 | #[derive(Debug, Clone, Copy)]
 11 | pub struct Json<T>(PhantomData<T>);
 12 | 
 13 | impl<T> Default for Json<T> {
 14 |     fn default() -> Self {
 15 |         Self(PhantomData)
 16 |     }
 17 | }
 18 | 
 19 | impl<T> Serializer<T> for Json<T>
 20 | where
 21 |     T: Serialize,
 22 | {
 23 |     /// Serializes the given value to JSON format and returns the serialized bytes.
 24 |     ///
 25 |     /// # Arguments
 26 |     ///
 27 |     /// * `value` - The value to be serialized.
 28 |     ///
 29 |     /// # Returns
 30 |     ///
 31 |     /// Serialized bytes representing the value in JSON format.
 32 |     fn serialize(&self, value: T) -> Vec<u8> {
 33 |         serde_json::to_vec(&value).expect("json serialization should not fail")
 34 |     }
 35 | }
 36 | 
 37 | impl<T> Deserializer<T> for Json<T>
 38 | where
 39 |     for<'d> T: Deserialize<'d>,
 40 | {
 41 |     /// Deserializes the given JSON bytes to produce a value of type `T`.
 42 |     ///
 43 |     /// # Arguments
 44 |     ///
 45 |     /// * `data` - The JSON bytes to be deserialized.
 46 |     ///
 47 |     /// # Returns
 48 |     ///
 49 |     /// A `Result` containing the deserialized value on success, or an error on failure.
 50 |     fn deserialize(&self, data: Vec<u8>) -> Result<T, Error> {
 51 |         serde_json::from_slice(&data).map_err(|e| Error::Deserialization(Box::new(e)))
 52 |     }
 53 | }
 54 | 
 55 | #[cfg(test)]
 56 | mod tests {
 57 |     use super::*;
 58 | 
 59 |     #[derive(Debug, Serialize, Deserialize, PartialEq, Clone)]
 60 |     struct Person {
 61 |         name: String,
 62 |         age: u32,
 63 |     }
 64 | 
 65 |     #[test]
 66 |     fn it_serialize_and_deserialize_json_data() {
 67 |         let json_serializer = Json::<Person>::default();
 68 |         let person = Person {
 69 |             name: String::from("Some Name"),
 70 |             age: 30,
 71 |         };
 72 | 
 73 |         let serialized_data = json_serializer.serialize(person.clone());
 74 |         let deserialized_person = json_serializer.deserialize(serialized_data).unwrap();
 75 | 
 76 |         assert_eq!(person, deserialized_person);
 77 |     }
 78 | }
 79 | 
--------------------------------------------------------------------------------
/docs/docs/developer_journey/developer_journey.md:
--------------------------------------------------------------------------------
 1 | ---
 2 | sidebar_position: 2
 3 | ---
 4 | 
 5 | # Developer Journey
 6 | Welcome to your journey with **Disintegrate**, where we introduce a unique approach to **Command and Query Responsibility Segregation** (CQRS). Assuming you have some familiarity with CQRS and Event Sourcing, we'll briefly revisit these concepts. If you're new to them, we recommend exploring the literature on the topic to gain a better understanding.
 7 | 
 8 | When initiating a new project following domain-driven design (DDD) principles, it's advisable to start with an **Event Storming** session. This helps in modeling the system and clarifying business concepts, enabling the identification of major events, commands, and aggregates. Through this session, stakeholders establish a ubiquitous language to refer to domain concepts and flows. Following Event Storming, the team refines the model and identifies aggregates, sub-domains, and bounded contexts.
 9 | 
10 | Aggregates are the primary components to identify: an aggregation of domain objects treated as a single unit. They handle commands and generate events to update the system's state. Defining aggregates is challenging, as they're designed to handle multiple commands, and their state should accommodate all possible use cases.
11 | 
12 | :::warning
13 | Incorrectly defining aggregates can lead to issues, whether they become too large, creating contention and slowing down the application, or too small, scattering necessary data across multiple aggregates, complicating system consistency.
Evolving aggregates is equally challenging, often requiring modifications to the state and impacting existing commands. 14 | ::: 15 | 16 | **Disintegrate** shifts focus from aggregates to commands (`Decision`s) the system must handle. This shift simplifies the identification of specific commands and eases the introduction of new commands compared to modifying aggregates originally designed for them. 17 | 18 | In the following sections, we'll guide you through creating a new application using Disintegrate. We'll start with an event storming session, outline the flow of events and commands, and then move on to coding the system, focusing on evolution and maintenance using the Shopping Cart example. Additionally, we'll explore Disintegrate's architecture, including our Postgres event store and techniques for handling snapshots and migrating Postgres tables, providing insight into Disintegrate's real-world application capabilities. 19 | 20 | -------------------------------------------------------------------------------- /docs/docs/developer_journey/cqrs_and_event_sourcing.md: -------------------------------------------------------------------------------- 1 | --- 2 | sidebar_position: 2 3 | --- 4 | 5 | # CQRS 6 | 7 | The development team has decided to adopt **CQRS** (Command Query Responsibility Segregation), a design pattern that advocates for the separation of concerns between commands (actions that change the system's state) and queries (actions that retrieve data from the system). This separation allows for independent scaling, optimization, and maintenance of both the write and read sides of an application. 8 | 9 | ## Write Model vs Read Model 10 | 11 | ### Write Model 12 | The write model represents the system's state as it changes over time in response to commands. It encapsulates the business logic and validation rules necessary to process incoming commands. Additionally, the team has chosen to persist the state of the Shopping Cart using event sourcing. 13 | 14 | ### Read Model 15 | On the other hand, the read model is optimized for querying and presenting data to users. It stores denormalized or precomputed views of the data tailored to specific use cases, improving performance and scalability for read-heavy workloads. In this case, the team has decided to create a table containing the items added to a Shopping Cart. Each user has their own shopping cart, identified by their user ID. 16 | 17 | ## Event Sourcing 18 | Event sourcing is a pattern closely related to CQRS, where the state of the application is determined by a sequence of immutable events. Instead of directly modifying the state of entities, actions are represented as domain events that are appended to an event log. The current state of the system is then derived by replaying these events. 19 | 20 | ### Advantages of Event Sourcing 21 | 22 | - **Full Auditability**: Since every change to the system is captured as an event, event sourcing provides a complete audit trail of actions taken within the application, enabling detailed historical analysis and debugging. 23 | 24 | - **Temporal Querying**: Event sourcing allows for querying the state of the system at any point in time by replaying events up to that moment. This temporal querying capability is useful for implementing features like versioning, time travel, and historical reporting. 25 | 26 | - **Improved Resilience**: By decoupling state mutation from state storage, event sourcing enhances the resilience of the system against failures and data corruption. 
In the event of a failure, the system can be easily restored by replaying events from the event log. 27 | 28 | -------------------------------------------------------------------------------- /examples/courses/README.md: -------------------------------------------------------------------------------- 1 | # Courses Example 2 | 3 | The Courses Example is a sample project that demonstrates the usage of the Disintegrate library. It represents a fictional school system where users can register and subscribe to courses. The example showcases how Disintegrate can be utilized to manage events and implement business logic in an event-driven architecture. 4 | 5 | ## Project Overview 6 | 7 | The project focuses on the following key aspects: 8 | 9 | * **Student Registration**: Students can register in the system by providing their information such as name and student ID. 10 | 11 | * **Course Subscription**: Students can subscribe to available courses. Each course has a limited number of seats, and students can only subscribe to a maximum of 2 courses. 12 | 13 | * **Event-driven Architecture**: The project leverages Disintegrate to implement an event-driven architecture. 14 | 15 | ## Getting Started 16 | 17 | To run the Courses Example, follow these steps: 18 | 19 | 1. Clone the Disintegrate repository from GitHub: `git clone https://github.com/disintegrate-es/disintegrate.git` 20 | 21 | 2. Navigate to the `examples/courses` directory: `cd disintegrate/examples/courses` 22 | 23 | 3. Start the required services using Docker Compose: `docker compose up -d` 24 | 25 | 4. Run the example using Cargo: `cargo run` 26 | 27 | ## Example Usage 28 | 29 | In the test folder, you can find an Insomnia collection named `course_api_insomnia_collection.json` that contains a set of requests for testing the gRPC API. 30 | 31 | To use the Insomnia collection: 32 | 33 | 1. Import the collection into Insomnia. You can use the Insomnia Desktop App to import the collection. 34 | 2. Once imported, you will find the collection with various requests. 35 | 36 | ### Requests 37 | 38 | Here are some of the requests available in the collection: 39 | 40 | * **Create Course**: Creates a new course with the specified ID, name, and number of seats. 41 | * **Close Course**: Closes a course with the specified ID, preventing further subscriptions. 42 | * **Rename Course**: Renames a course with the specified ID. 43 | * **Register Student**: Registers a new student with the specified ID and name. 44 | * **Subscribe Student**: Subscribes a student to a course with the specified student ID and course ID. 45 | * **Unsubscribe Student**: Unsubscribes a student from a course with the specified student ID and course ID. 46 | 47 | To use these requests, make sure the application is running, and update the request bodies with the desired parameters. 48 | -------------------------------------------------------------------------------- /disintegrate-serde/src/serde/messagepack.rs: -------------------------------------------------------------------------------- 1 | use rmp_serde; 2 | use std::marker::PhantomData; 3 | 4 | use serde::Deserialize; 5 | use serde::Serialize; 6 | 7 | use super::Error; 8 | use crate::serde::Deserializer; 9 | use crate::serde::Serializer; 10 | 11 | /// A struct to serialize and deserialize MessagePack payloads. 
 12 | #[derive(Debug, Clone, Copy)]
 13 | pub struct MessagePack<T>(PhantomData<T>);
 14 | 
 15 | impl<T> Default for crate::serde::messagepack::MessagePack<T> {
 16 |     fn default() -> Self {
 17 |         Self(PhantomData)
 18 |     }
 19 | }
 20 | impl<T> Serializer<T> for crate::serde::messagepack::MessagePack<T>
 21 | where
 22 |     T: Serialize,
 23 | {
 24 |     /// Serializes the given value to MessagePack format and returns the serialized bytes.
 25 |     ///
 26 |     /// # Arguments
 27 |     ///
 28 |     /// * `value` - The value to be serialized.
 29 |     ///
 30 |     /// # Returns
 31 |     ///
 32 |     /// Serialized bytes representing the value in MessagePack format.
 33 |     fn serialize(&self, value: T) -> Vec<u8> {
 34 |         rmp_serde::to_vec(&value).expect("MessagePack serialization failed")
 35 |     }
 36 | }
 37 | 
 38 | impl<T> Deserializer<T> for crate::serde::messagepack::MessagePack<T>
 39 | where
 40 |     for<'d> T: Deserialize<'d>,
 41 | {
 42 |     /// Deserializes the given MessagePack bytes to produce a value of type `T`.
 43 |     ///
 44 |     /// # Arguments
 45 |     ///
 46 |     /// * `data` - The MessagePack bytes to be deserialized.
 47 |     ///
 48 |     /// # Returns
 49 |     ///
 50 |     /// A `Result` containing the deserialized value on success, or an error on failure.
 51 |     fn deserialize(&self, data: Vec<u8>) -> Result<T, Error> {
 52 |         rmp_serde::from_slice(&data).map_err(|e| Error::Deserialization(Box::new(e)))
 53 |     }
 54 | }
 55 | 
 56 | #[cfg(test)]
 57 | mod tests {
 58 |     use super::*;
 59 | 
 60 |     #[derive(Debug, Serialize, Deserialize, PartialEq, Clone)]
 61 |     struct Person {
 62 |         name: String,
 63 |         age: u32,
 64 |     }
 65 | 
 66 |     #[test]
 67 |     fn it_serialize_and_deserialize_messagepack_data() {
 68 |         let msgpack_serializer = crate::serde::messagepack::MessagePack::<Person>::default();
 69 |         let person = Person {
 70 |             name: String::from("Some Name"),
 71 |             age: 30,
 72 |         };
 73 | 
 74 |         let serialized_data = msgpack_serializer.serialize(person.clone());
 75 |         let deserialized_person = msgpack_serializer.deserialize(serialized_data).unwrap();
 76 | 
 77 |         assert_eq!(person, deserialized_person);
 78 |     }
 79 | }
 80 | 
--------------------------------------------------------------------------------
/disintegrate/src/lib.rs:
--------------------------------------------------------------------------------
 1 | #![doc = include_str!("../README.md")]
 2 | 
 3 | mod decision;
 4 | mod domain_identifier;
 5 | mod event;
 6 | mod event_store;
 7 | mod identifier;
 8 | mod listener;
 9 | mod state;
10 | mod state_store;
11 | mod stream_query;
12 | mod testing;
13 | pub mod utils;
14 | 
15 | #[doc(inline)]
16 | pub use crate::decision::{Decision, DecisionMaker, Error as DecisionError, PersistDecision};
17 | #[doc(inline)]
18 | pub use crate::domain_identifier::{DomainIdentifier, DomainIdentifierSet};
19 | #[doc(inline)]
20 | pub use crate::event::{
21 |     DomainIdentifierInfo, Event, EventId, EventInfo, EventSchema, PersistedEvent,
22 | };
23 | #[doc(inline)]
24 | pub use crate::event_store::EventStore;
25 | #[doc(inline)]
26 | pub use crate::identifier::{Identifier, IdentifierType, IdentifierValue, IntoIdentifierValue};
27 | #[doc(inline)]
28 | pub use crate::listener::EventListener;
29 | #[doc(inline)]
30 | pub use crate::state::{IntoState, IntoStatePart, MultiState, StateMutate, StatePart, StateQuery};
31 | #[doc(inline)]
32 | pub use crate::state_store::{
33 |     EventSourcedStateStore, LoadState, LoadedState, NoSnapshot, SnapshotConfig, StateSnapshotter,
34 |     WithSnapshot,
35 | };
36 | #[doc(inline)]
37 | pub use crate::stream_query::{query, StreamFilter, StreamQuery};
38 | #[doc(inline)]
39 | pub use crate::testing::TestHarness;
40 | 
41 | pub type BoxDynError = Box<dyn std::error::Error + Send + Sync>;
42 | 
43 | 
#[cfg(feature = "macros")]
44 | pub use disintegrate_macros::{Event, StateQuery};
45 | 
46 | #[cfg(feature = "serde")]
47 | pub mod serde {
48 |     //! # Event Store Serialization and Deserialization.
49 |     #[cfg(feature = "serde-avro")]
50 |     #[doc(inline)]
51 |     pub use disintegrate_serde::serde::avro;
52 |     #[cfg(feature = "serde-json")]
53 |     #[doc(inline)]
54 |     pub use disintegrate_serde::serde::json;
55 |     #[cfg(feature = "serde-messagepack")]
56 |     #[doc(inline)]
57 |     pub use disintegrate_serde::serde::messagepack;
58 |     #[cfg(feature = "serde-prost")]
59 |     #[doc(inline)]
60 |     pub use disintegrate_serde::serde::prost;
61 |     #[cfg(feature = "serde-protobuf")]
62 |     #[doc(inline)]
63 |     pub use disintegrate_serde::serde::protobuf;
64 |     #[doc(inline)]
65 |     pub use disintegrate_serde::{Deserializer, Serde, Serializer};
66 | }
67 | 
68 | #[doc(hidden)]
69 | #[macro_export]
70 | macro_rules! all_the_tuples {
71 |     ($name:ident) => {
72 |         $name!([], T1);
73 |         $name!([T1], T2);
74 |         $name!([T1, T2], T3);
75 |         $name!([T1, T2, T3], T4);
76 |         $name!([T1, T2, T3, T4], T5);
77 |     };
78 | }
79 | 
--------------------------------------------------------------------------------
/examples/courses/tests/subscriptions_concurrency_test.js:
--------------------------------------------------------------------------------
 1 | import grpc from 'k6/net/grpc';
 2 | import { check, sleep } from 'k6';
 3 | import exec from 'k6/execution';
 4 | 
 5 | const serverUrl = 'localhost:10437';
 6 | const COURSES = 10;
 7 | const USERS = 500;
 8 | 
 9 | export let options = {
10 |   vus: USERS,
11 |   duration: '100s',
12 | };
13 | 
14 | const client = new grpc.Client();
15 | client.load(['../proto'], 'api.proto');
16 | 
17 | export function setup() {
18 |   // Create courses
19 |   client.connect(serverUrl, { plaintext: true });
20 |   for (let i = 1; i <= COURSES; i++) {
21 |     const course = {
22 |       course_id: `course${i}`,
23 |       name: 'Introduction to Programming',
24 |       seats: 100,
25 |     };
26 |     const res = client.invoke('api.Course/Create', course);
27 |     check(res, { 'course created': (r) => r && r.status === grpc.StatusOK });
28 |   }
29 |   // Register students
30 |   for (let i = 1; i <= USERS; i++) {
31 |     const student = {
32 |       student_id: `student${i}`,
33 |       name: `Student ${i}`,
34 |     };
35 |     const res = client.invoke('api.Student/Register', student);
36 |     check(res, { 'registered successfully': (r) => r && r.status === grpc.StatusOK });
37 |   }
38 |   client.close();
39 | }
40 | 
41 | // Each virtual user subscribes a random student to a random course
42 | export default function (data) {
43 |   client.connect(serverUrl, { plaintext: true });
44 |   const course_id = Math.floor(Math.random() * COURSES) + 1;
45 |   const student_id = Math.floor(Math.random() * USERS) + 1;
46 |   const subscription = {
47 |     course_id: `course${course_id}`,
48 |     student_id: `student${student_id}`,
49 |   };
50 |   const res = client.invoke('api.Subscription/Subscribe', subscription);
51 |   check(res, { 'student subscribed successfully': (r) => r && r.status === grpc.StatusOK });
52 |   check(res, { 'student already subscribed error': (r) => r && r.status === grpc.StatusInternal && r.error.message.includes('student already subscribed') });
53 |   check(res, { 'student has too many courses error': (r) => r && r.status === grpc.StatusInternal && r.error.message.includes('student has too many courses') });
54 |   check(res, { 'course no seats available error': (r) => r && r.status === grpc.StatusInternal && r.error.message.includes('no seats available') });
55 |   check(res, { 'concurrent modification error': (r) => r && r.status === grpc.StatusInternal && r.error.message.includes('concurrent modification error') });
56 |   client.close();
57 | }
58 | 
--------------------------------------------------------------------------------
/docs/docs/developer_journey/handle_concurrency_conflicts.md:
--------------------------------------------------------------------------------
 1 | ---
 2 | sidebar_position: 4
 3 | ---
 4 | 
 5 | # Handling Concurrency
 6 | 
 7 | If your system experiences high usage due to a rush of clients eager to use your new, shiny coupon feature, conflicts may arise because the same coupons will be used concurrently by multiple users. In Disintegrate, this means the `Coupon` state might become outdated for some users by the time decisions are made, leading to conflicts that are resolved by retrying the decision later. However, such challenges are not unique to Disintegrate; they are also encountered in traditional aggregate-based systems.
 8 | 
 9 | Various approaches exist to address this situation. One option is to introduce a lock mechanism that grants exclusive access to `Coupon` resources, allowing only one client at a time to decrement the counter. Alternatively, employing a queue can ensure that commands with the same coupon ID are executed sequentially, either through dedicated queues for each coupon ID or, if Kafka is used, by grouping commands with the same coupon into partitions.
10 | 
11 | Another solution involves allowing overbooking, if permissible by your business model. This approach simplifies the system by permitting a limited amount of overbooking. In Disintegrate, implementing this is straightforward: use the validation query to exclude `CouponApplied` events from conflict checking before the decision is committed. The decision still verifies availability by loading the `Coupon` state, but the system no longer checks whether that state has become stale. Consequently, concurrent operations may pass the conflict check and commit their decisions even when the coupon is no longer available. The following example shows how to exclude `CouponApplied` from the conflict validation and accept overbooking of coupons:
12 | 
13 | ```rust
14 | impl Decision for ApplyCoupon {
15 |     type Event = DomainEvent;
16 |     type StateQuery = (Cart, Coupon);
17 |     type Error = CartError;
18 | 
19 |     fn state_query(&self) -> Self::StateQuery {
20 |         (Cart::new(&self.user_id), Coupon::new(&self.coupon_id))
21 |     }
22 | 
23 |     fn process(&self, (cart, coupon): &Self::StateQuery) -> Result<Vec<Self::Event>, Self::Error> {
24 |         // business logic
25 |         todo!()
26 |     }
27 | 
28 |     fn validation_query<ID: EventId>(&self) -> Option<StreamQuery<ID, Self::Event>> {
29 |         let (cart, coupon) = self.state_query();
30 |         // the validation query is the union of the two state queries used by the decision
31 |         Some(union!(
32 |             // the original cart state query is used to validate the decision against the user's cart changes.
33 |             &cart,
34 |             // exclude the `CouponApplied` event from the coupon state query to allow some overbooking.
35 |             coupon.exclude_events(event_types!(DomainEvent, [CouponApplied]))
36 |         ))
37 |     }
38 | }
39 | ```
--------------------------------------------------------------------------------
/docs/docusaurus.config.js:
--------------------------------------------------------------------------------
 1 | // @ts-check
 2 | // `@type` JSDoc annotations allow editor autocompletion and type checking
 3 | // (when paired with `@ts-check`).
 4 | // There are various equivalent ways to declare your Docusaurus config.
 5 | // See: https://docusaurus.io/docs/api/docusaurus-config
 6 | 
 7 | import {themes as prismThemes} from 'prism-react-renderer';
 8 | 
 9 | /** @type {import('@docusaurus/types').Config} */
10 | const config = {
11 |   title: 'Disintegrate',
12 |   tagline: 'Disintegrate Rust library',
13 |   favicon: 'img/favicon.ico',
14 |   // Set the production url of your site here
15 |   url: 'https://disintegrate-es.github.io',
16 |   // Set the /<baseUrl>/ pathname under which your site is served
17 |   // For GitHub pages deployment, it is often '/<projectName>/'
18 |   baseUrl: '/disintegrate/',
19 |   // GitHub pages deployment config.
20 |   // If you aren't using GitHub pages, you don't need these.
21 |   organizationName: 'disintegrate-es', // Usually your GitHub org/user name.
22 |   projectName: 'disintegrate', // Usually your repo name.
23 |   trailingSlash: false,
24 | 
25 |   onBrokenLinks: 'throw',
26 |   onBrokenMarkdownLinks: 'warn',
27 | 
28 |   // Even if you don't use internationalization, you can use this field to set
29 |   // useful metadata like html lang. For example, if your site is Chinese, you
30 |   // may want to replace "en" with "zh-Hans".
31 |   i18n: {
32 |     defaultLocale: 'en',
33 |     locales: ['en'],
34 |   },
35 |   presets: [
36 |     [
37 |       'classic',
38 |       /** @type {import('@docusaurus/preset-classic').Options} */
39 |       ({
40 |         docs: {
41 |           routeBasePath: '/',
42 |           sidebarPath: './sidebars.js',
43 |         },
44 |         blog: false,
45 |         theme: {
46 |           customCss: './src/css/custom.css',
47 |         },
48 |       }),
49 |     ],
50 |   ],
51 |   // markdown plugins
52 |   markdown: {
53 |     mermaid: true,
54 |   },
55 |   themes: ['@docusaurus/theme-mermaid'],
56 |   themeConfig:
57 |     /** @type {import('@docusaurus/preset-classic').ThemeConfig} */
58 |     ({
59 |       // Replace with your project's social card
60 |       image: 'img/docusaurus-social-card.jpg',
61 |       navbar: {
62 |         title: 'Disintegrate',
63 |         logo: {
64 |           alt: 'Disintegrate Logo',
65 |           src: 'img/logo.svg',
66 |         },
67 |         items: [
68 |           {
69 |             href: 'https://github.com/disintegrate-es/disintegrate',
70 |             label: 'GitHub',
71 |             position: 'right',
72 |           },
73 |         ],
74 |       },
75 |       footer: {
76 |         style: 'dark',
77 |         links: [],
78 |         copyright: `Copyright © ${new Date().getFullYear()} Disintegrate, Inc. Built with Docusaurus.`,
79 |       },
80 |       prism: {
81 |         theme: prismThemes.github,
82 |         darkTheme: prismThemes.dracula,
83 |       },
84 |     }),
85 | };
86 | 
87 | export default config;
88 | 
--------------------------------------------------------------------------------
/docs/docs/stream_query.md:
--------------------------------------------------------------------------------
 1 | ---
 2 | sidebar_position: 3
 3 | ---
 4 | 
 5 | # Stream Query
 6 | 
 7 | A stream query allows querying events from the event store. When using Disintegrate, you can write a stream query and submit it to the event store to retrieve events. Alternatively, if you define a state query by deriving `StateQuery`, the library generates a stream query for you: it selects events from the specified event stream and filters them by the fields annotated with `#[id]`.
 8 | 
 9 | For example:
10 | 
11 | ```rust
12 | #[derive(Debug, StateQuery, Clone, Serialize, Deserialize)]
13 | #[state_query(CourseEvent)]
14 | pub struct Course {
15 |     #[id]
16 |     course_id: CourseId,
17 |     name: String,
18 |     created: bool,
19 |     closed: bool,
20 | }
21 | ```
22 | 
23 | Under the hood, the library generates a stream query that fetches all events from the `CourseEvent` stream, filtering only those whose `course_id` matches the one specified in an instance of the `Course` struct.
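
To make this concrete, here is a minimal sketch of how `Course` might fold the selected events into its fields. It follows the `StateMutate` pattern used by the examples in this repository; the `CourseEvent` variants shown (`CourseCreated`, `CourseRenamed`, `CourseClosed`) are assumed to mirror the domain events of the courses example:

```rust
impl StateMutate for Course {
    fn mutate(&mut self, event: Self::Event) {
        // Only events whose `course_id` matches the annotated `#[id]` field are
        // streamed to this instance, so each arm simply folds the payload into the state.
        match event {
            CourseEvent::CourseCreated { name, .. } => {
                self.created = true;
                self.name = name;
            }
            CourseEvent::CourseRenamed { name, .. } => self.name = name,
            CourseEvent::CourseClosed { .. } => self.closed = true,
        }
    }
}
```

Because `mutate` is applied in event order, replaying the generated stream query reproduces the current state of the course.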
24 | 
25 | How does it work if the stream has more than one ID, and you specify only a subset?
26 | 
27 | If the events included in your stream have multiple IDs, filtering for only a subset of those IDs will result in the query retrieving all the events that match the specified IDs while ignoring the others. For instance, if we filter only for the `course_id`, but the event `StudentSubscribed` also has a `student_id`, it will still be selected as long as its `course_id` matches the one specified in the query.
28 | 
29 | ## Multi-State Query
30 | 
31 | Disintegrate automatically implements `StateQuery` for tuples of `StateQuery`s. The stream query of the tuple comprises the union of all its queries: the library retrieves all the queried events and mutates each `StateQuery` in the tuple based on the specified filters. This feature is particularly useful for reusing the same query across multiple `Decision`s by combining shared `StateQuery`s into complex queries.
32 | 
33 | ```rust
34 | #[derive(Default, StateQuery, Clone, Serialize, Deserialize)]
35 | #[state_query(CartEvent)]
36 | pub struct Cart {
37 |     #[id]
38 |     user_id: String,
39 |     items: HashSet<String>,
40 |     applied_coupon: Option<String>,
41 | }
42 | 
43 | impl Cart {
44 |     pub fn new(user_id: &str) -> Self {
45 |         Self {
46 |             user_id: user_id.into(),
47 |             ..Default::default()
48 |         }
49 |     }
50 | }
51 | 
52 | #[derive(Default, StateQuery, Clone, Serialize, Deserialize)]
53 | #[state_query(CouponEvent)]
54 | pub struct Coupon {
55 |     #[id]
56 |     coupon_id: String,
57 |     quantity: u32,
58 | }
59 | 
60 | impl Coupon {
61 |     pub fn new(coupon_id: &str) -> Self {
62 |         Self {
63 |             coupon_id: coupon_id.to_string(),
64 |             ..Default::default()
65 |         }
66 |     }
67 | }
68 | 
69 | fn state_query(&self) -> Self::StateQuery {
70 |     // returns a multi-state query, which is the union of the `Cart` and `Coupon` `StateQuery`s
71 |     (Cart::new(&self.user_id), Coupon::new(&self.coupon_id))
72 | }
73 | ```
74 | 
--------------------------------------------------------------------------------
/disintegrate-serde/src/serde/prost.rs:
--------------------------------------------------------------------------------
  1 | //! A Protobuf serialization and deserialization module using Prost.
  2 | //!
  3 | //! This module provides the capability to serialize and deserialize data using the Prost library.
  4 | use std::marker::PhantomData;
  5 | 
  6 | use prost::{bytes::Bytes, Message};
  7 | 
  8 | use super::Error;
  9 | use crate::serde::{Deserializer, Serializer};
 10 | 
 11 | /// A struct to serialize and deserialize Protobuf payloads.
 12 | #[derive(Debug, Clone, Copy)]
 13 | pub struct Prost<I, O>(PhantomData<I>, PhantomData<O>)
 14 | where
 15 |     O: Message;
 16 | 
 17 | impl<I, O> Prost<I, O>
 18 | where
 19 |     O: Message,
 20 | {
 21 |     /// Creates a new instance of the `Prost` serde module.
 22 |     pub fn new() -> Self {
 23 |         Self(PhantomData, PhantomData)
 24 |     }
 25 | }
 26 | 
 27 | impl<I, O> Default for Prost<I, O>
 28 | where
 29 |     O: Message,
 30 | {
 31 |     fn default() -> Self {
 32 |         Prost::new()
 33 |     }
 34 | }
 35 | 
 36 | impl<I, O> Serializer<I> for Prost<I, O>
 37 | where
 38 |     O: From<I> + Message,
 39 | {
 40 |     /// Serializes the given value to Protobuf-encoded bytes.
 41 |     ///
 42 |     /// # Arguments
 43 |     ///
 44 |     /// * `value` - The value to be serialized.
 45 |     ///
 46 |     /// # Returns
 47 |     ///
 48 |     /// Serialized bytes representing the value in Protobuf format.
 49 |     fn serialize(&self, value: I) -> Vec<u8> {
 50 |         let target = O::from(value);
 51 |         target.encode_to_vec()
 52 |     }
 53 | }
 54 | 
 55 | impl<I, O> Deserializer<I> for Prost<I, O>
 56 | where
 57 |     I: TryFrom<O>,
 58 |     O: Message + Default,
 59 | {
 60 |     /// Deserializes the given Protobuf-encoded bytes to produce a value of type `I`.
 61 |     ///
 62 |     /// # Arguments
 63 |     ///
 64 |     /// * `data` - The Protobuf-encoded bytes to be deserialized.
 65 |     ///
 66 |     /// # Returns
 67 |     ///
 68 |     /// A `Result` containing the deserialized value on success, or an error on failure.
 69 |     fn deserialize(&self, data: Vec<u8>) -> Result<I, Error> {
 70 |         let buf = Bytes::from(data);
 71 | 
 72 |         let target = O::decode(buf).map_err(|e| Error::Deserialization(Box::new(e)))?;
 73 |         I::try_from(target).map_err(|_| Error::Conversion)
 74 |     }
 75 | }
 76 | 
 77 | #[cfg(test)]
 78 | mod tests {
 79 |     use super::*;
 80 |     use prost::Message;
 81 | 
 82 |     #[derive(PartialEq, Message, Clone)]
 83 |     struct Person {
 84 |         #[prost(string, tag = "1")]
 85 |         name: String,
 86 |         #[prost(uint32, tag = "2")]
 87 |         age: u32,
 88 |     }
 89 | 
 90 |     #[test]
 91 |     fn it_serialize_and_deserialize_prost_data() {
 92 |         let serde_module = Prost::<Person, Person>::new();
 93 | 
 94 |         let person = Person {
 95 |             name: String::from("Some name"),
 96 |             age: 30,
 97 |         };
 98 | 
 99 |         // Serialize the person to bytes
100 |         let serialized_data = serde_module.serialize(person.clone());
101 | 
102 |         // Deserialize the bytes back to a person
103 |         let deserialized_person = serde_module.deserialize(serialized_data).unwrap();
104 | 
105 |         // Verify that the deserialized person matches the original person
106 |         assert_eq!(person, deserialized_person);
107 |     }
108 | }
109 | 
--------------------------------------------------------------------------------
/examples/courses/src/main.rs:
--------------------------------------------------------------------------------
 1 | use std::time::Duration;
 2 | 
 3 | use anyhow::{anyhow, Ok, Result};
 4 | use application::Application;
 5 | use disintegrate::{serde::prost::Prost, WithSnapshot};
 6 | use disintegrate_postgres::{PgEventListener, PgEventListenerConfig, PgEventStore, PgSnapshotter};
 7 | use sqlx::{postgres::PgConnectOptions, PgPool};
 8 | use tokio::signal;
 9 | use tracing_subscriber::{self, fmt::format::FmtSpan};
10 | 
11 | use courses::{application, domain::DomainEvent, grpc, proto, read_model};
12 | 
13 | type EventStore = PgEventStore<DomainEvent, Prost<DomainEvent, proto::Event>>;
14 | 
15 | #[tokio::main]
16 | async fn main() -> Result<()> {
17 |     dotenv::dotenv().unwrap();
18 | 
19 |     tracing_subscriber::fmt()
20 |         .with_span_events(FmtSpan::NEW | FmtSpan::CLOSE)
21 |         .init();
22 | 
23 |     let pool = PgPool::connect_with(PgConnectOptions::new()).await?;
24 |     let serde = Prost::<DomainEvent, proto::Event>::default();
25 |     let event_store = PgEventStore::new(pool.clone(), serde).await?;
26 |     let snapshotter = PgSnapshotter::new(pool.clone(), 10).await?;
27 |     let decision_maker =
28 |         disintegrate_postgres::decision_maker(event_store.clone(), WithSnapshot::new(snapshotter));
29 | 
30 |     let read_model = read_model::Repository::new(pool.clone());
31 |     let app = Application::new(decision_maker, read_model);
32 | 
33 |     tokio::try_join!(grpc_server(app), event_listener(pool, event_store))?;
34 |     Ok(())
35 | }
36 | 
37 | async fn grpc_server(app: Application) -> Result<()> {
38 |     let addr = "0.0.0.0:10437"
39 |         .parse()
40 |         .map_err(|e| anyhow!("failed to parse grpc address: {}", e))?;
41 | 
42 |     let (_, health_svc) = tonic_health::server::health_reporter();
43 | 
44 |     let reflection_svc = tonic_reflection::server::Builder::configure()
45 |
.register_encoded_file_descriptor_set(proto::FILE_DESCRIPTOR_SET) 46 | .register_encoded_file_descriptor_set(tonic_health::pb::FILE_DESCRIPTOR_SET) 47 | .build_v1() 48 | .map_err(|e| anyhow!("failed to build grpc reflection service: {}", e))?; 49 | 50 | let course_svc = proto::course_server::CourseServer::new(grpc::CourseApi::new(app.clone())); 51 | 52 | let student_svc = proto::student_server::StudentServer::new(grpc::StudentApi::new(app.clone())); 53 | 54 | let subscription_svc = 55 | proto::subscription_server::SubscriptionServer::new(grpc::SubscriptionApi::new(app)); 56 | 57 | tonic::transport::Server::builder() 58 | .add_service(health_svc) 59 | .add_service(reflection_svc) 60 | .add_service(course_svc) 61 | .add_service(student_svc) 62 | .add_service(subscription_svc) 63 | .serve_with_shutdown(addr, shutdown()) 64 | .await 65 | .map_err(|e| anyhow!("tonic server exited with error: {}", e))?; 66 | Ok(()) 67 | } 68 | 69 | async fn event_listener(pool: sqlx::PgPool, event_store: EventStore) -> Result<()> { 70 | PgEventListener::builder(event_store) 71 | .register_listener( 72 | read_model::ReadModelProjection::new(pool).await?, 73 | PgEventListenerConfig::poller(Duration::from_secs(5)).with_notifier(), 74 | ) 75 | .start_with_shutdown(shutdown()) 76 | .await 77 | .map_err(|e| anyhow!("event listener exited with error: {}", e))?; 78 | Ok(()) 79 | } 80 | 81 | async fn shutdown() { 82 | signal::ctrl_c().await.expect("failed to listen for event"); 83 | } 84 | -------------------------------------------------------------------------------- /examples/courses/src/domain/unsubscription.rs: -------------------------------------------------------------------------------- 1 | use disintegrate::{Decision, StateMutate, StateQuery}; 2 | use serde::{Deserialize, Serialize}; 3 | 4 | use super::{CourseId, DomainEvent, StudentId, UnsubscriptionEvent}; 5 | 6 | #[derive(Debug, Clone, PartialEq, Eq, thiserror::Error)] 7 | pub enum UnsubscriptionError { 8 | #[error("student not subscribed")] 9 | StudentNotSubscribed, 10 | } 11 | 12 | #[derive(Debug, Clone, StateQuery, Default, Serialize, Deserialize)] 13 | #[state_query(UnsubscriptionEvent)] 14 | pub struct Unsubscription { 15 | #[id] 16 | course_id: CourseId, 17 | #[id] 18 | student_id: StudentId, 19 | student_subscribed: bool, 20 | } 21 | impl Unsubscription { 22 | pub fn new(student_id: StudentId, course_id: CourseId) -> Self { 23 | Self { 24 | student_id, 25 | course_id, 26 | student_subscribed: false, 27 | } 28 | } 29 | } 30 | 31 | impl StateMutate for Unsubscription { 32 | fn mutate(&mut self, event: Self::Event) { 33 | match event { 34 | UnsubscriptionEvent::StudentSubscribed { .. } => { 35 | self.student_subscribed = true; 36 | } 37 | UnsubscriptionEvent::StudentUnsubscribed { .. 
} => { 38 | self.student_subscribed = false; 39 | } 40 | } 41 | } 42 | } 43 | 44 | #[derive(Debug)] 45 | pub struct UnsubscribeStudent { 46 | pub student_id: StudentId, 47 | pub course_id: CourseId, 48 | } 49 | 50 | impl UnsubscribeStudent { 51 | pub fn new(student_id: StudentId, course_id: CourseId) -> Self { 52 | Self { 53 | student_id, 54 | course_id, 55 | } 56 | } 57 | } 58 | 59 | impl Decision for UnsubscribeStudent { 60 | type Event = DomainEvent; 61 | 62 | type StateQuery = Unsubscription; 63 | 64 | type Error = UnsubscriptionError; 65 | 66 | fn state_query(&self) -> Self::StateQuery { 67 | Unsubscription::new(self.student_id.clone(), self.course_id.clone()) 68 | } 69 | 70 | fn process(&self, state: &Self::StateQuery) -> Result, Self::Error> { 71 | if !state.student_subscribed { 72 | return Err(UnsubscriptionError::StudentNotSubscribed); 73 | } 74 | 75 | Ok(vec![DomainEvent::StudentUnsubscribed { 76 | course_id: self.course_id.clone(), 77 | student_id: self.student_id.clone(), 78 | }]) 79 | } 80 | } 81 | 82 | #[cfg(test)] 83 | mod test { 84 | use super::*; 85 | 86 | #[test] 87 | fn it_unsubscribes_a_student() { 88 | disintegrate::TestHarness::given([DomainEvent::StudentSubscribed { 89 | student_id: "some student".to_string(), 90 | course_id: "some course".to_string(), 91 | }]) 92 | .when(UnsubscribeStudent::new( 93 | "some student".into(), 94 | "some course".into(), 95 | )) 96 | .then([DomainEvent::StudentUnsubscribed { 97 | course_id: "some course".into(), 98 | student_id: "some student".into(), 99 | }]); 100 | } 101 | 102 | #[test] 103 | fn it_should_not_unsubscribe_a_student_not_subscribed() { 104 | disintegrate::TestHarness::given([]) 105 | .when(UnsubscribeStudent::new( 106 | "some student".into(), 107 | "some course".into(), 108 | )) 109 | .then_err(UnsubscriptionError::StudentNotSubscribed); 110 | } 111 | } 112 | -------------------------------------------------------------------------------- /examples/courses/src/serde.rs: -------------------------------------------------------------------------------- 1 | use crate::{domain::DomainEvent, proto, proto::Event as ProtoDomainEvent}; 2 | 3 | impl From for ProtoDomainEvent { 4 | fn from(event: DomainEvent) -> Self { 5 | Self { 6 | event: Some(match event { 7 | DomainEvent::CourseCreated { 8 | course_id, 9 | name, 10 | seats, 11 | } => proto::event::Event::CourseCreated(proto::event::CourseCreated { 12 | course_id, 13 | name, 14 | seats, 15 | }), 16 | DomainEvent::CourseClosed { course_id } => { 17 | proto::event::Event::CourseClosed(proto::event::CourseClosed { course_id }) 18 | } 19 | DomainEvent::CourseRenamed { course_id, name } => { 20 | proto::event::Event::CourseRenamed(proto::event::CourseRenamed { 21 | course_id, 22 | name, 23 | }) 24 | } 25 | DomainEvent::StudentRegistered { student_id, name } => { 26 | proto::event::Event::StudentRegistered(proto::event::StudentRegistered { 27 | student_id, 28 | name, 29 | }) 30 | } 31 | DomainEvent::StudentSubscribed { 32 | student_id, 33 | course_id, 34 | } => proto::event::Event::StudentSubscribed(proto::event::StudentSubscribed { 35 | student_id, 36 | course_id, 37 | }), 38 | DomainEvent::StudentUnsubscribed { 39 | student_id, 40 | course_id, 41 | } => proto::event::Event::StudentUnsubscribed(proto::event::StudentUnsubscribed { 42 | student_id, 43 | course_id, 44 | }), 45 | }), 46 | } 47 | } 48 | } 49 | 50 | impl From for DomainEvent { 51 | fn from(proto: ProtoDomainEvent) -> Self { 52 | match proto.event.expect("event is a required field") { 53 | 
proto::event::Event::CourseCreated(proto::event::CourseCreated { 54 | course_id, 55 | name, 56 | seats, 57 | }) => DomainEvent::CourseCreated { 58 | course_id, 59 | name, 60 | seats, 61 | }, 62 | proto::event::Event::CourseClosed(proto::event::CourseClosed { course_id }) => { 63 | DomainEvent::CourseClosed { course_id } 64 | } 65 | proto::event::Event::CourseRenamed(proto::event::CourseRenamed { course_id, name }) => { 66 | DomainEvent::CourseRenamed { course_id, name } 67 | } 68 | proto::event::Event::StudentRegistered(proto::event::StudentRegistered { 69 | student_id, 70 | name, 71 | }) => DomainEvent::StudentRegistered { student_id, name }, 72 | proto::event::Event::StudentSubscribed(proto::event::StudentSubscribed { 73 | student_id, 74 | course_id, 75 | }) => DomainEvent::StudentSubscribed { 76 | student_id, 77 | course_id, 78 | }, 79 | proto::event::Event::StudentUnsubscribed(proto::event::StudentUnsubscribed { 80 | student_id, 81 | course_id, 82 | }) => DomainEvent::StudentUnsubscribed { 83 | student_id, 84 | course_id, 85 | }, 86 | } 87 | } 88 | } 89 | -------------------------------------------------------------------------------- /examples/courses/src/domain/student.rs: -------------------------------------------------------------------------------- 1 | use disintegrate::{Decision, StateMutate, StateQuery}; 2 | use serde::{Deserialize, Serialize}; 3 | 4 | use super::{DomainEvent, StudentEvent}; 5 | 6 | pub type StudentId = String; 7 | 8 | #[derive(Debug, Clone, PartialEq, Eq, thiserror::Error)] 9 | pub enum StudentError { 10 | #[error("not found")] 11 | NotFound, 12 | #[error("already registered")] 13 | AlreadyRegistered, 14 | #[error("name empty")] 15 | NameEmpty, 16 | } 17 | 18 | #[derive(Debug, StateQuery, Clone, Serialize, Deserialize)] 19 | #[state_query(StudentEvent)] 20 | pub struct Student { 21 | #[id] 22 | student_id: StudentId, 23 | name: String, 24 | registered: bool, 25 | } 26 | 27 | impl Student { 28 | pub fn new(student_id: StudentId) -> Self { 29 | Self { 30 | student_id, 31 | name: "".to_string(), 32 | registered: false, 33 | } 34 | } 35 | 36 | pub fn register(&self, name: &str) -> Result, StudentError> { 37 | if self.registered { 38 | return Err(StudentError::AlreadyRegistered); 39 | } 40 | if name.is_empty() { 41 | return Err(StudentError::NameEmpty); 42 | } 43 | 44 | Ok(vec![StudentEvent::StudentRegistered { 45 | student_id: self.student_id.clone(), 46 | name: name.into(), 47 | }]) 48 | } 49 | } 50 | 51 | impl StateMutate for Student { 52 | fn mutate(&mut self, event: Self::Event) { 53 | match event { 54 | StudentEvent::StudentRegistered { name, .. 
} => { 55 | self.registered = true; 56 | self.name = name; 57 | } 58 | } 59 | } 60 | } 61 | 62 | #[derive(Debug)] 63 | pub struct RegisterStudent { 64 | pub student_id: StudentId, 65 | pub name: String, 66 | } 67 | 68 | impl RegisterStudent { 69 | pub fn new(student_id: StudentId, name: String) -> Self { 70 | Self { student_id, name } 71 | } 72 | } 73 | 74 | impl Decision for RegisterStudent { 75 | type Event = DomainEvent; 76 | 77 | type StateQuery = Student; 78 | 79 | type Error = StudentError; 80 | 81 | fn state_query(&self) -> Self::StateQuery { 82 | Student::new(self.student_id.clone()) 83 | } 84 | 85 | fn process(&self, state: &Self::StateQuery) -> Result, Self::Error> { 86 | if state.registered { 87 | return Err(StudentError::AlreadyRegistered); 88 | } 89 | if self.name.is_empty() { 90 | return Err(StudentError::NameEmpty); 91 | } 92 | 93 | Ok(vec![DomainEvent::StudentRegistered { 94 | student_id: self.student_id.clone(), 95 | name: self.name.clone(), 96 | }]) 97 | } 98 | } 99 | 100 | #[cfg(test)] 101 | mod test { 102 | use super::*; 103 | 104 | #[test] 105 | fn it_registers_a_new_student() { 106 | disintegrate::TestHarness::given([]) 107 | .when(RegisterStudent::new("1".into(), "some name".to_string())) 108 | .then([DomainEvent::StudentRegistered { 109 | student_id: "1".into(), 110 | name: "some name".into(), 111 | }]); 112 | } 113 | 114 | #[test] 115 | fn it_should_not_register_a_student_when_it_already_exists() { 116 | disintegrate::TestHarness::given([DomainEvent::StudentRegistered { 117 | student_id: "1".into(), 118 | name: "some name".into(), 119 | }]) 120 | .when(RegisterStudent::new("1".into(), "some name".to_string())) 121 | .then_err(StudentError::AlreadyRegistered); 122 | } 123 | } 124 | -------------------------------------------------------------------------------- /disintegrate/src/domain_identifier.rs: -------------------------------------------------------------------------------- 1 | //! Domain identifier represents an ID associated to a domain entity 2 | //! 3 | //! This module provides types and utilities for managing and manipulating domain identifiers. 4 | //! 5 | //! # Examples 6 | //! 7 | //! Creating a `DomainIdentifierSet` with two domain identifiers: 8 | //! 9 | //! ``` 10 | //! use disintegrate::{DomainIdentifier, DomainIdentifierSet, Identifier, domain_identifiers, IntoIdentifierValue}; 11 | //! 12 | //! // Create domain identifiers 13 | //! let identifier1 = Identifier::new("id1").unwrap(); 14 | //! let identifier2 = Identifier::new("id2").unwrap(); 15 | //! 16 | //! // Create a DomainIdentifierSet 17 | //! let mut identifier_set = domain_identifiers! { 18 | //! id1: "value1", id2: "value2" 19 | //! }; 20 | //! 21 | //! // Insert a new domain identifier 22 | //! let new_identifier = DomainIdentifier { 23 | //! key: Identifier::new("id3").unwrap(), 24 | //! value: "value3".into_identifier_value(), 25 | //! }; 26 | //! identifier_set.insert(new_identifier); 27 | //! 28 | //! // Access domain identifiers 29 | //! assert_eq!(identifier_set.len(), 3); 30 | //! assert_eq!(identifier_set.get(&identifier1), Some("value1".into_identifier_value()).as_ref()); 31 | //! assert_eq!(identifier_set.get(&identifier2), Some("value2".into_identifier_value()).as_ref()); 32 | //! 33 | //! // Iterate over domain identifiers 34 | //! for (key, value) in &*identifier_set { 35 | //! println!("Identifier: {}, Value: {}", key, value); 36 | //! } 37 | //! 
``` 38 | use crate::{Identifier, IdentifierValue}; 39 | use std::{collections::BTreeMap, ops::Deref}; 40 | 41 | /// Represents a key-value pair of domain identifiers. 42 | /// 43 | /// The `DomainIdentifier` struct is used to associate a specific `Identifier` key with a corresponding value. 44 | #[derive(Debug, PartialEq, Eq, Clone)] 45 | pub struct DomainIdentifier { 46 | pub key: Identifier, 47 | pub value: IdentifierValue, 48 | } 49 | 50 | /// A set of domain identifiers, represented as a map of `Identifier` keys and values. 51 | /// 52 | /// The `DomainIdentifierSet` struct is used to store a collection of domain identifiers. 53 | #[derive(Debug, Default, PartialEq, Eq, Clone)] 54 | pub struct DomainIdentifierSet(BTreeMap); 55 | 56 | impl DomainIdentifierSet { 57 | /// Creates a new `DomainIdentifierSet` with the given `BTreeMap` of domain identifiers. 58 | pub fn new(domain_identifiers: BTreeMap) -> Self { 59 | Self(domain_identifiers) 60 | } 61 | 62 | /// Inserts a new `DomainIdentifier` into the set. 63 | pub fn insert(&mut self, DomainIdentifier { key, value }: DomainIdentifier) { 64 | self.0.insert(key, value); 65 | } 66 | } 67 | 68 | /// Implements the `Deref` trait for `DomainIdentifierSet`, allowing it to be dereferenced to a `HashMap`. 69 | /// This enables transparent access to the underlying `BTreeMap` of domain identifiers. 70 | impl Deref for DomainIdentifierSet { 71 | type Target = BTreeMap; 72 | 73 | fn deref(&self) -> &Self::Target { 74 | &self.0 75 | } 76 | } 77 | 78 | /// Creates a domain identifiers set. 79 | #[macro_export] 80 | macro_rules! domain_identifiers{ 81 | {}=> { 82 | $crate::DomainIdentifierSet::default() 83 | }; 84 | {$($key:ident: $value:expr),*} => {{ 85 | #[allow(unused_mut)] 86 | let mut domain_identifiers = std::collections::BTreeMap::<$crate::Identifier, $crate::IdentifierValue>::new(); 87 | $(domain_identifiers.insert($crate::ident!(#$key), $crate::IntoIdentifierValue::into_identifier_value($value.clone()));)* 88 | $crate::DomainIdentifierSet::new(domain_identifiers) 89 | }}; 90 | } 91 | -------------------------------------------------------------------------------- /disintegrate/src/event_store.rs: -------------------------------------------------------------------------------- 1 | //! Event store is responsible for storing and retrieving events. 2 | //! 3 | //! It is designed to be implemented by different storage backends, such as databases 4 | //! or distributed event sourcing systems. Implementations of this trait should handle event persistence, querying, 5 | //! and conflict resolution in a way that aligns with the specific requirements and constraints of the underlying 6 | //! storage system. 7 | //! 8 | //! For more details and specific implementations, refer to the trait documentation and individual implementations 9 | //! of the `EventStore` trait. 10 | use crate::{ 11 | event::{Event, EventId, PersistedEvent}, 12 | stream_query::StreamQuery, 13 | }; 14 | 15 | use async_trait::async_trait; 16 | use futures::stream::BoxStream; 17 | use std::error::Error as StdError; 18 | /// An event store. 19 | /// 20 | /// This trait provides methods for streaming events and appending events to the event store. 21 | #[async_trait] 22 | pub trait EventStore 23 | where 24 | ID: EventId, 25 | E: Event + Send + Sync, 26 | { 27 | type Error: Send + Sync; 28 | 29 | // Streams events based on the provided query. 30 | /// 31 | /// # Arguments 32 | /// 33 | /// * `query` - The stream query specifying the filtering conditions. 
34 | /// 35 | /// # Returns 36 | /// 37 | /// A `Result` containing a boxed stream of `PersistedEvent` matching the query, or an error. 38 | fn stream<'a, QE>( 39 | &'a self, 40 | query: &'a StreamQuery<ID, QE>, 41 | ) -> BoxStream<'a, Result<PersistedEvent<ID, QE>, Self::Error>> 42 | where 43 | QE: TryFrom<E> + Event + 'static + Clone + Send + Sync, 44 | <QE as TryFrom<E>>::Error: StdError + 'static + Send + Sync; 45 | 46 | /// Appends a batch of events to the event store. 47 | /// 48 | /// # Arguments 49 | /// 50 | /// * `events` - A vector of events to append to the event store. 51 | /// * `query` - The stream query associated with the appended events. 52 | /// * `last_event_id` - The ID of the last event in the event stream that was queried before appending. 53 | /// 54 | /// # Returns 55 | /// 56 | /// A `Result` containing a vector of `PersistedEvent` representing the appended events, or an error. 57 | /// 58 | /// # Notes 59 | /// 60 | /// The `append` method re-executes the `query` and checks if there are new events between the `last_event_id` 61 | /// queried and the appended events' IDs. If new events are found, a conflict has occurred, and the conflict 62 | /// handling mechanism should be implemented accordingly. 63 | async fn append<QE>( 64 | &self, 65 | events: Vec<E>, 66 | query: StreamQuery<ID, QE>, 67 | last_event_id: ID, 68 | ) -> Result<Vec<PersistedEvent<ID, E>>, Self::Error> 69 | where 70 | E: Clone + 'async_trait, 71 | QE: Event + 'static + Clone + Send + Sync; 72 | 73 | /// Appends a batch of events to the event store **without** verifying if 74 | /// new events have been added since the last read. 75 | /// 76 | /// This method is useful when you are certain that no other process 77 | /// has modified the event store in a way that would make your logic stale. 78 | /// 79 | /// If you need to guarantee that no duplicate events are added, 80 | /// use the `append` method instead, providing a query that ensures uniqueness. 81 | /// 82 | /// # Arguments 83 | /// 84 | /// * `events` - A vector of events to append to the event store. 85 | /// 86 | /// # Returns 87 | /// 88 | /// A `Result` containing a vector of `PersistedEvent` representing the appended events, or an error.
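/// # Example
///
/// A minimal sketch (the `store` and `events` bindings are assumed, not part of this trait):
///
/// ```ignore
/// // No conflict check is performed, so this is only safe when this writer
/// // is known to be the sole producer (e.g. migrations or stream rebuilds).
/// let persisted = store.append_without_validation(events).await?;
/// ```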
89 | async fn append_without_validation( 90 | &self, 91 | events: Vec<E>, 92 | ) -> Result<Vec<PersistedEvent<ID, E>>, Self::Error> 93 | where 94 | E: Clone + 'async_trait; 95 | } 96 | -------------------------------------------------------------------------------- /examples/banking/src/main.rs: -------------------------------------------------------------------------------- 1 | mod domain; 2 | 3 | use actix_web::{ 4 | error, 5 | http::{header::ContentType, StatusCode}, 6 | post, 7 | web::{Data, Json, Path}, 8 | App, HttpResponse, HttpServer, Result, 9 | }; 10 | 11 | use disintegrate::WithSnapshot; 12 | use disintegrate_postgres::{PgDecisionMaker, PgEventStore, PgSnapshotter, WithPgSnapshot}; 13 | use domain::DomainEvent; 14 | use serde::{Deserialize, Serialize}; 15 | use sqlx::{postgres::PgConnectOptions, PgPool}; 16 | 17 | use crate::domain::{CloseAccount, DepositAmount, OpenAccount, SendMoney, WithdrawAmount}; 18 | 19 | type DecisionMaker = 20 | PgDecisionMaker<DomainEvent, disintegrate::serde::json::Json<DomainEvent>, WithPgSnapshot>; 21 | #[derive(thiserror::Error, Debug)] 22 | #[error(transparent)] 23 | pub struct Error { 24 | #[from] 25 | source: disintegrate::DecisionError, 26 | } 27 | 28 | #[tokio::main] 29 | async fn main() -> anyhow::Result<()> { 30 | dotenv::dotenv().unwrap(); 31 | 32 | let connect_options = PgConnectOptions::new(); 33 | let pool = PgPool::connect_with(connect_options).await?; 34 | 35 | let serde = disintegrate::serde::json::Json::<DomainEvent>::default(); 36 | let event_store = PgEventStore::new_uninitialized(pool.clone(), serde); 37 | let snapshotter = PgSnapshotter::new(pool, 10).await?; 38 | let decision_maker = 39 | disintegrate_postgres::decision_maker(event_store, WithSnapshot::new(snapshotter)); 40 | 41 | Ok(HttpServer::new(move || { 42 | App::new() 43 | .app_data(Data::new(decision_maker.clone())) 44 | .service(open_account) 45 | .service(close_account) 46 | .service(deposit) 47 | .service(withdraw) 48 | .service(transfer) 49 | }) 50 | .bind(("127.0.0.1", 8080))? 51 | .run() 52 | .await?)
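    // Note: bind/run errors propagate via `?`, and the unit value returned by
    // `run().await` is re-wrapped in `Ok` to satisfy `anyhow::Result<()>`.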
53 | } 54 | 55 | #[derive(Serialize, Deserialize)] 56 | struct Amount { 57 | amount: i32, 58 | } 59 | 60 | #[post("/account/{id}/open")] 61 | async fn open_account( 62 | decision_maker: Data<DecisionMaker>, 63 | id: Path<i64>, 64 | ) -> Result<&'static str, Error> { 65 | decision_maker.make(OpenAccount::new(*id)).await?; 66 | Ok("success!") 67 | } 68 | 69 | #[post("/account/{id}/deposit")] 70 | async fn deposit( 71 | decision_maker: Data<DecisionMaker>, 72 | id: Path<i64>, 73 | data: Json<Amount>, 74 | ) -> Result<&'static str, Error> { 75 | decision_maker 76 | .make(DepositAmount::new(*id, data.amount)) 77 | .await?; 78 | Ok("success!") 79 | } 80 | 81 | #[post("/account/{id}/withdraw")] 82 | async fn withdraw( 83 | decision_maker: Data<DecisionMaker>, 84 | id: Path<i64>, 85 | data: Json<Amount>, 86 | ) -> Result<&'static str, Error> { 87 | decision_maker 88 | .make(WithdrawAmount::new(*id, data.amount)) 89 | .await?; 90 | Ok("success!") 91 | } 92 | 93 | #[post("/account/{id}/close")] 94 | async fn close_account( 95 | decision_maker: Data<DecisionMaker>, 96 | id: Path<i64>, 97 | ) -> Result<&'static str, Error> { 98 | decision_maker.make(CloseAccount::new(*id)).await?; 99 | Ok("success!") 100 | } 101 | 102 | #[post("/account/{id}/transfer/{beneficiary_id}")] 103 | async fn transfer( 104 | decision_maker: Data<DecisionMaker>, 105 | accounts: Path<(i64, i64)>, 106 | data: Json<Amount>, 107 | ) -> Result<&'static str, Error> { 108 | decision_maker 109 | .make(SendMoney::new(accounts.0, accounts.1, data.amount)) 110 | .await?; 111 | Ok("success!") 112 | } 113 | 114 | impl error::ResponseError for Error { 115 | fn error_response(&self) -> HttpResponse { 116 | HttpResponse::build(self.status_code()) 117 | .insert_header(ContentType::html()) 118 | .body(self.to_string()) 119 | } 120 | 121 | fn status_code(&self) -> StatusCode { 122 | match self.source { 123 | disintegrate::DecisionError::Domain(_) => StatusCode::BAD_REQUEST, 124 | disintegrate::DecisionError::EventStore(_) => StatusCode::INTERNAL_SERVER_ERROR, 125 | disintegrate::DecisionError::StateStore(_) => StatusCode::INTERNAL_SERVER_ERROR, 126 | } 127 | } 128 | } 129 | -------------------------------------------------------------------------------- /disintegrate/src/event.rs: -------------------------------------------------------------------------------- 1 | //! Event represents an occurrence or action of interest within the system. 2 | //! 3 | //! This module defines the Event trait, which provides methods for retrieving domain identifiers associated 4 | //! with the event and getting the event's name. 5 | //! 6 | //! The PersistedEvent struct wraps an event and contains an ID assigned by the event store. It represents 7 | //! an event that has been persisted in the event store. 8 | use crate::{domain_identifier::DomainIdentifierSet, Identifier, IdentifierType}; 9 | use std::ops::Deref; 10 | 11 | /// Represents the ID of an event. 12 | pub trait EventId: 13 | Default + Copy + Clone + PartialEq + Eq + Ord + PartialOrd + Send + Sync + 'static 14 | { 15 | } 16 | 17 | impl<Id> EventId for Id where 18 | Id: Default + Copy + Clone + PartialEq + Eq + Ord + PartialOrd + Send + Sync + 'static 19 | { 20 | } 21 | 22 | /// Represents the schema of an event. 23 | /// 24 | /// The event info contains the name of the event and the domain identifiers associated with it. 25 | #[derive(Debug, PartialEq, Eq, Clone)] 26 | pub struct EventInfo { 27 | /// The name of the event. 28 | pub name: &'static str, 29 | /// The domain identifiers associated with the event.
30 | pub domain_identifiers: &'static [&'static Identifier], 31 | } 32 | 33 | impl EventInfo { 34 | /// Returns true if the event has the given domain identifier. 35 | pub fn has_domain_identifier(&self, ident: &Identifier) -> bool { 36 | self.domain_identifiers.contains(&ident) 37 | } 38 | } 39 | 40 | /// Represents the domain identifier and its type. 41 | #[derive(Debug, PartialEq, Eq, Clone, Copy)] 42 | pub struct DomainIdentifierInfo { 43 | /// The domain identifier. 44 | pub ident: Identifier, 45 | /// The type of the domain identifier. 46 | pub type_info: IdentifierType, 47 | } 48 | 49 | /// Represents the schema of all supported events. 50 | /// 51 | /// The schema contains the names of all supported events, 52 | /// the domain identifiers associated with them, and the domain identifiers' types. 53 | #[derive(Debug, Clone)] 54 | pub struct EventSchema { 55 | pub events: &'static [&'static str], 56 | pub events_info: &'static [&'static EventInfo], 57 | pub domain_identifiers: &'static [&'static DomainIdentifierInfo], 58 | } 59 | 60 | impl EventSchema { 61 | pub fn event_info(&self, name: &str) -> Option<&EventInfo> { 62 | self.events_info 63 | .iter() 64 | .find(|info| info.name == name) 65 | .copied() 66 | } 67 | } 68 | 69 | /// Represents an event in the event store. 70 | /// 71 | /// An event is an occurrence or action of interest within the system. It can be persisted and retrieved from 72 | /// the event store. The `Event` trait provides methods for retrieving domain identifiers associated with the event 73 | /// and getting the event's name. The constant `SCHEMA` holds the name and the domain identifiers of all supported events. 74 | pub trait Event { 75 | /// Returns the schema of all supported events. 76 | const SCHEMA: EventSchema; 77 | /// Retrieves the domain identifiers associated with the event. 78 | fn domain_identifiers(&self) -> DomainIdentifierSet; 79 | /// Retrieves the name of the event. 80 | fn name(&self) -> &'static str; 81 | } 82 | 83 | /// Wrapper for a persisted event. 84 | /// 85 | /// It contains an ID assigned by the event store and the event itself. 86 | #[derive(Debug, Clone)] 87 | pub struct PersistedEvent { 88 | pub(crate) id: ID, 89 | pub(crate) event: E, 90 | } 91 | 92 | impl PersistedEvent { 93 | /// Creates a new `PersistedEvent` instance with the given ID and event. 94 | pub fn new(id: ID, event: E) -> Self { 95 | Self { id, event } 96 | } 97 | 98 | /// Returns the inner event. 99 | pub fn into_inner(self) -> E { 100 | self.event 101 | } 102 | 103 | /// Retrieves the ID assigned by the event store for this persisted event. 
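/// # Example
///
/// A sketch (the `persisted` binding is assumed):
///
/// ```ignore
/// let id = persisted.id(); // ID assigned by the event store
/// let event = persisted.into_inner(); // consume the wrapper, keep the event
/// ```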
104 | pub fn id(&self) -> ID { 105 | self.id 106 | } 107 | } 108 | 109 | impl Deref for PersistedEvent { 110 | type Target = E; 111 | 112 | fn deref(&self) -> &Self::Target { 113 | &self.event 114 | } 115 | } 116 | -------------------------------------------------------------------------------- /examples/courses/src/read_model.rs: -------------------------------------------------------------------------------- 1 | use crate::domain::{CourseId, DomainEvent}; 2 | use async_trait::async_trait; 3 | use disintegrate::{query, EventListener, PersistedEvent, StreamQuery}; 4 | use sqlx::{FromRow, PgPool}; 5 | 6 | #[derive(Clone)] 7 | pub struct Repository { 8 | pool: PgPool, 9 | } 10 | 11 | impl Repository { 12 | pub fn new(pool: PgPool) -> Self { 13 | Self { pool } 14 | } 15 | pub async fn course_by_id(&self, course_id: CourseId) -> Result, sqlx::Error> { 16 | sqlx::query_as::<_, Course>( 17 | "SELECT course_id, name, available_seats, closed FROM course WHERE course_id = $1", 18 | ) 19 | .bind(course_id) 20 | .fetch_optional(&self.pool) 21 | .await 22 | } 23 | } 24 | 25 | #[derive(FromRow)] 26 | pub struct Course { 27 | pub course_id: String, 28 | pub name: String, 29 | pub available_seats: i32, 30 | pub closed: bool, 31 | } 32 | 33 | pub struct ReadModelProjection { 34 | query: StreamQuery, 35 | pool: PgPool, 36 | } 37 | 38 | impl ReadModelProjection { 39 | pub async fn new(pool: PgPool) -> Result { 40 | sqlx::query( 41 | r#" 42 | CREATE TABLE IF NOT EXISTS course ( 43 | course_id TEXT PRIMARY KEY, 44 | name TEXT, 45 | available_seats INT, 46 | closed BOOL DEFAULT false, 47 | event_id BIGINT not null 48 | )"#, 49 | ) 50 | .execute(&pool) 51 | .await?; 52 | Ok(Self { 53 | query: query!(DomainEvent), 54 | pool, 55 | }) 56 | } 57 | } 58 | 59 | #[async_trait] 60 | impl EventListener for ReadModelProjection { 61 | type Error = sqlx::Error; 62 | fn id(&self) -> &'static str { 63 | "courses" 64 | } 65 | 66 | fn query(&self) -> &StreamQuery { 67 | &self.query 68 | } 69 | 70 | async fn handle(&self, event: PersistedEvent) -> Result<(), Self::Error> { 71 | let event_id = event.id(); 72 | match event.into_inner() { 73 | DomainEvent::CourseCreated { 74 | course_id, 75 | name, 76 | seats, 77 | } => { 78 | sqlx::query( 79 | "INSERT INTO course (course_id, name, available_seats, event_id) VALUES($1, $2, $3, $4) ON CONFLICT DO NOTHING", 80 | ) 81 | .bind(course_id) 82 | .bind(name) 83 | .bind(seats as i32) 84 | .bind(event_id) 85 | .execute(&self.pool) 86 | .await 87 | .unwrap(); 88 | } 89 | DomainEvent::CourseClosed { course_id } => { 90 | sqlx::query( 91 | "UPDATE course SET closed = true, event_id = $2 WHERE course_id = $1 and event_id < $2", 92 | ) 93 | .bind(course_id) 94 | .bind(event_id) 95 | .execute(&self.pool) 96 | .await 97 | .unwrap(); 98 | } 99 | DomainEvent::StudentSubscribed { course_id, .. } => { 100 | sqlx::query( 101 | "UPDATE course SET available_seats = available_seats - 1, event_id = $2 WHERE course_id = $1 and event_id < $2", 102 | ) 103 | .bind(course_id) 104 | .bind(event_id) 105 | .execute(&self.pool) 106 | .await 107 | .unwrap(); 108 | } 109 | DomainEvent::StudentUnsubscribed { course_id, .. 
} => { 110 | sqlx::query( 111 | "UPDATE course SET available_seats = available_seats + 1, event_id = $2 WHERE course_id = $1 and event_id < $2", 112 | ) 113 | .bind(course_id) 114 | .bind(event_id) 115 | .execute(&self.pool) 116 | .await 117 | .unwrap(); 118 | } 119 | DomainEvent::CourseRenamed { course_id, name } => { 120 | sqlx::query("UPDATE course SET name = $2, event_id = $3 WHERE course_id = $1 and event_id < $3") 121 | .bind(course_id) 122 | .bind(name) 123 | .bind(event_id) 124 | .execute(&self.pool) 125 | .await 126 | .unwrap(); 127 | } 128 | _ => {} 129 | } 130 | Ok(()) 131 | } 132 | } 133 | -------------------------------------------------------------------------------- /docs/docs/decision.md: -------------------------------------------------------------------------------- 1 | --- 2 | sidebar_position: 4 3 | --- 4 | 5 | # Decision 6 | 7 | `Decision` encapsulates a specific action or behavior triggered by external commands or events. To implement a `Decision`, developers must implement the `Decision` trait, which contains the following methods: 8 | * `state_query`: Returns the state query the decision is based on. A state query represents the current state of the system, derived from past events stored in the event store. It provides the necessary context for making decisions and serves as the input for the decision logic. 9 | * `process`: Defines the business logic based on the queried state and returns a vector of events representing the changes to be applied to the system. 10 | * `validation_query`: Provides an optional state query used to check that the decision is still valid if new events were appended to the event store between reading the state and writing the decision's events. If this method is not implemented, the default implementation uses the query returned by the `state_query` method, which ensures that the decision was taken using an up-to-date state. However, you may sometimes want to define a dedicated validation query to improve performance by narrowing the validation scope. 11 | 12 | `Decision`s provide developers with a structured and scalable approach to implementing business logic. They enable: 13 | * Modularity: `Decision`s embody specific business logic, promoting modularity and a clean separation of concerns within the application architecture. This structured approach makes the system easier to maintain. 14 | * Testability: `Decision`s facilitate test-driven development (TDD) practices by defining clear boundaries for writing test cases and verifying behavior. 15 | 16 | ```rust 17 | pub struct WithdrawAmount { 18 | account_id: String, 19 | amount: u32, 20 | } 21 | 22 | impl WithdrawAmount { 23 | pub fn new(account_id: String, amount: u32) -> Self { 24 | Self { account_id, amount } 25 | } 26 | } 27 | 28 | impl Decision for WithdrawAmount { 29 | type Event = DomainEvent; 30 | type StateQuery = AccountState; 31 | type Error = AccountError; 32 | 33 | fn state_query(&self) -> Self::StateQuery { 34 | AccountState::new(&self.account_id) 35 | } 36 | 37 | fn process(&self, state: &Self::StateQuery) -> Result<Vec<Self::Event>, Self::Error> { 38 | // Validate account balance and perform withdrawal logic 39 | // Construct and return events representing the changes 40 | } 41 | } 42 | ``` 43 | 44 | ## Developing a new Decision 45 | 46 | Before implementing a `Decision`, it's advisable to start by writing tests. Disintegrate offers the `TestHarness`, a utility for writing tests in a given-when-then style.
This tool assists you in defining the business logic of your application following a Test-Driven Development (TDD) approach: 47 | 48 | ```rust 49 | #[test] 50 | fn it_withdraws_an_amount() { 51 | disintegrate::TestHarness::given([ 52 | DomainEvent::AccountOpened { 53 | account_id: "some account".into(), 54 | }, 55 | DomainEvent::AmountDeposited { 56 | account_id: "some account".into(), 57 | amount: 10, 58 | }, 59 | ]) 60 | .when(WithdrawAmount::new("some account".into(), 10)) 61 | .then([DomainEvent::AmountWithdrawn { 62 | account_id: "some account".into(), 63 | amount: 10, 64 | }]); 65 | } 66 | 67 | #[test] 68 | fn it_should_not_withdraw_an_amount_when_the_balance_is_insufficient() { 69 | disintegrate::TestHarness::given([ 70 | DomainEvent::AccountOpened { 71 | account_id: "some account".into(), 72 | }, 73 | DomainEvent::AmountDeposited { 74 | account_id: "some account".into(), 75 | amount: 10, 76 | }, 77 | DomainEvent::AmountWithdrawn { 78 | account_id: "some account".into(), 79 | amount: 6, 80 | }, 81 | ]) 82 | .when(WithdrawAmount::new("some account".into(), 5)) 83 | .then_err(AccountError::InsufficientBalance); 84 | } 85 | ``` 86 | 87 | ## Decision Maker 88 | 89 | `DecisionMaker` executes decisions and persists the resulting events into the event store. It acts as the orchestrator for applying business logic and updating the system state based on the decisions made. 90 | 91 | ```rust 92 | let decision_maker = disintegrate_postgres::decision_maker(event_store); 93 | decision_maker 94 | .make(WithdrawAmount::new(id, amount)) 95 | .await?; 96 | ``` 97 | 98 | This example shows the execution of the `WithdrawAmount` decision. 99 | 100 | -------------------------------------------------------------------------------- /docs/docs/developer_journey/add_new_feature.md: -------------------------------------------------------------------------------- 1 | --- 2 | sidebar_position: 3 3 | --- 4 | 5 | # Add New Features 6 | 7 | ## Old Approaches vs Disintegrate 8 | Traditional approaches to adding new features in a shopping cart application might involve tightly coupling components, making it challenging to extend or modify the system. However, by adopting Disintegrate, developers can leverage its modular and decoupled architecture to seamlessly integrate new features. 9 | 10 | ### Example: 11 | Consider the scenario where we aim to enhance our shopping cart application with a new feature: coupons. The business intends to provide customers with a limited number of coupons that they can apply to their purchases. However, a traditional aggregate-based implementation of this feature presents challenges, primarily due to the coordination required between multiple aggregates. 12 | 13 | Let's imagine we already have a `ShoppingCart` aggregate in our system (there are various ways to design this system, but for simplicity's sake, let's assume we have this aggregate). Now, we could opt to introduce a new aggregate named `Coupon` to manage the availability of a coupon identified by its unique ID. In the following section, we'll delve into the challenges posed by this approach and how Disintegrate simplifies the integration of this new feature. 14 | 15 | ## Challenges with Aggregates 16 | To implement coupons using aggregates, we may introduce a new aggregate for coupons to track their availability. When a customer proceeds to checkout, they can choose to apply a coupon.
This introduces complexity as we need to ensure that the coupon is available before applying it and update its availability afterward. This may require the following considerations: 17 | 18 | * Introduction of a New Aggregate: To manage coupons effectively, a new aggregate should be introduced to track their availability. It's essential to ensure that coupons are available before they are applied and to update their availability afterward. 19 | 20 | * Workflow Management: Introducing a `Coupon` aggregate necessitates efficient workflow management between the `Cart` and `Coupon` aggregates. Policies must be devised to coordinate actions such as updating coupon availability and applying coupons during checkout seamlessly. 21 | 22 | * Implementation of Compensation Mechanisms: It may be necessary to implement compensation mechanisms or two-phase commit protocols to ensure consistency in case of failures or errors during coupon application. This will help maintain integrity and reliability in coupon management processes. 23 | 24 | * Integration of Recovery Mechanisms: Robust recovery mechanisms should be integrated into the system to handle system crashes or failures effectively. These mechanisms ensure that the system can restore itself to a consistent state upon restart, preventing loss or inconsistency between aggregates. 25 | 26 | ## Disintegrate Approach 27 | 28 | With Disintegrate, this new feature can be integrated much more easily. One way to achieve this in Disintegrate is to add a new state that represents coupon availability. We can call this new state Coupon: 29 | 30 | ```rust 31 | #[derive(Debug, Clone, StateQuery, Default, Deserialize, Serialize)] 32 | #[state_query(CouponEvent)] 33 | struct Coupon { 34 | #[id] 35 | coupon_id: String, 36 | quantity: i32 37 | } 38 | ``` 39 | 40 | Then, in the `ApplyCoupon` decision, we can use this new state query to retrieve the coupon state and check if the coupon is available: 41 | 42 | ```rust 43 | impl StateMutate for Coupon { 44 | fn mutate(&mut self, event: Self::Event) { 45 | match event { 46 | CouponEvent::CouponEmitted { quantity, .. } => self.quantity += quantity, 47 | CouponEvent::CouponApplied { .. 
} => self.quantity -= 1, 48 | } 49 | } 50 | } 51 | 52 | pub struct ApplyCoupon { 53 | user_id: String, 54 | coupon_id: String, 55 | } 56 | 57 | impl ApplyCoupon { 58 | #[allow(dead_code)] 59 | pub fn new(user_id: String, coupon_id: String) -> Self { 60 | Self { user_id, coupon_id } 61 | } 62 | } 63 | 64 | impl Decision for ApplyCoupon { 65 | type Event = DomainEvent; 66 | type StateQuery = (Cart, Coupon); 67 | type Error = CartError; 68 | 69 | fn state_query(&self) -> Self::StateQuery { 70 | (Cart::new(&self.user_id), Coupon::new(&self.coupon_id)) 71 | } 72 | 73 | fn process(&self, (cart, coupon): &Self::StateQuery) -> Result<Vec<Self::Event>, Self::Error> { 74 | if cart.applied_coupon.is_some() { 75 | return Err(CartError::CouponAlreadyApplied); 76 | } 77 | if coupon.quantity == 0 { 78 | return Err(CartError::CouponNotAvailable); 79 | } 80 | Ok(vec![DomainEvent::CouponApplied { 81 | coupon_id: self.coupon_id.clone(), 82 | user_id: self.user_id.clone(), 83 | }]) 84 | } 85 | } 86 | ``` 87 | 88 | 89 | -------------------------------------------------------------------------------- /disintegrate-macros/src/lib.rs: -------------------------------------------------------------------------------- 1 | mod event; 2 | mod state_query; 3 | mod symbol; 4 | 5 | extern crate proc_macro; 6 | 7 | use proc_macro::TokenStream; 8 | use proc_macro2::{Ident, TokenStream as TokenStream2}; 9 | 10 | use syn::{parse_macro_input, DeriveInput}; 11 | 12 | /// Derives the `Event` trait for an enum, allowing it to be used as an event in Disintegrate. 13 | /// 14 | /// The `Event` trait is used to mark an enum as an event type in Disintegrate. By deriving this 15 | /// trait, the enum gains the necessary functionality to be used as an event. 16 | /// 17 | /// The `Event` trait can be customized using attributes. The `id` attribute can be used to specify 18 | /// the domain identifier of an event, while the `stream` attribute can be used to group related 19 | /// events into a stream. 20 | /// 21 | /// # Example 22 | /// 23 | /// ```rust 24 | /// use disintegrate::Event; 25 | /// 26 | /// #[derive(Event)] 27 | /// #[stream(UserEvent, [UserCreated, UserUpdated])] 28 | /// #[stream(OrderEvent, [OrderCreated, OrderCancelled])] 29 | /// enum DomainEvent { 30 | ///     UserCreated { 31 | ///         #[id] 32 | ///         user_id: String, 33 | ///         name: String, 34 | ///         email: String, 35 | ///     }, 36 | ///     UserUpdated { 37 | ///         #[id] 38 | ///         user_id: String, 39 | ///         email: String, 40 | ///     }, 41 | ///     OrderCreated { 42 | ///         #[id] 43 | ///         order_id: String, 44 | ///         amount: u32 45 | ///     }, 46 | ///     OrderCancelled { 47 | ///         #[id] 48 | ///         order_id: String 49 | ///     }, 50 | /// } 51 | /// ``` 52 | /// 53 | /// In this example, the `DomainEvent` enum is marked as an event by deriving the `Event` trait. The 54 | /// `#[stream]` attribute specifies the event stream name and the list of variants to include in the stream, while the `#[id]` attribute is used 55 | /// to specify the domain identifiers of each variant. 56 | #[proc_macro_derive(Event, attributes(stream, id))] 57 | pub fn event(input: TokenStream) -> TokenStream { 58 | let ast = parse_macro_input!(input as DeriveInput); 59 | event::event_inner(&ast) 60 | .unwrap_or_else(syn::Error::into_compile_error) 61 | .into() 62 | } 63 | 64 | /// Derives the `StateQuery` trait for a struct, enabling its use as a state query in Disintegrate. 65 | /// 66 | /// The `state_query` attribute is mandatory and must include the event type associated with the state query.
67 | /// Additionally, the `id` attribute can be utilized to specify the domain identifier of a state query. It is employed 68 | /// in generating a stream query for the state, querying for the event specified in the `state_query` 69 | /// attribute, with the identifiers marked in `or`. 70 | /// 71 | /// It is also possible to rename a state using the `rename` argument in the `state_query` attribute. This feature is beneficial 72 | /// for snapshotting, and the name specified in `rename` is used to identify the snapshot. 73 | /// 74 | /// # Example 75 | /// 76 | /// ```rust 77 | /// # use disintegrate::Event; 78 | /// # #[derive(Event, Clone)] 79 | /// # enum DomainEvent{ 80 | /// # UserCreated { 81 | /// # #[id] 82 | /// # user_id: String, 83 | /// # }, 84 | /// # } 85 | /// 86 | /// use disintegrate::StateQuery; 87 | /// 88 | /// #[derive(StateQuery, Clone)] 89 | /// #[state_query(DomainEvent, rename = "user-query-v1")] // Rename the state for snapshotting 90 | /// struct UserStateQuery { 91 | /// #[id] 92 | /// user_id: String, 93 | /// name: String 94 | /// } 95 | /// ``` 96 | /// 97 | /// In this example, the `UserStateQuery` struct is annotated with the `StateQuery` derive, 98 | /// indicating its role as a state query. The `#[state_query]` attribute specifies the associated event type, 99 | /// and the `#[id]` attribute is used to define the domain identifiers. The `#[state_query]` attribute with `rename` 100 | /// renames the state to 'user-query-v1' for snapshotting purposes. 101 | #[proc_macro_derive(StateQuery, attributes(state_query, id))] 102 | pub fn state_query(input: TokenStream) -> TokenStream { 103 | let ast = parse_macro_input!(input as DeriveInput); 104 | state_query::state_query_inner(&ast) 105 | .unwrap_or_else(syn::Error::into_compile_error) 106 | .into() 107 | } 108 | 109 | fn reserved_identifier_names(identifiers_fields: &[&Ident]) -> Option { 110 | const RESERVED_NAMES: &[&str] = &["event_id", "payload", "event_type", "inserted_at"]; 111 | 112 | identifiers_fields 113 | .iter() 114 | .find(|id| RESERVED_NAMES.contains(&id.to_string().as_str())) 115 | .map(|id| { 116 | syn::Error::new( 117 | id.span(), 118 | "Reserved domain identifier name. Please use a different name", 119 | ) 120 | .to_compile_error() 121 | }) 122 | } 123 | -------------------------------------------------------------------------------- /disintegrate-serde/src/serde/avro.rs: -------------------------------------------------------------------------------- 1 | //! A module for serializing and deserializing data using Avro schema. 2 | use std::marker::PhantomData; 3 | 4 | use super::Error; 5 | use apache_avro::{from_value, Codec, Reader, Schema, Writer}; 6 | use serde::{Deserialize, Serialize}; 7 | 8 | use crate::serde::{Deserializer, Serializer}; 9 | 10 | /// An Avro serialization and deserialization module. 11 | #[derive(Debug, Clone)] 12 | pub struct Avro { 13 | schema: Schema, 14 | input: PhantomData, 15 | output: PhantomData, 16 | } 17 | 18 | impl Avro { 19 | /// Create a new instance of `Avro` with the specified Avro schema. 20 | /// 21 | /// # Arguments 22 | /// 23 | /// * `schema` - A string representing the Avro schema. 
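///
/// # Panics
///
/// Panics if `schema` is not a valid Avro schema, since the schema is
/// parsed with `Schema::parse_str(schema).unwrap()`.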
24 | /// 25 | /// # Returns 26 | /// 27 | /// A new `Avro` instance 28 | pub fn new(schema: &str) -> Self { 29 | let schema = Schema::parse_str(schema).unwrap(); 30 | Self { 31 | schema, 32 | input: PhantomData, 33 | output: PhantomData, 34 | } 35 | } 36 | } 37 | 38 | impl Serializer for Avro 39 | where 40 | O: From + Serialize, 41 | { 42 | /// Serialize the given value to Avro format and return the serialized bytes. 43 | /// 44 | /// # Arguments 45 | /// 46 | /// * `value` - The value to be serialized. 47 | /// 48 | /// # Returns 49 | /// 50 | /// Serialized bytes representing the value in Avro format. 51 | fn serialize(&self, value: I) -> Vec { 52 | let target = O::from(value); 53 | let mut writer = Writer::with_codec(&self.schema, Vec::new(), Codec::Deflate); 54 | writer 55 | .append_ser(target) 56 | .expect("avro serialization should not fail"); 57 | writer.into_inner().expect("encoded avro should not fail") 58 | } 59 | } 60 | 61 | impl Deserializer for Avro 62 | where 63 | I: TryFrom, 64 | for<'d> O: Deserialize<'d>, 65 | { 66 | /// Deserialize the given Avro serialized bytes to produce a value of type `I`. 67 | /// 68 | /// # Arguments 69 | /// 70 | /// * `data` - The Avro serialized bytes to be deserialized. 71 | /// 72 | /// # Returns 73 | /// 74 | /// A `Result` containing the deserialized value on success, or an error on failure. 75 | fn deserialize(&self, data: Vec) -> Result { 76 | let mut reader = Reader::new(&data[..]).map_err(|e| Error::Deserialization(Box::new(e)))?; 77 | let value = reader 78 | .next() 79 | .expect("at least one value should be present") 80 | .map_err(|e| Error::Deserialization(Box::new(e)))?; 81 | let target: O = from_value(&value).map_err(|e| Error::Deserialization(Box::new(e)))?; 82 | I::try_from(target).map_err(|_| Error::Conversion) 83 | } 84 | } 85 | 86 | #[cfg(test)] 87 | mod tests { 88 | use super::*; 89 | use std::convert::TryFrom; 90 | 91 | #[derive(Debug, PartialEq, Clone)] 92 | struct InputData { 93 | value: u32, 94 | } 95 | 96 | #[derive(Debug, Serialize, Deserialize, PartialEq, Clone)] 97 | struct SerializedData { 98 | value: String, 99 | } 100 | 101 | const TEST_SCHEMA: &str = r#" 102 | { 103 | "type": "record", 104 | "name": "TestRecord", 105 | "fields": [ 106 | { "name": "value", "type": "string" } 107 | ] 108 | } 109 | "#; 110 | 111 | #[derive(Debug, PartialEq)] 112 | enum ConversionError { 113 | InvalidValue, 114 | } 115 | 116 | impl TryFrom for InputData { 117 | type Error = ConversionError; 118 | 119 | fn try_from(data: SerializedData) -> Result { 120 | let input_value = data 121 | .value 122 | .parse::() 123 | .map_err(|_| ConversionError::InvalidValue)?; 124 | Ok(InputData { value: input_value }) 125 | } 126 | } 127 | 128 | impl From for SerializedData { 129 | fn from(data: InputData) -> Self { 130 | SerializedData { 131 | value: data.value.to_string(), 132 | } 133 | } 134 | } 135 | 136 | #[test] 137 | fn it_serializes_and_deserializes_avro_data() { 138 | // Create an instance of the Avro module with the test schema 139 | let avro = Avro::::new(TEST_SCHEMA); 140 | 141 | let input = InputData { value: 42 }; 142 | 143 | // Serialize the input data 144 | let serialized = avro.serialize(input.clone()); 145 | 146 | // Deserialize the serialized data 147 | let deserialized: InputData = avro.deserialize(serialized).unwrap(); 148 | 149 | // Ensure the deserialized data matches the original input 150 | assert_eq!(deserialized, input); 151 | } 152 | } 153 | -------------------------------------------------------------------------------- 
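For reference, the round-trip usage of the `Avro` serde mirrors the unit test above; a minimal sketch reusing the test-local fixtures (`InputData`, `SerializedData`, and `TEST_SCHEMA` are defined in the test module, not public API):

```rust
let avro = Avro::<InputData, SerializedData>::new(TEST_SCHEMA);
let bytes = avro.serialize(InputData { value: 7 }); // Avro-encoded with the Deflate codec
let back: InputData = avro.deserialize(bytes).expect("round-trip should succeed");
assert_eq!(back, InputData { value: 7 });
```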
/examples/cart/src/cart.rs: -------------------------------------------------------------------------------- 1 | use crate::event::{CartEvent, CouponEvent, DomainEvent}; 2 | use disintegrate::StateQuery; 3 | use disintegrate::{Decision, StateMutate}; 4 | use serde::{Deserialize, Serialize}; 5 | use std::collections::HashSet; 6 | use thiserror::Error; 7 | 8 | #[derive(Clone, Eq, Hash, PartialEq, Serialize, Deserialize)] 9 | pub struct Item { 10 | id: String, 11 | quantity: u32, 12 | } 13 | 14 | impl Item { 15 | fn new(id: String, quantity: u32) -> Self { 16 | Item { id, quantity } 17 | } 18 | } 19 | 20 | #[derive(Default, StateQuery, Clone, Serialize, Deserialize)] 21 | #[state_query(CartEvent)] 22 | pub struct Cart { 23 | #[id] 24 | user_id: String, 25 | items: HashSet, 26 | applied_coupon: Option, 27 | } 28 | 29 | impl Cart { 30 | pub fn new(user_id: &str) -> Self { 31 | Self { 32 | user_id: user_id.into(), 33 | ..Default::default() 34 | } 35 | } 36 | } 37 | 38 | impl StateMutate for Cart { 39 | fn mutate(&mut self, event: Self::Event) { 40 | match event { 41 | CartEvent::ItemAdded { 42 | item_id, quantity, .. 43 | } => { 44 | self.items.insert(Item::new(item_id, quantity)); 45 | } 46 | CartEvent::ItemRemoved { item_id, .. } => { 47 | self.items.retain(|item| item.id != *item_id); 48 | } 49 | CartEvent::ItemUpdated { 50 | item_id, 51 | new_quantity, 52 | .. 53 | } => { 54 | self.items.replace(Item::new(item_id, new_quantity)); 55 | } 56 | CartEvent::CouponApplied { coupon_id, .. } => { 57 | self.applied_coupon = Some(coupon_id); 58 | } 59 | } 60 | } 61 | } 62 | 63 | #[derive(Debug, Error)] 64 | pub enum CartError { 65 | // cart errors 66 | #[error("coupon already applied")] 67 | CouponAlreadyApplied, 68 | #[error("coupon not available")] 69 | CouponNotAvailable, 70 | } 71 | 72 | pub struct AddItem { 73 | user_id: String, 74 | item_id: String, 75 | quantity: u32, 76 | } 77 | 78 | impl AddItem { 79 | pub fn new(user_id: String, item_id: String, quantity: u32) -> Self { 80 | Self { 81 | user_id, 82 | item_id, 83 | quantity, 84 | } 85 | } 86 | } 87 | 88 | /// Implement your business logic 89 | impl Decision for AddItem { 90 | type Event = DomainEvent; 91 | type StateQuery = Cart; 92 | type Error = CartError; 93 | 94 | fn state_query(&self) -> Self::StateQuery { 95 | Cart::new(&self.user_id) 96 | } 97 | 98 | fn process(&self, _state: &Self::StateQuery) -> Result, Self::Error> { 99 | // check your business constraints... 100 | Ok(vec![DomainEvent::ItemAdded { 101 | user_id: self.user_id.clone(), 102 | item_id: self.item_id.to_string(), 103 | quantity: self.quantity, 104 | }]) 105 | } 106 | } 107 | 108 | #[derive(Default, StateQuery, Clone, Serialize, Deserialize)] 109 | #[state_query(CouponEvent)] 110 | pub struct Coupon { 111 | #[id] 112 | coupon_id: String, 113 | quantity: u32, 114 | } 115 | 116 | impl Coupon { 117 | pub fn new(coupon_id: &str) -> Self { 118 | Self { 119 | coupon_id: coupon_id.to_string(), 120 | ..Default::default() 121 | } 122 | } 123 | } 124 | 125 | impl StateMutate for Coupon { 126 | fn mutate(&mut self, event: Self::Event) { 127 | match event { 128 | CouponEvent::CouponEmitted { quantity, .. } => self.quantity += quantity, 129 | CouponEvent::CouponApplied { .. 
} => self.quantity -= 1, 130 | } 131 | } 132 | } 133 | 134 | pub struct ApplyCoupon { 135 | user_id: String, 136 | coupon_id: String, 137 | } 138 | 139 | impl ApplyCoupon { 140 | #[allow(dead_code)] 141 | pub fn new(user_id: String, coupon_id: String) -> Self { 142 | Self { user_id, coupon_id } 143 | } 144 | } 145 | 146 | /// Implement your business logic 147 | impl Decision for ApplyCoupon { 148 | type Event = DomainEvent; 149 | type StateQuery = (Cart, Coupon); 150 | type Error = CartError; 151 | 152 | fn state_query(&self) -> Self::StateQuery { 153 | (Cart::new(&self.user_id), Coupon::new(&self.coupon_id)) 154 | } 155 | 156 | fn process(&self, (cart, coupon): &Self::StateQuery) -> Result, Self::Error> { 157 | // check your business constraints... 158 | if cart.applied_coupon.is_some() { 159 | return Err(CartError::CouponAlreadyApplied); 160 | } 161 | if coupon.quantity == 0 { 162 | return Err(CartError::CouponNotAvailable); 163 | } 164 | Ok(vec![DomainEvent::CouponApplied { 165 | coupon_id: self.coupon_id.clone(), 166 | user_id: self.user_id.clone(), 167 | }]) 168 | } 169 | } 170 | -------------------------------------------------------------------------------- /disintegrate-postgres/src/snapshotter/tests.rs: -------------------------------------------------------------------------------- 1 | use disintegrate::{ 2 | domain_identifiers, ident, query, DomainIdentifierInfo, DomainIdentifierSet, Event, EventId, 3 | EventInfo, EventSchema, IdentifierType, IntoState, IntoStatePart, PersistedEvent, StateMutate, 4 | }; 5 | use disintegrate_serde::{serde::json::Json, Deserializer}; 6 | use serde::Deserialize; 7 | use sqlx::PgPool; 8 | 9 | use super::*; 10 | 11 | #[derive(Clone)] 12 | enum CartEvent { 13 | #[allow(dead_code)] 14 | ItemAdded { cart_id: String, item_id: String }, 15 | } 16 | 17 | impl Event for CartEvent { 18 | const SCHEMA: EventSchema = EventSchema { 19 | events: &["CartEventItemAdded"], 20 | events_info: &[&EventInfo { 21 | name: "CartProductAdded", 22 | domain_identifiers: &[&ident!(#cart_id), &ident!(#product_id)], 23 | }], 24 | domain_identifiers: &[ 25 | &DomainIdentifierInfo { 26 | ident: ident!(#cart_id), 27 | type_info: IdentifierType::String, 28 | }, 29 | &DomainIdentifierInfo { 30 | ident: ident!(#product_id), 31 | type_info: IdentifierType::String, 32 | }, 33 | ], 34 | }; 35 | fn name(&self) -> &'static str { 36 | match self { 37 | CartEvent::ItemAdded { .. } => "CartProductAdded", 38 | } 39 | } 40 | fn domain_identifiers(&self) -> DomainIdentifierSet { 41 | match self { 42 | CartEvent::ItemAdded { 43 | item_id, cart_id, .. 44 | } => domain_identifiers! {item_id: item_id, cart_id: cart_id}, 45 | } 46 | } 47 | } 48 | 49 | #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] 50 | struct CartState { 51 | cart_id: String, 52 | items: Vec, 53 | } 54 | 55 | impl CartState { 56 | fn new(cart_id: &str, items: [&str; N]) -> Self { 57 | Self { 58 | cart_id: cart_id.to_string(), 59 | items: items.iter().map(|s| s.to_string()).collect(), 60 | } 61 | } 62 | } 63 | 64 | impl StateQuery for CartState { 65 | const NAME: &'static str = "cart-state"; 66 | type Event = CartEvent; 67 | 68 | fn query(&self) -> disintegrate::StreamQuery { 69 | query!(CartEvent; cart_id == self.cart_id) 70 | } 71 | } 72 | 73 | impl StateMutate for CartState { 74 | fn mutate(&mut self, event: Self::Event) { 75 | match event { 76 | CartEvent::ItemAdded { item_id, .. 
} => self.items.push(item_id), 77 | } 78 | } 79 | } 80 | 81 | #[derive(sqlx::FromRow)] 82 | struct SnapshotRow { 83 | id: Uuid, 84 | name: String, 85 | query: String, 86 | version: PgEventId, 87 | payload: String, 88 | } 89 | 90 | #[sqlx::test] 91 | async fn it_stores_snapshots(pool: PgPool) { 92 | let snapshotter = PgSnapshotter::new(pool.clone(), 0).await.unwrap(); 93 | let mut state = CartState::new("c1", []).into_state_part(); 94 | 95 | state.mutate_part(PersistedEvent::new( 96 | 1, 97 | CartEvent::ItemAdded { 98 | cart_id: "c1".to_string(), 99 | item_id: "p1".to_string(), 100 | }, 101 | )); 102 | 103 | snapshotter.store_snapshot(&state.clone()).await.unwrap(); 104 | 105 | let stored_snapshot = sqlx::query_as::<_, SnapshotRow>("SELECT * FROM snapshot") 106 | .fetch_one(&pool) 107 | .await 108 | .unwrap(); 109 | 110 | let query_key = query_key(&state.query()); 111 | let snapshot_id = snapshot_id(CartState::NAME, &query_key); 112 | assert_eq!(stored_snapshot.id, snapshot_id); 113 | assert_eq!(stored_snapshot.name, CartState::NAME); 114 | assert_eq!(stored_snapshot.query, query_key); 115 | assert_eq!( 116 | Json::::default() 117 | .deserialize(stored_snapshot.payload.into_bytes()) 118 | .unwrap(), 119 | state.into_state() 120 | ); 121 | assert_eq!(stored_snapshot.version, 1); 122 | } 123 | 124 | #[sqlx::test] 125 | async fn it_loads_snapshots(pool: PgPool) { 126 | let snapshotter = PgSnapshotter::new(pool.clone(), 2).await.unwrap(); 127 | let default_state = CartState::new("c1", []); 128 | let expected_state = CartState::new("c1", ["p1", "p2"]); 129 | let query_key = query_key(&default_state.query()); 130 | let snapshot_id = snapshot_id(CartState::NAME, &query_key); 131 | sqlx::query("INSERT INTO snapshot (id, name, query, payload, version) VALUES ($1,$2,$3,$4,$5) ON CONFLICT(id) DO UPDATE SET name = $2, query = $3, payload = $4, version = $5 WHERE snapshot.version < $5") 132 | .bind(snapshot_id) 133 | .bind(CartState::NAME) 134 | .bind(query_key) 135 | .bind(serde_json::to_string(&expected_state).unwrap()) 136 | .bind(3) 137 | .execute(&pool) 138 | .await.unwrap(); 139 | 140 | let loaded_state = snapshotter 141 | .load_snapshot(default_state.into_state_part()) 142 | .await; 143 | 144 | assert_eq!(loaded_state.version(), 3); 145 | assert_eq!(loaded_state.into_state(), expected_state); 146 | } 147 | -------------------------------------------------------------------------------- /disintegrate-macros/src/state_query.rs: -------------------------------------------------------------------------------- 1 | use proc_macro2::{Ident, TokenStream}; 2 | use quote::quote; 3 | use syn::parse::{Parse, ParseStream}; 4 | use syn::token::Comma; 5 | use syn::{Data, DeriveInput, Error}; 6 | use syn::{DataStruct, LitStr}; 7 | 8 | use crate::symbol::{ID, RENAME, STATE_QUERY}; 9 | 10 | enum StateQueryOptionalArgs { 11 | Rename(LitStr), 12 | } 13 | 14 | impl Parse for StateQueryOptionalArgs { 15 | fn parse(input: ParseStream) -> syn::Result { 16 | let name = input.parse::()?; 17 | input.parse::()?; 18 | 19 | if name == RENAME { 20 | let value = input.parse::()?; 21 | return Ok(Self::Rename(value)); 22 | } 23 | 24 | Err(Error::new(name.span(), "invalid argument")) 25 | } 26 | } 27 | 28 | struct StateQueryArgs { 29 | event: Ident, 30 | optional_args: Vec, 31 | } 32 | 33 | impl Parse for StateQueryArgs { 34 | fn parse(input: ParseStream) -> syn::Result { 35 | let event = input.parse::()?; 36 | 37 | let comma = input.parse::().ok(); 38 | 39 | let mut optional_args: Vec = vec![]; 40 | if comma.is_some() { 41 | 
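// Only the `rename = "..."` argument is currently recognized; each entry is parsed as `name = value`.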
optional_args = input 42 | .parse_terminated(StateQueryOptionalArgs::parse, Comma)? 43 | .into_iter() 44 | .collect(); 45 | } 46 | 47 | Ok(Self { 48 | event, 49 | optional_args, 50 | }) 51 | } 52 | } 53 | 54 | pub fn state_query_inner(ast: &DeriveInput) -> Result { 55 | match ast.data { 56 | Data::Struct(ref data) => impl_struct(ast, data), 57 | _ => panic!("Not supported type"), 58 | } 59 | } 60 | 61 | fn impl_struct(ast: &DeriveInput, data: &DataStruct) -> syn::Result { 62 | let state_query_ident = ast.ident.clone(); 63 | 64 | let state_query_attrs: Vec<_> = ast 65 | .attrs 66 | .iter() 67 | .filter(|attr| attr.path() == STATE_QUERY) 68 | .collect(); 69 | 70 | if state_query_attrs.len() != 1 { 71 | return Err(Error::new( 72 | state_query_ident.span(), 73 | format!("expected a `{STATE_QUERY}` attribute"), 74 | )); 75 | } 76 | 77 | let state_query_attrs = state_query_attrs 78 | .first() 79 | .unwrap() 80 | .parse_args::()?; 81 | let event_type = state_query_attrs.event; 82 | let state_query_name = state_query_attrs 83 | .optional_args 84 | .iter() 85 | .map(|attrs| { 86 | let StateQueryOptionalArgs::Rename(rename) = attrs; 87 | rename.value() 88 | }) 89 | .next_back() 90 | .unwrap_or_else(|| state_query_ident.to_string()); 91 | 92 | let identifiers_fields: Vec<_> = data 93 | .fields 94 | .iter() 95 | .filter(|f| f.attrs.iter().any(|attr| attr.path() == ID)) 96 | .flat_map(|f| f.ident.as_ref()) 97 | .collect(); 98 | 99 | let state_query = impl_state_query(event_type.clone(), &identifiers_fields); 100 | 101 | Ok(quote! { 102 | #[automatically_derived] 103 | impl disintegrate::StateQuery for #state_query_ident { 104 | const NAME: &'static str = #state_query_name; 105 | 106 | type Event = #event_type; 107 | 108 | fn query(&self) -> disintegrate::StreamQuery { 109 | #state_query 110 | } 111 | } 112 | 113 | impl From<#state_query_ident> for disintegrate::StreamQuery 114 | where 115 | ID: disintegrate::EventId, 116 | E: disintegrate::Event + Clone, <#state_query_ident as disintegrate::StateQuery>::Event: Into 117 | { 118 | fn from(state: #state_query_ident) -> Self { 119 | state.query().cast() 120 | } 121 | } 122 | 123 | impl #state_query_ident { 124 | pub fn exclude_events(&self, events: &'static [&'static str]) -> disintegrate::StreamQuery::Event> { 125 | self.query().exclude_events(events) 126 | } 127 | } 128 | 129 | }) 130 | } 131 | 132 | fn impl_state_query(event_type: Ident, identifiers_fields: &[&Ident]) -> TokenStream { 133 | if identifiers_fields.is_empty() { 134 | quote! { 135 | disintegrate::query!(#event_type) 136 | } 137 | } else { 138 | let filters = impl_state_filters(identifiers_fields); 139 | quote! { 140 | disintegrate::query!(#event_type; #filters) 141 | } 142 | } 143 | } 144 | 145 | fn impl_state_filters(identifiers_fields: &[&Ident]) -> Option { 146 | if identifiers_fields.is_empty() { 147 | return None; 148 | } 149 | 150 | if identifiers_fields.len() == 1 { 151 | Some(quote! {#(#identifiers_fields == self.#identifiers_fields)*}) 152 | } else { 153 | let first = identifiers_fields[0]; 154 | let rest = impl_state_filters(&identifiers_fields[1..]); 155 | Some(quote! 
{ 156 | #first == self.#first, #rest 157 | }) 158 | } 159 | } 160 | -------------------------------------------------------------------------------- /disintegrate-macros/tests/event.rs: -------------------------------------------------------------------------------- 1 | use disintegrate::{ident, DomainIdentifierInfo, Event, IdentifierType, IntoIdentifierValue}; 2 | 3 | #[derive(Event, Clone, Debug, PartialEq, Eq)] 4 | struct UserUpdatedData { 5 | #[id] 6 | user_id: String, 7 | email: String, 8 | } 9 | 10 | #[derive(Event, Clone, Debug, PartialEq, Eq)] 11 | struct UserDeleted { 12 | #[id] 13 | user_id: String, 14 | } 15 | 16 | #[allow(clippy::enum_variant_names)] 17 | #[derive(Event, Debug, PartialEq, Eq)] 18 | #[stream(UserEvent, [UserCreated, UserUpdated, UserDeleted])] 19 | #[stream(OrderEvent, [OrderCreated, OrderCancelled])] 20 | enum DomainEvent { 21 | UserCreated { 22 | #[id] 23 | user_id: String, 24 | name: String, 25 | email: String, 26 | }, 27 | UserUpdated(UserUpdatedData), 28 | UserDeleted(Box), 29 | OrderCreated { 30 | #[id] 31 | order_id: String, 32 | amount: u32, 33 | }, 34 | OrderCancelled { 35 | #[id] 36 | order_id: String, 37 | }, 38 | UserChanged, 39 | } 40 | 41 | #[test] 42 | fn it_correctly_sets_event_names() { 43 | assert_eq!( 44 | DomainEvent::SCHEMA.events, 45 | &[ 46 | "UserCreated", 47 | "UserUpdated", 48 | "UserDeleted", 49 | "OrderCreated", 50 | "OrderCancelled", 51 | "UserChanged" 52 | ] 53 | ); 54 | } 55 | 56 | #[test] 57 | fn it_returns_correct_domain_identifiers() { 58 | let user_id = "user123".to_string(); 59 | let enum_struct_variant_event = DomainEvent::UserCreated { 60 | user_id: user_id.clone(), 61 | name: "John Doe".to_string(), 62 | email: "john@example.com".to_string(), 63 | }; 64 | 65 | let domain_identifiers = enum_struct_variant_event.domain_identifiers(); 66 | assert_eq!( 67 | domain_identifiers.get(&ident!(#user_id)), 68 | Some(&user_id.clone().into_identifier_value()) 69 | ); 70 | 71 | let enum_unit_variant_event = DomainEvent::UserUpdated(UserUpdatedData { 72 | user_id: user_id.clone(), 73 | email: "john@example.com".to_string(), 74 | }); 75 | 76 | let domain_identifiers = enum_unit_variant_event.domain_identifiers(); 77 | assert_eq!( 78 | domain_identifiers.get(&ident!(#user_id)), 79 | Some(&user_id.clone().into_identifier_value()) 80 | ); 81 | 82 | let enum_boxed_variant_event = DomainEvent::UserDeleted(Box::new(UserDeleted { 83 | user_id: user_id.clone(), 84 | })); 85 | 86 | let domain_identifiers = enum_boxed_variant_event.domain_identifiers(); 87 | assert_eq!( 88 | domain_identifiers.get(&ident!(#user_id)), 89 | Some(&user_id.into_identifier_value()) 90 | ); 91 | 92 | let enum_unit_variant_event = DomainEvent::UserChanged; 93 | 94 | let domain_identifiers = enum_unit_variant_event.domain_identifiers(); 95 | assert!(domain_identifiers.is_empty()); 96 | } 97 | 98 | #[test] 99 | fn it_generates_event_streams() { 100 | let user_event = UserEvent::UserCreated { 101 | user_id: "user123".to_string(), 102 | name: "John Doe".to_string(), 103 | email: "john@example.com".to_string(), 104 | }; 105 | 106 | let user_event: DomainEvent = user_event.into(); 107 | assert_eq!( 108 | user_event, 109 | DomainEvent::UserCreated { 110 | user_id: "user123".to_string(), 111 | name: "John Doe".to_string(), 112 | email: "john@example.com".to_string(), 113 | } 114 | ); 115 | 116 | let user_boxed_event = UserEvent::UserDeleted(Box::new(UserDeleted { 117 | user_id: "user123".to_string(), 118 | })); 119 | 120 | let user_event: DomainEvent = user_boxed_event.into(); 121 
| assert_eq!( 122 | user_event, 123 | DomainEvent::UserDeleted(Box::new(UserDeleted { 124 | user_id: "user123".to_string(), 125 | })) 126 | ); 127 | let order_event = OrderEvent::OrderCreated { 128 | order_id: "order456".to_string(), 129 | amount: 100, 130 | }; 131 | 132 | let order_event: DomainEvent = order_event.into(); 133 | assert_eq!( 134 | order_event, 135 | DomainEvent::OrderCreated { 136 | order_id: "order456".to_string(), 137 | amount: 100, 138 | } 139 | ); 140 | 141 | assert_eq!( 142 | UserEvent::SCHEMA.events, 143 | &["UserCreated", "UserUpdated", "UserDeleted"] 144 | ); 145 | 146 | assert_eq!( 147 | OrderEvent::SCHEMA.events, 148 | &["OrderCreated", "OrderCancelled"] 149 | ); 150 | } 151 | 152 | #[test] 153 | fn it_generates_domain_identifiers_schema_set() { 154 | assert_eq!( 155 | OrderEvent::SCHEMA.domain_identifiers, 156 | &[&DomainIdentifierInfo { 157 | ident: ident!(#order_id), 158 | type_info: IdentifierType::String 159 | }] 160 | ); 161 | 162 | assert_eq!( 163 | UserEvent::SCHEMA.domain_identifiers, 164 | &[&DomainIdentifierInfo { 165 | ident: ident!(#user_id), 166 | type_info: IdentifierType::String 167 | }] 168 | ); 169 | 170 | assert_eq!( 171 | DomainEvent::SCHEMA.domain_identifiers, 172 | &[ 173 | &DomainIdentifierInfo { 174 | ident: ident!(#order_id), 175 | type_info: IdentifierType::String 176 | }, 177 | &DomainIdentifierInfo { 178 | ident: ident!(#user_id), 179 | type_info: IdentifierType::String 180 | } 181 | ] 182 | ); 183 | } 184 | -------------------------------------------------------------------------------- /examples/courses/src/grpc.rs: -------------------------------------------------------------------------------- 1 | use async_trait::async_trait; 2 | 3 | use crate::{ 4 | application::Application, 5 | domain::{self}, 6 | proto, 7 | }; 8 | 9 | #[derive(Clone)] 10 | pub struct CourseApi { 11 | app: Application, 12 | } 13 | 14 | impl CourseApi { 15 | pub fn new(app: Application) -> Self { 16 | Self { app } 17 | } 18 | } 19 | 20 | #[async_trait] 21 | impl proto::course_server::Course for CourseApi { 22 | async fn create( 23 | &self, 24 | request: tonic::Request, 25 | ) -> Result, tonic::Status> { 26 | let request = request.into_inner(); 27 | 28 | self.app 29 | .create_course(domain::CreateCourse { 30 | course_id: request.course_id, 31 | name: request.name, 32 | seats: request.seats, 33 | }) 34 | .await 35 | .map(|_| tonic::Response::new(proto::CreateCourseResponse {})) 36 | .map_err(|e| tonic::Status::internal(e.to_string())) 37 | } 38 | 39 | async fn close( 40 | &self, 41 | request: tonic::Request, 42 | ) -> Result, tonic::Status> { 43 | let request = request.into_inner(); 44 | self.app 45 | .close_course(domain::CloseCourse { 46 | course_id: request.course_id, 47 | }) 48 | .await 49 | .map(|_| tonic::Response::new(proto::CloseCourseResponse {})) 50 | .map_err(|e| tonic::Status::internal(e.to_string())) 51 | } 52 | 53 | async fn rename( 54 | &self, 55 | request: tonic::Request, 56 | ) -> Result, tonic::Status> { 57 | let request = request.into_inner(); 58 | 59 | self.app 60 | .rename_course(domain::RenameCourse { 61 | course_id: request.course_id, 62 | name: request.name, 63 | }) 64 | .await 65 | .map(|_| tonic::Response::new(proto::RenameCourseResponse {})) 66 | .map_err(|e| tonic::Status::internal(e.to_string())) 67 | } 68 | 69 | async fn get( 70 | &self, 71 | request: tonic::Request, 72 | ) -> Result, tonic::Status> { 73 | let request = request.into_inner(); 74 | self.app 75 | .course_by_id(request.course_id) 76 | .await 77 | .map_err(|e| 
tonic::Status::internal(e.to_string()))? 78 | .map(|c| { 79 | tonic::Response::new(proto::GetCourseResponse { 80 | course_id: c.course_id, 81 | name: c.name, 82 | available_seats: c.available_seats, 83 | closed: c.closed, 84 | }) 85 | }) 86 | .ok_or(tonic::Status::not_found("course not found")) 87 | } 88 | } 89 | 90 | #[derive(Clone)] 91 | pub struct StudentApi { 92 | app: Application, 93 | } 94 | 95 | impl StudentApi { 96 | pub fn new(app: Application) -> Self { 97 | Self { app } 98 | } 99 | } 100 | 101 | #[async_trait] 102 | impl proto::student_server::Student for StudentApi { 103 | async fn register( 104 | &self, 105 | request: tonic::Request, 106 | ) -> Result, tonic::Status> { 107 | let request = request.into_inner(); 108 | self.app 109 | .register_student(domain::RegisterStudent { 110 | student_id: request.student_id, 111 | name: request.name, 112 | }) 113 | .await 114 | .map(|_| tonic::Response::new(proto::RegisterStudentResponse {})) 115 | .map_err(|e| tonic::Status::internal(e.to_string())) 116 | } 117 | } 118 | 119 | #[derive(Clone)] 120 | pub struct SubscriptionApi { 121 | app: Application, 122 | } 123 | 124 | impl SubscriptionApi { 125 | pub fn new(app: Application) -> Self { 126 | Self { app } 127 | } 128 | } 129 | 130 | #[async_trait] 131 | impl proto::subscription_server::Subscription for SubscriptionApi { 132 | async fn subscribe( 133 | &self, 134 | request: tonic::Request, 135 | ) -> Result, tonic::Status> { 136 | let request = request.into_inner(); 137 | self.app 138 | .subscribe_student(domain::SubscribeStudent { 139 | course_id: request.course_id, 140 | student_id: request.student_id, 141 | }) 142 | .await 143 | .map(|_| tonic::Response::new(proto::SubscribeStudentResponse {})) 144 | .map_err(|e| tonic::Status::internal(e.to_string())) 145 | } 146 | async fn unsubscribe( 147 | &self, 148 | request: tonic::Request, 149 | ) -> Result, tonic::Status> { 150 | let request = request.into_inner(); 151 | self.app 152 | .unsubscribe_student(domain::UnsubscribeStudent { 153 | course_id: request.course_id, 154 | student_id: request.student_id, 155 | }) 156 | .await 157 | .map(|_| tonic::Response::new(proto::UnsubscribeStudentResponse {})) 158 | .map_err(|e| tonic::Status::internal(e.to_string())) 159 | } 160 | } 161 | -------------------------------------------------------------------------------- /docs/docs/faq.md: -------------------------------------------------------------------------------- 1 | --- 2 | sidebar_position: 7 3 | --- 4 | 5 | # FAQ 6 | 7 | **Q: What is Disintegrate and how does it relate to CQRS and Event Sourcing?** 8 | 9 | A: Disintegrate is a framework that simplifies the implementation of CQRS and Event Sourcing patterns. 10 | Disintegrate focuses on handling commands `Decision` directly, instead of working through aggregates. 11 | This makes it easier to identify and add new commands without changing existing aggregate structures. 12 | 13 | ```mermaid 14 | flowchart TD 15 | 16 | subgraph Traditional CQRS 17 | A1[Command] --> A2[Aggregate] 18 | A2 --> A3[Emit Events] 19 | A3 --> A4[(Event Store)] 20 | end 21 | 22 | subgraph Disintegrate Approach 23 | D1[Command] --> D2["⚙️ Decision"] 24 | D2 --> D3[Emit Events] 25 | D3 --> D4[(Event Store)] 26 | end 27 | 28 | ``` 29 | 30 | **Q: Why use Disintegrate instead of traditional approaches?** 31 | 32 | A: Traditional CQRS approaches often involve tightly coupled components, making modifications difficult. 33 | The challenge of identifying the right aggregates and changing them adds complexity. 
34 | **Disintegrate** simplifies integration and enhances flexibility for evolving system requirements. 35 | Additionally, it offers a way to query the event store, enabling dynamic splitting or joining of multiple streams to create customized states for decision-making. 36 | This simplifies the process compared to modifying aggregates in traditional systems. 37 | 38 | **Q: What problem does Disintegrate solve?** 39 | 40 | A: Disintegrate addresses the difficulty of identifying the right aggregates at the start of a project and the common need to access information from multiple aggregates as new features emerge. 41 | It enables dynamic querying of event stores to split or join multiple streams, allowing flexible state creation for decision-making without imposing strict stream boundaries. 42 | 43 | 44 | **Q: How does Disintegrate relate to event sourcing?** 45 | 46 | A: Disintegrate operates on event streams, allowing applications to read and combine multiple streams dynamically. 47 | This approach supports evolving requirements by enabling stream splitting or joining either at the storage level or dynamically at the application level, enhancing the flexibility of event-sourced systems. 48 | 49 | 50 | **Q: What is the "Cheating" chapter mentioned in the context of Disintegrate?** 51 | 52 | A: The "Cheating" chapter in Gregory Young's book discusses techniques to overcome the limitations of fixed stream boundaries by either duplicating events to create new streams (splitting) or combining streams (joining). 53 | Disintegrate implements this concept by allowing dynamic querying and combination of event streams to build domain states. 54 | 55 | **Q: How do I start a new project with Disintegrate?** 56 | 57 | - Begin with an Event Storming session. This helps you understand the business, find important events, commands, and groups of related data (called aggregates). 58 | - Next, organize and improve your model by identifying aggregates, sub-domains, and bounded contexts (different parts of the system). 59 | - With Disintegrate, focus on the decisions your system needs to make, because decisions trigger important changes (events) in the system. 60 | 61 | **Q: How does Disintegrate handle concurrency conflicts, especially with high usage of new features like coupons?** 62 | 63 | A: Disintegrate may experience concurrency conflicts when multiple users try to access and modify the same resources (e.g., coupons) simultaneously. 64 | 65 | Approaches to address this include: 66 | - Introducing a lock mechanism for exclusive access, allowing only one client to access the resource at a time. 67 | - Employing a queue to execute commands sequentially. Multiple queues can be used, routing all commands with the same ID or group to the same queue. 68 | - Allowing overbooking (if permissible by the business model) by excluding specific events during conflict checking, using the `validation_query` method. 69 | 70 | **Q: How do I add a new feature using Disintegrate?** 71 | 72 | A: To add a new feature, define a new state (if needed) and a new `Decision` that represents the command. 73 | Use state queries to retrieve the relevant state and implement the business logic in the `process` method of the `Decision`.
74 | For example, when adding a coupon feature, define a `Coupon` state and an `ApplyCoupon` decision.
75 | 
76 | **Q: Can you provide an example of defining a new state in Disintegrate?**
77 | 
78 | A: Yes, here's an example that defines a `Coupon` state:
79 | ```rust
80 | #[derive(Debug, Clone, StateQuery, Default, Deserialize, Serialize)]
81 | #[state_query(CouponEvent)]
82 | struct Coupon {
83 |     #[id]
84 |     coupon_id: String,
85 |     quantity: i32,
86 | }
87 | ```
88 | 
89 | **Q: How do I handle coupon availability when applying a coupon using Disintegrate?**
90 | 
91 | A: In the `ApplyCoupon` decision, use state queries to retrieve the `Coupon` state and check that the coupon is available (quantity > 0) before applying it.
92 | 
93 | **Q: How do I allow overbooking of coupons in Disintegrate?**
94 | 
95 | A: To allow overbooking, use the `validation_query` method in the `ApplyCoupon` decision to exclude `CouponApplied` events from the conflict validation.
96 | This can be achieved using the `exclude_events` method.
97 | 
98 | Here's the example:
99 | 
100 | ```rust
101 | impl Decision for ApplyCoupon {
102 |     type Event = DomainEvent;
103 |     type StateQuery = (Cart, Coupon);
104 |     type Error = CartError;
105 | 
106 |     fn validation_query<ID: EventId>(&self) -> Option<StreamQuery<ID, Self::Event>> {
107 |         let (cart, coupon) = self.state_query();
108 |         // The validation query is the union of the two state queries used by the decision.
109 |         Some(union!(
110 |             // The original cart state query validates the decision against the user's cart changes.
111 |             &cart,
112 |             // Exclude the `CouponApplied` event from the coupon state query to allow some overbooking.
113 |             coupon.exclude_events(event_types!(DomainEvent, [CouponApplied]))
114 |         ))
115 |     }
116 | }
117 | 
118 | ```
119 | 
120 | 
121 | 
--------------------------------------------------------------------------------
/disintegrate-postgres/src/snapshotter.rs:
--------------------------------------------------------------------------------
1 | //! # PostgreSQL Snapshotter
2 | //!
3 | //! This module provides an implementation of the `Snapshotter` trait using PostgreSQL as the underlying storage.
4 | //! It allows storing and retrieving snapshots from a PostgreSQL database.
5 | use async_trait::async_trait;
6 | use disintegrate::{BoxDynError, Event, IntoState, StateSnapshotter, StreamQuery};
7 | use disintegrate::{StatePart, StateQuery};
8 | use md5::{Digest, Md5};
9 | use serde::de::DeserializeOwned;
10 | use serde::Serialize;
11 | use sqlx::PgPool;
12 | use sqlx::Row;
13 | use uuid::Uuid;
14 | 
15 | use crate::{Error, PgEventId};
16 | 
17 | #[cfg(test)]
18 | mod tests;
19 | 
20 | /// PostgreSQL implementation for the `Snapshotter` trait.
21 | ///
22 | /// The `PgSnapshotter` struct implements the `Snapshotter` trait for PostgreSQL databases.
23 | /// It allows for storing and retrieving snapshots of a `StateQuery` from a PostgreSQL database.
24 | #[derive(Clone)]
25 | pub struct PgSnapshotter {
26 |     pool: PgPool,
27 |     every: u64,
28 | }
29 | 
30 | impl PgSnapshotter {
31 |     /// Creates and initializes a new instance of `PgSnapshotter` with the specified PostgreSQL connection pool and snapshot frequency.
32 |     ///
33 |     /// # Arguments
34 |     ///
35 |     /// - `pool`: A PostgreSQL connection pool (`PgPool`) representing the database connection.
36 |     /// - `every`: The frequency of snapshot creation, specified as the number of events between consecutive snapshots.
37 |     ///
38 |     /// # Returns
39 |     ///
40 |     /// A new `PgSnapshotter` instance.
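    /// # Example
    ///
    /// A minimal usage sketch (assumes an already-configured `PgPool` and that
    /// `PgSnapshotter` and `Error` are re-exported at the crate root):
    ///
    /// ```rust,no_run
    /// # async fn example(pool: sqlx::PgPool) -> Result<(), disintegrate_postgres::Error> {
    /// use disintegrate_postgres::PgSnapshotter;
    ///
    /// // Take a snapshot after every 100 applied events.
    /// let snapshotter = PgSnapshotter::new(pool, 100).await?;
    /// # Ok(())
    /// # }
    /// ```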
41 |     pub async fn new(pool: PgPool, every: u64) -> Result<Self, Error> {
42 |         setup(&pool).await?;
43 |         Ok(Self::new_uninitialized(pool, every))
44 |     }
45 | 
46 |     /// Creates a new instance of `PgSnapshotter` with the specified PostgreSQL connection pool and snapshot frequency.
47 |     ///
48 |     /// This constructor does not initialize the database. If you need to initialize the database,
49 |     /// use `PgSnapshotter::new` instead.
50 |     ///
51 |     /// If you use this constructor, ensure that the database is already initialized.
52 |     /// Refer to the SQL files in the `snapshotter/sql` folder for the necessary schema.
53 |     ///
54 |     /// # Arguments
55 |     ///
56 |     /// - `pool`: A PostgreSQL connection pool (`PgPool`) representing the database connection.
57 |     /// - `every`: The frequency of snapshot creation, defined as the number of events between consecutive snapshots.
58 |     ///
59 |     /// # Returns
60 |     ///
61 |     /// A new `PgSnapshotter` instance.
62 |     pub fn new_uninitialized(pool: PgPool, every: u64) -> Self {
63 |         Self { pool, every }
64 |     }
65 | }
66 | 
67 | #[async_trait]
68 | impl StateSnapshotter<PgEventId> for PgSnapshotter {
69 |     async fn load_snapshot<S>(&self, default: StatePart<PgEventId, S>) -> StatePart<PgEventId, S>
70 |     where
71 |         S: Send + Sync + DeserializeOwned + StateQuery + 'static,
72 |     {
73 |         let query = query_key(&default.query());
74 |         let stored_snapshot =
75 |             sqlx::query("SELECT name, query, payload, version FROM snapshot where id = $1")
76 |                 .bind(snapshot_id(S::NAME, &query))
77 |                 .fetch_one(&self.pool)
78 |                 .await;
79 |         if let Ok(row) = stored_snapshot {
80 |             let snapshot_name: String = row.get(0);
81 |             let snapshot_query: String = row.get(1);
82 |             if S::NAME == snapshot_name && query == snapshot_query {
83 |                 let payload = serde_json::from_str(row.get(2)).unwrap_or(default.into_state());
84 |                 return StatePart::new(row.get(3), payload);
85 |             }
86 |         }
87 | 
88 |         default
89 |     }
90 | 
91 |     async fn store_snapshot<S>(&self, state: &StatePart<PgEventId, S>) -> Result<(), BoxDynError>
92 |     where
93 |         S: Send + Sync + Serialize + StateQuery + 'static,
94 |     {
95 |         if state.applied_events() <= self.every {
96 |             return Ok(());
97 |         }
98 |         let query = query_key(&state.query());
99 |         let id = snapshot_id(S::NAME, &query);
100 |         let version = state.version();
101 |         let payload = serde_json::to_string(&state.clone().into_state())?;
102 |         sqlx::query("INSERT INTO snapshot (id, name, query, payload, version) VALUES ($1,$2,$3,$4,$5) ON CONFLICT(id) DO UPDATE SET name = $2, query = $3, payload = $4, version = $5 WHERE snapshot.version < $5")
103 |             .bind(id)
104 |             .bind(S::NAME)
105 |             .bind(query)
106 |             .bind(payload)
107 |             .bind(version)
108 |             .execute(&self.pool)
109 |             .await?;
110 | 
111 |         Ok(())
112 |     }
113 | }
114 | 
115 | fn snapshot_id(state_name: &str, query: &str) -> Uuid {
116 |     let mut hasher = Md5::new();
117 |     hasher.update(state_name);
118 | 
119 |     uuid::Uuid::new_v3(
120 |         &uuid::Uuid::from_bytes(hasher.finalize().into()),
121 |         query.as_bytes(),
122 |     )
123 | }
124 | 
125 | fn query_key<E: Event>(query: &StreamQuery<PgEventId, E>) -> String {
126 |     let mut result = String::new();
127 |     for f in query.filters() {
128 |         let excluded_events = if let Some(excluded) = f.excluded_events() {
129 |             format!("-{}", excluded.join(","))
130 |         } else {
131 |             "".to_string()
132 |         };
133 |         result += &format!(
134 |             "({}|{}{}|{})",
135 |             f.origin(),
136 |             f.events().join(","),
137 |             excluded_events,
138 |             f.identifiers()
139 |                 .iter()
140 |                 .map(|(k, v)| format!("{k}={v}"))
141 |                 .collect::<Vec<_>>()
142 |                 .join(",")
143 |         );
144 |     }
145 |     result
146 | }
147 | 
148 | pub async fn
setup(pool: &PgPool) -> Result<(), Error> {
149 |     sqlx::query(include_str!("snapshotter/sql/table_snapshot.sql"))
150 |         .execute(pool)
151 |         .await?;
152 |     Ok(())
153 | }
154 | 
--------------------------------------------------------------------------------
/disintegrate-macros/src/event/stream.rs:
--------------------------------------------------------------------------------
1 | use heck::ToSnakeCase;
2 | use proc_macro2::TokenStream;
3 | use quote::{format_ident, quote};
4 | use syn::{
5 |     bracketed,
6 |     parse::{Parse, ParseStream},
7 |     punctuated::Punctuated,
8 |     token::Comma,
9 |     Data, DeriveInput, Error, Field, Ident, Result, Token, Type, Variant,
10 | };
11 | 
12 | #[derive(Debug)]
13 | pub struct QueryArgs {
14 |     name: Ident,
15 |     variants: Vec<Ident>,
16 | }
17 | 
18 | impl Parse for QueryArgs {
19 |     fn parse(input: ParseStream) -> syn::Result<Self> {
20 |         let name = input.parse::<Ident>()?;
21 | 
22 |         input.parse::<Token![,]>()?;
23 | 
24 |         let content;
25 |         bracketed!(content in input);
26 |         let variants: Punctuated<Ident, Comma> =
27 |             content.parse_terminated(Ident::parse, Token![,])?;
28 | 
29 |         Ok(Self {
30 |             name,
31 |             variants: variants.into_iter().collect(),
32 |         })
33 |     }
34 | }
35 | 
36 | pub fn streams(ast: &DeriveInput) -> Result<Vec<DeriveInput>> {
37 |     ast.attrs
38 |         .iter()
39 |         .filter(|attr| attr.path().is_ident("stream"))
40 |         .map(|g| {
41 |             let args: QueryArgs = g.parse_args()?;
42 |             let stream_ident = args.name;
43 |             let selected_variants: Vec<_> = args.variants;
44 | 
45 |             let event_data = match ast.data {
46 |                 Data::Enum(ref enum_data) => Ok(enum_data),
47 |                 _ => Err(Error::new(
48 |                     stream_ident.span(),
49 |                     "Can only derive from an enum",
50 |                 )),
51 |             }?;
52 | 
53 |             let mut stream_data = event_data.clone();
54 |             stream_data.variants = event_data
55 |                 .variants
56 |                 .iter()
57 |                 .filter(|variant| selected_variants.contains(&variant.ident))
58 |                 .cloned()
59 |                 .collect();
60 | 
61 |             let mut stream = ast.clone();
62 |             stream.ident = stream_ident;
63 |             stream.data = Data::Enum(stream_data);
64 |             stream.attrs = vec![];
65 | 
66 |             Ok(stream)
67 |         })
68 |         .collect()
69 | }
70 | 
71 | pub fn impl_stream(parent: &DeriveInput, stream: &DeriveInput) -> Result<TokenStream> {
72 |     let mut stream = stream.clone();
73 |     let stream_ident = &stream.ident;
74 |     let parent_ident = &parent.ident;
75 | 
76 |     let error = format_ident!("{stream_ident}ConvertError");
77 | 
78 |     let stream_data = match stream.data {
79 |         Data::Enum(ref mut enum_data) => Ok(enum_data),
80 |         _ => Err(Error::new(
81 |             stream_ident.span(),
82 |             "Can only derive from an enum",
83 |         )),
84 |     }?;
85 | 
86 |     stream_data
87 |         .variants
88 |         .iter_mut()
89 |         .for_each(|variant| match &mut variant.fields {
90 |             syn::Fields::Named(fields) => {
91 |                 fields.named.iter_mut().for_each(|f| f.attrs = vec![]);
92 |             }
93 |             syn::Fields::Unnamed(_) => (),
94 |             syn::Fields::Unit => (),
95 |         });
96 | 
97 |     let pats: Vec<TokenStream> = stream_data
98 |         .variants
99 |         .iter()
100 |         .map(variant_to_unary_pat)
101 |         .collect();
102 | 
103 |     let from_stream_arms = pats
104 |         .iter()
105 |         .map(|pat| quote!(#stream_ident::#pat => #parent_ident::#pat));
106 | 
107 |     let try_from_event_arms = pats
108 |         .iter()
109 |         .map(|pat| quote!(#parent_ident::#pat => std::result::Result::Ok(#stream_ident::#pat)));
110 | 
111 |     let vis = &stream.vis;
112 |     let (_stream_impl, stream_ty, _stream_where) = stream.generics.split_for_impl();
113 | 
114 |     let (event_impl, event_ty, event_where) = parent.generics.split_for_impl();
115 | 
116 |     Ok(quote!
{
117 |         #[allow(clippy::enum_variant_names)]
118 |         #[derive(Clone, Debug, PartialEq, Eq)]
119 |         #stream
120 | 
121 |         #[derive(Copy, Clone, Debug)]
122 |         #vis struct #error;
123 | 
124 |         impl std::fmt::Display for #error {
125 |             fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
126 |                 std::fmt::Debug::fmt(self, f)
127 |             }
128 |         }
129 | 
130 |         impl std::error::Error for #error {}
131 | 
132 |         #[automatically_derived]
133 |         impl #event_impl std::convert::From<#stream_ident #stream_ty> for #parent_ident #event_ty #event_where {
134 |             fn from(child: #stream_ident #stream_ty) -> Self {
135 |                 match child {
136 |                     #(#from_stream_arms),*
137 |                 }
138 |             }
139 |         }
140 | 
141 |         #[automatically_derived]
142 |         impl #event_impl std::convert::TryFrom<#parent_ident #event_ty> for #stream_ident #stream_ty #event_where {
143 |             type Error = #error;
144 | 
145 |             fn try_from(parent: #parent_ident #event_ty) -> std::result::Result<Self, Self::Error> {
146 |                 match parent {
147 |                     #(#try_from_event_arms),*,
148 |                     _ => std::result::Result::Err(#error)
149 |                 }
150 |             }
151 |         }
152 |     })
153 | }
154 | 
155 | fn variant_to_unary_pat(variant: &Variant) -> TokenStream {
156 |     let ident = &variant.ident;
157 | 
158 |     match &variant.fields {
159 |         syn::Fields::Named(named) => {
160 |             let vars: Punctuated<Ident, Comma> = named.named.iter().map(snake_case).collect();
161 |             quote!(#ident{#vars})
162 |         }
163 |         syn::Fields::Unnamed(unnamed) => {
164 |             let vars: Punctuated<Ident, Comma> = unnamed
165 |                 .unnamed
166 |                 .iter()
167 |                 .enumerate()
168 |                 .map(|(idx, _)| format_ident!("var{idx}"))
169 |                 .collect();
170 |             quote!(#ident(#vars))
171 |         }
172 |         syn::Fields::Unit => quote!(#ident),
173 |     }
174 | }
175 | 
176 | fn snake_case(field: &Field) -> Ident {
177 |     let ident = field.ident.as_ref().unwrap_or_else(|| {
178 |         // No ident; the Type must be Path. Use that.
179 |         match &field.ty {
180 |             Type::Path(path) => path.path.get_ident().unwrap(),
181 |             _ => unimplemented!(),
182 |         }
183 |     });
184 |     Ident::new(&ident.to_string().to_snake_case(), ident.span())
185 | }
186 | 
--------------------------------------------------------------------------------
/docs/docs/postgres.md:
--------------------------------------------------------------------------------
1 | ---
2 | sidebar_position: 6
3 | ---
4 | 
5 | # PostgreSQL Event Store
6 | 
7 | Currently, Disintegrate supports only a PostgreSQL implementation. This section explains how Disintegrate interacts with PostgreSQL, focusing on managing application changes and handling data migrations.
8 | 
9 | ## Postgres Database Schema
10 | 
11 | Disintegrate automatically generates the following tables when a PostgreSQL event store is created:
12 | 
13 | * **Event:** Stores all events within the event stream.
14 |   * `event_id`: Global identifier of the event.
15 |   * `event_type`: Type of the event.
16 |   * `payload`: Contains the event's payload.
17 |   * `inserted_at`: Timestamp indicating when the event was written (in UTC time).
18 |   * "Domain identifier" columns: Automatically created by the library when a field in the `Event` is marked as `#[id]`, used for indexing and query optimization.
19 | 
20 | * **Event Sequence:** This technical table is crucial for implementing optimistic locking and managing conflicts.
21 |   * `event_id`: Sequentially generated, globally unique identifier of the event.
22 |   * `event_type`: Specifies the type of the appended event.
23 |   * `consumed`: Boolean column used by the optimistic locking logic.
24 |   * `committed`: Indicates if the event has been written into the event stream.
25 |   * `inserted_at`: Timestamp indicating when the event was written (in UTC time).
26 |   * "Domain identifier" columns: Automatically created by the library when a field in the `Event` is marked as `#[id]`, used for indexing and query optimization.
27 | 
28 | * **Event Listener:** Maintains records of the last event processed by each listener:
29 |   * `id`: Identifier of the event listener.
30 |   * `last_processed_id`: ID of the last event processed by the event listener.
31 |   * `updated_at`: Timestamp indicating the last time the table was updated.
32 | 
33 | * **Snapshot:** Stores stream query payloads to speed up loading:
34 |   * `id`: Identifier of the stream query.
35 |   * `name`: Name of the stream query.
36 |   * `query`: String representation of the query.
37 |   * `version`: Last event ID processed by the stream query.
38 |   * `payload`: Payload of the stream query.
39 |   * `inserted_at`: Timestamp indicating the last time the row was inserted.
40 | 
41 | ## Append Events
42 | 
43 | The append API of the event stream requires three arguments:
44 | 
45 | * The list of new events to append.
46 | * A stream query, used to check whether events appended concurrently to the event store would make the state used for the decision stale.
47 | * The `last_event_id` retrieved when the state was queried.
48 | 
49 | The append process and optimistic lock unfold as follows:
50 | 
51 | The library adds a row to the `event_sequence` table for each new event, serializing all writes to reserve a spot for the new events in the stream. It then attempts to set the `consumed` field to `1` for all events matching the query, from the `last_event_id` up to the last inserted `event_id`. This operation marks all pending events of other concurrent appends as invalidated. If this update fails due to either:
52 | 
53 | * another concurrent process invalidating one or more of the new events, or
54 | * a new event being written that matches the query,
55 | 
56 | a concurrency error is raised, indicating that the state used by the `Decision` is stale. If the update succeeds, it means events invalidating this decision did not occur, and the new events can be written to the `event` table.
57 | 
58 | ## Query Events
59 | 
60 | The query API requires a `StreamQuery` to fetch data from the `event` table, enabling the search and filtering of events based on specified criteria. Domain identifiers are stored in dedicated columns and indexed to optimize query operations. The library autonomously adds domain identifier columns when an `Event` field is tagged with the `#[id]` attribute. To properly manage the addition and removal of domain identifiers, consult the data migration section.
61 | 
62 | ## Data Migration
63 | 
64 | Manual data migration may be needed when the following changes are made to the event structure:
65 | 
66 | 1. **Adding a New Domain Identifier**: If you want to search old events by the new id, a data migration is required to populate the new id column for the existing events.
67 | 2. **Declaring an Existing Field as a Domain Identifier**: Migration is necessary to populate this identifier for old events. Even if the event payload contains the domain identifier value, Disintegrate does not automatically populate the domain identifier column for already persisted events.
68 | To address this, we provide an `EventListener` called `PgIdIndexer`. This helper processes old events and populates the missing domain identifier column in the event store, ensuring they are indexed correctly.
69 | 
70 | ```rust
71 | let id_indexer = PgIdIndexer::<DomainEvent>::new("index_existing_id", pool);
72 | PgEventListener::builder(event_store)
73 |     .register_listener(
74 |         id_indexer,
75 |         PgEventListenerConfig::poller(Duration::from_secs(10)).with_notifier()
76 |     )
77 |     .start_with_shutdown(shutdown())
78 |     .await?;
79 | ```
80 | 
81 | 3. **Deleting an Existing Domain Identifier**: Disintegrate does not automatically remove the domain identifier column. This is a deliberate design choice to support a blue-green rollout strategy.
82 | 
83 | :::warning
84 | For cases 2 and 3, automation may be provided by the library in the future. Currently, users of the library need to make these changes manually in the database using SQL scripts.
85 | :::
86 | 
87 | ## Snapshots
88 | 
89 | If snapshotting is enabled, the library saves snapshots of stream queries in the `snapshot` table. Snapshots can be configured to store the result of a query at specified intervals, with the frequency determined by the number of events retrieved from the event store.
90 | 
91 | ```rust
92 | let decision_maker =
93 |     disintegrate_postgres::decision_maker_with_snapshot(event_store.clone(), 10).await?;
94 | ```
95 | 
96 | The library can automatically discard a snapshot under certain conditions:
97 | - Changes are made to the queries used to build it.
98 | - The library cannot deserialize the snapshot due to changes in the shape of the query state, such as:
99 |   - the addition of new fields to the state query;
100 |   - changes in the data type of existing fields.
101 | 
102 | :::warning
103 | There may be situations where the output stays the same even though the computation underneath has changed. For example, a field of type `i32` may still exist but its calculation method has been altered. In such cases, you'll need to manually delete the snapshot.
104 | :::
105 | 
--------------------------------------------------------------------------------
/disintegrate-postgres/src/listener/id_indexer.rs:
--------------------------------------------------------------------------------
1 | //! An `EventListener` implementation for indexing existing fields tagged with `#[id]`.
2 | use std::{collections::BTreeMap, marker::PhantomData};
3 | 
4 | use async_trait::async_trait;
5 | use disintegrate::{DomainIdentifierSet, Event, EventListener, PersistedEvent, StreamQuery};
6 | use sqlx::{PgPool, Postgres};
7 | 
8 | use crate::PgEventId;
9 | 
10 | /// The `PgIdIndexer` is a helper to index existing fields that have been newly tagged with the `#[id]` attribute in events.
11 | ///
12 | /// # Overview
13 | ///
14 | /// The `PgIdIndexer` is an `EventListener` responsible for indexing fields in the event store
15 | /// that are tagged with the `#[id]` attribute. This allows querying of old events based
16 | /// on this new domain identifier once the indexing is complete. The `PgIdIndexer` listens to events and updates the
17 | /// `event` table in the database with the appropriate values.
18 | ///
19 | /// # Workflow
20 | ///
21 | /// After you have tagged an existing field in your event structure with the `#[id]` attribute to mark it
22 | /// as a domain identifier:
23 | ///
24 | /// ```rust
25 | /// use disintegrate_macros::Event;
26 | /// use serde::{Serialize, Deserialize};
27 | /// #[derive(Event, Clone, Serialize, Deserialize)]
28 | /// struct MyEvent {
29 | ///     #[id]
30 | ///     existing_id: String,
31 | ///     other_field: String,
32 | /// }
33 | /// ```
34 | ///
35 | /// 1. 
**Register the `PgIdIndexer` as an `EventListener`**: Integrate the indexer
36 | ///    with the event listener system to process the newly tagged domain identifier:
37 | ///
38 | /// ```rust
39 | /// use disintegrate_postgres::PgIdIndexer;
40 | /// use disintegrate_postgres::PgEventListenerConfig;
41 | /// use disintegrate_postgres::PgEventListener;
42 | /// use disintegrate_postgres::PgEventStore;
43 | /// use std::time::Duration;
44 | /// use disintegrate_macros::Event;
45 | /// use disintegrate::serde::json::Json;
46 | /// use serde::{Serialize, Deserialize};
47 | /// use sqlx::PgPool;
48 | ///
49 | /// #[derive(Event, Clone, Serialize, Deserialize)]
50 | /// struct MyEvent {
51 | ///     #[id]
52 | ///     existing_id: String,
53 | ///     other_field: String,
54 | /// }
55 | ///
56 | /// async fn setup_listener(pool: PgPool, event_store: PgEventStore<MyEvent, Json<MyEvent>>) {
57 | ///     let id_indexer = PgIdIndexer::<MyEvent>::new("index_existing_id", pool);
58 | ///     PgEventListener::builder(event_store)
59 | ///         .register_listener(
60 | ///             id_indexer,
61 | ///             PgEventListenerConfig::poller(Duration::from_secs(5)).with_notifier()
62 | ///         )
63 | ///         .start_with_shutdown(shutdown())
64 | ///         .await
65 | ///         .expect("start event listener failed");
66 | /// }
67 | ///
68 | /// async fn shutdown() {
69 | ///     tokio::signal::ctrl_c().await.expect("ctrl_c signal failed");
70 | /// }
71 | /// ```
72 | ///
73 | /// 2. **Deploy the application**: Start the application with the updated event
74 | ///    structure and the `PgIdIndexer` integration. Newly created events with the new
75 | ///    domain identifier will automatically have the identifier indexed.
76 | ///
77 | /// Once the indexing process is complete, you can query the event store using the
78 | /// new domain identifier to fetch events.
79 | ///
80 | /// Once indexing is done, you can remove the `PgIdIndexer` from the list of registered event listeners.
81 | pub struct PgIdIndexer<E: Event> {
82 |     id: &'static str,
83 |     pool: PgPool,
84 |     query: StreamQuery<PgEventId, E>,
85 |     _event: PhantomData<E>,
86 | }
87 | 
88 | impl<E: Event> PgIdIndexer<E> {
89 |     /// Creates a new `PgIdIndexer` instance for indexing events.
90 |     ///
91 |     /// # Arguments
92 |     ///
93 |     /// * `id` - A unique identifier for the listener, used to store the last processed `event_id` in the database.
94 |     /// * `pool` - A `PgPool` instance for Postgres.
95 |     pub fn new(id: &'static str, pool: PgPool) -> Self {
96 |         Self {
97 |             id,
98 |             pool,
99 |             query: disintegrate::query!(E),
100 |             _event: PhantomData,
101 |         }
102 |     }
103 | }
104 | 
105 | /// PostgreSQL Id Indexer error.
106 | #[derive(thiserror::Error, Debug)]
107 | #[error(transparent)]
108 | pub struct Error(#[from] sqlx::Error);
109 | 
110 | #[async_trait]
111 | impl<E: Event + Clone + Send + Sync> EventListener<PgEventId, E> for PgIdIndexer<E> {
112 |     type Error = Error;
113 | 
114 |     fn id(&self) -> &'static str {
115 |         self.id
116 |     }
117 | 
118 |     fn query(&self) -> &StreamQuery<PgEventId, E> {
119 |         &self.query
120 |     }
121 | 
122 |     async fn handle(&self, event: PersistedEvent<PgEventId, E>) -> Result<(), Self::Error> {
123 |         let mut query_builder = sql_builder(event.id(), event.domain_identifiers());
124 |         query_builder.build().execute(&self.pool).await?;
125 |         Ok(())
126 |     }
127 | }
128 | 
129 | fn sql_builder(
130 |     event_id: PgEventId,
131 |     domain_identifiers: DomainIdentifierSet,
132 | ) -> sqlx::QueryBuilder<'static, Postgres> {
133 |     let domain_identifiers = <BTreeMap<_, _> as Clone>::clone(&domain_identifiers).into_iter();
134 |     let mut sql_builder = sqlx::QueryBuilder::new("UPDATE event SET ");
135 |     let mut separated = sql_builder.separated(",");
136 |     for (id_name, id_value) in domain_identifiers {
137 |         separated.push(format!("{id_name} = "));
138 | 
139 |         match id_value {
140 |             disintegrate::IdentifierValue::String(value) => {
141 |                 separated.push_bind_unseparated(value.clone())
142 |             }
143 |             disintegrate::IdentifierValue::i64(value) => separated.push_bind_unseparated(value),
144 |             disintegrate::IdentifierValue::Uuid(value) => separated.push_bind_unseparated(value),
145 |         };
146 |     }
147 |     separated.push_unseparated(" WHERE event_id = ");
148 |     separated.push_bind_unseparated(event_id);
149 | 
150 |     sql_builder
151 | }
152 | 
153 | #[cfg(test)]
154 | mod test {
155 |     use disintegrate::domain_identifiers;
156 |     use uuid::Uuid;
157 | 
158 |     use super::sql_builder;
159 | 
160 |     #[test]
161 |     fn it_builds_event_update() {
162 |         let ids =
163 |             domain_identifiers! {cart_id: "cart1", product_id: 1, customer_id: Uuid::new_v4()};
164 | 
165 |         let builder = sql_builder(1, ids);
166 | 
167 |         assert_eq!(
168 |             builder.sql(),
169 |             "UPDATE event SET cart_id = $1,customer_id = $2,product_id = $3 WHERE event_id = $4"
170 |         );
171 |     }
172 | }
173 | 
--------------------------------------------------------------------------------
/disintegrate/src/testing.rs:
--------------------------------------------------------------------------------
1 | //! Utility for testing a Decision implementation
2 | //!
3 | //! The test harness allows you to set up a history of events, perform the given decision,
4 | //! and make assertions about the resulting changes.
5 | use std::fmt::Debug;
6 | 
7 | use crate::{Decision, Event, IntoState, IntoStatePart, MultiState, PersistedEvent};
8 | 
9 | /// Test harness for testing decisions.
10 | pub struct TestHarness;
11 | 
12 | impl TestHarness {
13 |     /// Sets up a history of events.
14 |     ///
15 |     /// # Arguments
16 |     ///
17 |     /// * `history` - A history of events used to derive the current state.
18 |     ///
19 |     /// # Returns
20 |     ///
21 |     /// A `TestHarnessStep` representing the "given" step.
22 |     pub fn given<E>(history: impl Into<Vec<E>>) -> TestHarnessStep<E, Given> {
23 |         TestHarnessStep {
24 |             history: history.into(),
25 |             _step: Given,
26 |         }
27 |     }
28 | }
29 | 
30 | /// Represents the given step of the test harness.
31 | pub struct Given;
32 | 
33 | /// Represents the when step of the test harness.
34 | pub struct When<R, ERR> {
35 |     result: Result<Vec<R>, ERR>,
36 | }
37 | 
38 | pub struct TestHarnessStep<E, ST> {
39 |     history: Vec<E>,
40 |     _step: ST,
41 | }
42 | 
43 | impl<E: Event + Clone> TestHarnessStep<E, Given> {
44 |     /// Executes a decision on the state derived from the given history.
45 |     ///
46 |     /// # Arguments
47 |     ///
48 |     /// * `decision` - The decision to test.
49 |     ///
50 |     /// # Returns
51 |     ///
52 |     /// A `TestHarnessStep` representing the "when" step.
53 |     pub fn when<D, S, SP>(self, decision: D) -> TestHarnessStep<E, When<D::Event, D::Error>>
54 |     where
55 |         D: Decision<StateQuery = S>,
56 |         S: IntoStatePart<i64, SP>,
57 |         SP: IntoState<S> + MultiState<i64, E>,
58 |     {
59 |         let mut state = decision.state_query().into_state_part();
60 |         for event in self
61 |             .history
62 |             .iter()
63 |             .enumerate()
64 |             .map(|(id, event)| PersistedEvent::new((id + 1) as i64, event.clone()))
65 |         {
66 |             state.mutate_all(event);
67 |         }
68 |         let result = decision.process(&state.into_state());
69 |         TestHarnessStep {
70 |             history: self.history,
71 |             _step: When { result },
72 |         }
73 |     }
74 | }
75 | 
76 | impl<E, R, ERR> TestHarnessStep<E, When<R, ERR>>
77 | where
78 |     E: Event + Clone + PartialEq,
79 |     R: Debug + PartialEq,
80 |     ERR: Debug + PartialEq,
81 | {
82 |     /// Makes assertions about the changes.
83 |     ///
84 |     /// # Arguments
85 |     ///
86 |     /// * `expected` - The expected changes.
87 |     ///
88 |     /// # Panics
89 |     ///
90 |     /// Panics if the action result is not `Ok` or if the changes do not match the expected changes.
93 |     #[track_caller]
94 |     pub fn then(self, expected: impl Into<Vec<R>>) {
95 |         assert_eq!(Ok(expected.into()), self._step.result);
96 |     }
97 | 
98 |     /// Allows for custom assertions on the resulting events from a decision execution.
99 |     ///
100 |     /// The `then_assert` method enables more complex verification logic beyond simple equality checks.
101 |     /// This is particularly useful when you need to perform detailed validation of event properties or
102 |     /// when the exact sequence or content of events requires custom validation logic.
103 |     ///
104 |     /// # Parameters
105 |     ///
106 |     /// * `assertion` - A closure that receives a reference to the vector of resulting events and performs custom assertions on them.
107 |     ///
108 |     /// # Example
109 |     ///
110 |     /// ```no_run
111 |     ///
112 |     /// #[test]
113 |     /// fn test_with_custom_assertions() {
114 |     ///     disintegrate::TestHarness::given([
115 |     ///         DomainEvent::AccountOpened { account_id: 1 },
116 |     ///         DomainEvent::AmountDeposited {
117 |     ///             account_id: 1,
118 |     ///             amount: 10,
119 |     ///         },
120 |     ///     ])
121 |     ///     .when(WithdrawAmount::new(1, 10))
122 |     ///     .then_assert(|events| {
123 |     ///         // Complex assertions can be implemented here
124 |     ///         assert_eq!(events.len(), 1, "Expected exactly one event");
125 |     ///         if let DomainEvent::AmountWithdrawn { account_id, amount } = &events[0] {
126 |     ///             assert_eq!(*account_id, 1);
127 |     ///             assert_eq!(*amount, 10);
128 |     ///             // Additional validation like checking timestamps, etc.
129 |     ///         } else {
130 |     ///             panic!("Expected an AmountWithdrawn event");
131 |     ///         }
132 |     ///     });
133 |     /// }
134 |     /// ```
135 |     ///
136 |     /// # Notes
137 |     ///
138 |     /// * This method is tracked by the Rust caller location system, so error messages will point to the correct line in your test.
139 |     /// * Use `then()` for straightforward equality assertions.
140 |     /// * For asserting errors rather than events, use `then_err()` instead.
141 |     #[track_caller]
142 |     pub fn then_assert(self, assertion: impl FnOnce(&Vec<R>)) {
143 |         assertion(&self._step.result.unwrap());
144 |     }
145 | 
146 |     /// Makes assertions about the expected error result.
147 |     ///
148 |     /// # Arguments
149 |     ///
150 |     /// * `expected` - The expected error.
151 |     ///
152 |     /// # Panics
153 |     ///
154 |     /// Panics if the action result is not `Err` or if the error does not match the expected error.
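    ///
    /// # Example
    ///
    /// A sketch; `DomainEvent`, `WithdrawAmount`, and `AccountError` are illustrative
    /// application types, not part of the library:
    ///
    /// ```ignore
    /// disintegrate::TestHarness::given([DomainEvent::AccountOpened { account_id: 1 }])
    ///     .when(WithdrawAmount::new(1, 100))
    ///     .then_err(AccountError::InsufficientBalance);
    /// ```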
155 |     #[track_caller]
156 |     pub fn then_err(self, expected: ERR) {
157 |         let err = self._step.result.unwrap_err();
158 |         assert_eq!(err, expected);
159 |     }
160 | }
161 | 
162 | #[cfg(test)]
163 | mod tests {
164 |     use std::vec;
165 | 
166 |     use super::*;
167 |     use crate::utils::tests::*;
168 | 
169 |     #[test]
170 |     fn it_should_set_up_initial_state_and_apply_the_history() {
171 |         let mut mock_add_item = MockDecision::new();
172 |         mock_add_item
173 |             .expect_state_query()
174 |             .once()
175 |             .return_once(|| cart("c1", []));
176 |         mock_add_item
177 |             .expect_process()
178 |             .once()
179 |             .return_once(|_| Ok(vec![item_added_event("p2", "c1")]));
180 | 
181 |         TestHarness::given(vec![item_added_event("p1", "c1")])
182 |             .when(mock_add_item)
183 |             .then([item_added_event("p2", "c1")]);
184 |     }
185 | 
186 |     #[test]
187 |     #[should_panic]
188 |     fn it_should_panic_when_action_failed_and_events_were_expected() {
189 |         let mut mock_add_item = MockDecision::new();
190 |         mock_add_item
191 |             .expect_process()
192 |             .once()
193 |             .return_once(|_| Err(CartError("Some error".to_string())));
194 |         TestHarness::given([])
195 |             .when(mock_add_item)
196 |             .then([item_added_event("p2", "c1")]);
197 |     }
198 | 
199 |     #[test]
200 |     fn it_should_assert_expected_error_with_then_err() {
201 |         let mut mock_add_item = MockDecision::new();
202 |         mock_add_item
203 |             .expect_state_query()
204 |             .once()
205 |             .return_once(|| cart("c1", []));
206 |         mock_add_item
207 |             .expect_process()
208 |             .once()
209 |             .return_once(|_| Err(CartError("Some error".to_string())));
210 |         TestHarness::given([])
211 |             .when(mock_add_item)
212 |             .then_err(CartError("Some error".to_string()));
213 |     }
214 | 
215 |     #[test]
216 |     #[should_panic]
217 |     fn it_should_panic_when_an_error_is_expected() {
218 |         let mut mock_add_item = MockDecision::new();
219 |         mock_add_item
220 |             .expect_process()
221 |             .once()
222 |             .return_once(|_| Ok(vec![item_added_event("p2", "c1")]));
223 | 
224 |         TestHarness::given(vec![item_added_event("p1", "c1")])
225 |             .when(mock_add_item)
226 |             .then_err(CartError("Some error".to_string()));
227 |     }
228 | }
229 | 
--------------------------------------------------------------------------------
/examples/courses/src/domain/course.rs:
--------------------------------------------------------------------------------
1 | use disintegrate::{Decision, StateMutate, StateQuery};
2 | use serde::{Deserialize, Serialize};
3 | 
4 | use super::{CourseEvent, DomainEvent};
5 | 
6 | pub type CourseId = String;
7 | 
8 | #[derive(Debug, Clone, PartialEq, Eq, thiserror::Error)]
9 | pub enum CourseError {
10 |     #[error("not found")]
11 |     NotFound,
12 |     #[error("already created")]
13 |     AlreadyCreated,
14 |     #[error("course already closed")]
15 |     AlreadyClosed,
16 |     #[error("invalid seats")]
17 |     InvalidSeats,
18 |     #[error("name empty")]
19 |     NameEmpty,
20 | }
21 | 
22 | #[derive(Debug, StateQuery, Clone, Serialize, Deserialize)]
23 | #[state_query(CourseEvent)]
24 | pub struct Course {
25 |     #[id]
26 |     course_id: CourseId,
27 |     name: String,
28 |     created: bool,
29 |     closed: bool,
30 | }
31 | 
32 | impl Course {
33 |     pub fn new(course_id: CourseId) -> Self {
34 |         Self {
35 |             course_id,
36 |             name: "".to_string(),
37 |             created: false,
38 |             closed: false,
39 |         }
40 |     }
41 | }
42 | 
43 | impl StateMutate for Course {
44 |     fn mutate(&mut self, event: Self::Event) {
45 |         match event {
46 |             CourseEvent::CourseCreated { name, .. } => {
47 |                 self.name = name;
48 |                 self.created = true;
49 |             }
50 |             CourseEvent::CourseClosed { ..
} => { 51 | self.closed = true; 52 | } 53 | CourseEvent::CourseRenamed { name, .. } => { 54 | self.name = name; 55 | } 56 | } 57 | } 58 | } 59 | 60 | #[derive(Debug)] 61 | pub struct CreateCourse { 62 | pub course_id: CourseId, 63 | pub name: String, 64 | pub seats: u32, 65 | } 66 | 67 | impl CreateCourse { 68 | pub fn new(course_id: CourseId, name: &str, seats: u32) -> Self { 69 | Self { 70 | course_id, 71 | name: name.into(), 72 | seats, 73 | } 74 | } 75 | } 76 | 77 | impl Decision for CreateCourse { 78 | type Event = DomainEvent; 79 | 80 | type StateQuery = Course; 81 | 82 | type Error = CourseError; 83 | 84 | fn state_query(&self) -> Self::StateQuery { 85 | Course::new(self.course_id.clone()) 86 | } 87 | 88 | fn process(&self, state: &Self::StateQuery) -> Result, Self::Error> { 89 | if state.created { 90 | return Err(CourseError::AlreadyCreated); 91 | } 92 | 93 | if self.name.is_empty() { 94 | return Err(CourseError::NameEmpty); 95 | } 96 | 97 | Ok(vec![DomainEvent::CourseCreated { 98 | course_id: self.course_id.clone(), 99 | name: self.name.clone(), 100 | seats: self.seats, 101 | }]) 102 | } 103 | } 104 | 105 | #[derive(Debug)] 106 | pub struct CloseCourse { 107 | pub course_id: CourseId, 108 | } 109 | 110 | impl CloseCourse { 111 | pub fn new(course_id: CourseId) -> Self { 112 | Self { course_id } 113 | } 114 | } 115 | impl Decision for CloseCourse { 116 | type Event = DomainEvent; 117 | 118 | type StateQuery = Course; 119 | 120 | type Error = CourseError; 121 | 122 | fn state_query(&self) -> Self::StateQuery { 123 | Course::new(self.course_id.clone()) 124 | } 125 | 126 | fn process(&self, state: &Self::StateQuery) -> Result, Self::Error> { 127 | if !state.created { 128 | return Err(CourseError::NotFound); 129 | } 130 | 131 | if state.closed { 132 | return Err(CourseError::AlreadyClosed); 133 | } 134 | 135 | Ok(vec![DomainEvent::CourseClosed { 136 | course_id: self.course_id.clone(), 137 | }]) 138 | } 139 | } 140 | 141 | #[derive(Debug)] 142 | pub struct RenameCourse { 143 | pub course_id: CourseId, 144 | pub name: String, 145 | } 146 | 147 | impl RenameCourse { 148 | pub fn new(course_id: CourseId, name: &str) -> Self { 149 | Self { 150 | course_id, 151 | name: name.into(), 152 | } 153 | } 154 | } 155 | impl Decision for RenameCourse { 156 | type Event = DomainEvent; 157 | 158 | type StateQuery = Course; 159 | 160 | type Error = CourseError; 161 | 162 | fn state_query(&self) -> Self::StateQuery { 163 | Course::new(self.course_id.clone()) 164 | } 165 | 166 | fn process(&self, state: &Self::StateQuery) -> Result, Self::Error> { 167 | if !state.created { 168 | return Err(CourseError::NotFound); 169 | } 170 | 171 | if self.name.is_empty() { 172 | return Err(CourseError::NameEmpty); 173 | } 174 | 175 | Ok(vec![DomainEvent::CourseRenamed { 176 | course_id: self.course_id.clone(), 177 | name: self.name.to_string(), 178 | }]) 179 | } 180 | } 181 | 182 | #[cfg(test)] 183 | mod test { 184 | use super::*; 185 | 186 | #[test] 187 | fn it_creates_a_new_course() { 188 | disintegrate::TestHarness::given([]) 189 | .when(CreateCourse::new("1".into(), "test course", 1)) 190 | .then([DomainEvent::CourseCreated { 191 | course_id: "1".into(), 192 | name: "test course".into(), 193 | seats: 1, 194 | }]); 195 | } 196 | 197 | #[test] 198 | fn it_should_not_create_a_course_when_it_already_exists() { 199 | disintegrate::TestHarness::given([DomainEvent::CourseCreated { 200 | course_id: "1".into(), 201 | name: "test course".into(), 202 | seats: 3, 203 | }]) 204 | .when(CreateCourse::new("1".into(), "some 
course", 1)) 205 | .then_err(CourseError::AlreadyCreated); 206 | } 207 | 208 | #[test] 209 | fn it_should_not_create_a_course_when_the_provided_name_is_empty() { 210 | disintegrate::TestHarness::given([]) 211 | .when(RenameCourse::new("1".into(), "new name")) 212 | .then_err(CourseError::NotFound); 213 | } 214 | 215 | #[test] 216 | fn it_renames_a_course() { 217 | disintegrate::TestHarness::given([DomainEvent::CourseCreated { 218 | course_id: "1".into(), 219 | name: "old name".into(), 220 | seats: 1, 221 | }]) 222 | .when(RenameCourse::new("1".into(), "new name")) 223 | .then(vec![DomainEvent::CourseRenamed { 224 | course_id: "1".into(), 225 | name: "new name".into(), 226 | }]); 227 | } 228 | 229 | #[test] 230 | fn it_should_not_rename_a_course_when_it_does_not_exist() { 231 | disintegrate::TestHarness::given([]) 232 | .when(RenameCourse::new("1".into(), "new name")) 233 | .then_err(CourseError::NotFound); 234 | } 235 | 236 | #[test] 237 | fn it_should_not_rename_a_course_when_the_new_name_is_empty() { 238 | disintegrate::TestHarness::given([DomainEvent::CourseCreated { 239 | course_id: "1".into(), 240 | name: "old name".into(), 241 | seats: 1, 242 | }]) 243 | .when(RenameCourse::new("1".into(), "")) 244 | .then_err(CourseError::NameEmpty); 245 | } 246 | 247 | #[test] 248 | fn it_closes_a_course() { 249 | disintegrate::TestHarness::given([DomainEvent::CourseCreated { 250 | course_id: "1".into(), 251 | name: "old name".into(), 252 | seats: 1, 253 | }]) 254 | .when(CloseCourse::new("1".into())) 255 | .then(vec![DomainEvent::CourseClosed { 256 | course_id: "1".into(), 257 | }]) 258 | } 259 | 260 | #[test] 261 | fn it_should_not_close_a_course_when_it_does_not_exist() { 262 | disintegrate::TestHarness::given([]) 263 | .when(CloseCourse::new("1".into())) 264 | .then_err(CourseError::NotFound) 265 | } 266 | 267 | #[test] 268 | fn it_should_not_close_a_course_when_it_is_already_closed() { 269 | disintegrate::TestHarness::given([ 270 | DomainEvent::CourseCreated { 271 | course_id: "1".into(), 272 | name: "old name".into(), 273 | seats: 1, 274 | }, 275 | DomainEvent::CourseClosed { 276 | course_id: "1".into(), 277 | }, 278 | ]) 279 | .when(CloseCourse::new("1".into())) 280 | .then_err(CourseError::AlreadyClosed) 281 | } 282 | } 283 | --------------------------------------------------------------------------------