├── .clippy.toml ├── .editorconfig ├── .envrc ├── .github ├── ISSUE_TEMPLATE │ ├── bug_report.md │ └── feature_request.md └── workflows │ ├── ci.yml │ └── docs.yml ├── .gitignore ├── .vscode └── settings.json ├── CONTRIBUTING.md ├── Cargo.toml ├── LICENSE ├── README.md ├── codecov.yml ├── eventually-macros ├── Cargo.toml └── src │ └── lib.rs ├── eventually-postgres ├── Cargo.toml ├── migrations │ ├── 1_events.down.sql │ ├── 1_events.up.sql │ ├── 2_aggregates.down.sql │ └── 2_aggregates.up.sql ├── src │ ├── aggregate.rs │ ├── event.rs │ └── lib.rs └── tests │ ├── aggregate_repository.rs │ ├── event_store.rs │ └── setup │ └── mod.rs ├── eventually ├── Cargo.toml └── src │ ├── aggregate │ ├── mod.rs │ ├── repository.rs │ └── test.rs │ ├── command │ ├── mod.rs │ └── test.rs │ ├── event │ ├── mod.rs │ └── store.rs │ ├── lib.rs │ ├── message.rs │ ├── query.rs │ ├── serde.rs │ ├── tracing.rs │ └── version.rs ├── examples ├── bank-accounting │ ├── Cargo.toml │ ├── README.md │ ├── build.rs │ ├── docker-compose.yml │ ├── proto │ │ ├── bank_account.proto │ │ └── bank_accounting.proto │ └── src │ │ ├── application.rs │ │ ├── domain.rs │ │ ├── grpc.rs │ │ ├── lib.rs │ │ ├── main.rs │ │ ├── postgres.rs │ │ ├── serde.rs │ │ └── tracing.rs └── light-switch │ ├── Cargo.toml │ ├── README.md │ └── src │ ├── application.rs │ ├── commands │ ├── install_light_switch.rs │ ├── mod.rs │ ├── turn_light_switch_off.rs │ └── turn_light_switch_on.rs │ ├── domain.rs │ ├── main.rs │ └── queries │ ├── get_switch_state.rs │ └── mod.rs ├── flake.lock ├── flake.nix ├── renovate.json5 ├── resources └── logo.png └── rustfmt.toml /.clippy.toml: -------------------------------------------------------------------------------- 1 | allowed-duplicate-crates = [ 2 | "thiserror", 3 | "thiserror-impl", 4 | # These are all coming from eventually-postgres, quite likely sqlx... 
5 | "hashbrown", 6 | "wasi", 7 | "windows-sys", 8 | "windows-targets", 9 | "windows_aarch64_gnullvm", 10 | "windows_aarch64_msvc", 11 | "windows_i686_gnu", 12 | "windows_i686_msvc", 13 | "windows_x86_64_gnu", 14 | "windows_x86_64_gnullvm", 15 | "windows_x86_64_msvc", 16 | "zerocopy", 17 | "zerocopy-derive", 18 | "getrandom", 19 | ] 20 | -------------------------------------------------------------------------------- /.editorconfig: -------------------------------------------------------------------------------- 1 | root = true 2 | 3 | [*] 4 | charset = utf-8 5 | end_of_line = lf 6 | indent_size = 4 7 | indent_style = space 8 | insert_final_newline = true 9 | max_line_length = 160 10 | tab_width = 4 11 | trim_trailing_whitespace = true 12 | 13 | [*.{json, md, yaml, yml, proto}] 14 | indent_size = 2 15 | -------------------------------------------------------------------------------- /.envrc: -------------------------------------------------------------------------------- 1 | use flake 2 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: '' 5 | labels: bug 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Describe the bug** 11 | A clear and concise description of what the bug is. 12 | 13 | **To Reproduce** 14 | Steps to reproduce the behavior: 15 | 1. Go to '...' 16 | 2. Click on '....' 17 | 3. Scroll down to '....' 18 | 4. See error 19 | 20 | **Expected behavior** 21 | A clear and concise description of what you expected to happen. 22 | 23 | **Screenshots** 24 | If applicable, add screenshots to help explain your problem. 25 | 26 | **Additional context** 27 | Add any other context about the problem here. 28 | 29 | - [ ] I added the relevant crate labels (e.g. 
`eventually-postgres` if the bug is showing on the crate) 30 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | title: '' 5 | labels: feature 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Is your feature request related to a problem? Please describe.** 11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] 12 | 13 | **Describe the solution you'd like** 14 | A clear and concise description of what you want to happen. 15 | 16 | **Describe alternatives you've considered** 17 | A clear and concise description of any alternative solutions or features you've considered. 18 | 19 | **Additional context** 20 | Add any other context or screenshots about the feature request here. 21 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: Continuous Integration 2 | on: 3 | push: 4 | branches: [main] 5 | pull_request: 6 | 7 | jobs: 8 | test: 9 | name: Test 10 | runs-on: ubuntu-latest 11 | continue-on-error: ${{ matrix.required }} 12 | strategy: 13 | matrix: 14 | build: [stable, nightly] 15 | include: 16 | - build: stable 17 | required: true 18 | toolchain: stable 19 | - build: nightly 20 | required: false 21 | toolchain: nightly 22 | 23 | steps: 24 | - name: Install Protoc 25 | uses: arduino/setup-protoc@v3 26 | with: 27 | repo-token: ${{ secrets.GITHUB_TOKEN }} 28 | 29 | - uses: actions/checkout@v4 30 | 31 | - uses: actions-rs/toolchain@v1 32 | with: 33 | profile: minimal 34 | toolchain: ${{ matrix.toolchain }} 35 | override: true 36 | components: rustfmt 37 | 38 | - name: Rust cache 39 | uses: Swatinem/rust-cache@v2 40 | 41 | - uses: actions-rs/cargo@v1 42 | 
with: 43 | command: test 44 | args: --workspace --all-features 45 | env: 46 | DATABASE_URL: postgres://eventually:password@localhost:5432/eventually?sslmode=disable 47 | 48 | coverage: 49 | name: Code Coverage 50 | runs-on: ubuntu-latest 51 | 52 | services: 53 | postgres: 54 | env: 55 | POSTGRES_USER: eventually 56 | POSTGRES_PASSWORD: password 57 | POSTGRES_DB: eventually 58 | image: postgres:latest 59 | ports: ["5432:5432"] 60 | options: >- 61 | --health-cmd pg_isready 62 | --health-interval 10s 63 | --health-timeout 5s 64 | --health-retries 5 65 | 66 | steps: 67 | - name: Install Protoc 68 | uses: arduino/setup-protoc@v3 69 | with: 70 | repo-token: ${{ secrets.GITHUB_TOKEN }} 71 | 72 | - name: checkout source 73 | uses: actions/checkout@v4 74 | 75 | - name: Install Rust stable toolchain 76 | uses: actions-rs/toolchain@v1 77 | with: 78 | toolchain: nightly 79 | profile: minimal 80 | override: true 81 | 82 | - name: Rust cache 83 | uses: Swatinem/rust-cache@v2 84 | 85 | - name: Install cargo-llvm-cov 86 | uses: taiki-e/install-action@cargo-llvm-cov 87 | 88 | - name: Run llvm-cov 89 | run: cargo llvm-cov --all-features --doctests --workspace --lcov --output-path lcov.info 90 | env: 91 | DATABASE_URL: postgres://eventually:password@localhost:5432/eventually?sslmode=disable 92 | 93 | - name: Upload to codecov.io 94 | uses: codecov/codecov-action@ad3126e916f78f00edff4ed0317cf185271ccc2d # v5 95 | with: 96 | files: lcov.info 97 | 98 | - name: Archive code coverage results 99 | uses: actions/upload-artifact@v4 100 | with: 101 | name: code-coverage-report 102 | path: lcov.info 103 | 104 | formatting: 105 | name: Rustfmt Check 106 | runs-on: ubuntu-latest 107 | steps: 108 | - name: Checkout sources 109 | uses: actions/checkout@v4 110 | 111 | - name: Install nightly toolchain 112 | uses: actions-rs/toolchain@v1 113 | with: 114 | profile: minimal 115 | toolchain: nightly 116 | override: true 117 | components: rustfmt 118 | 119 | - name: Run cargo fmt 120 | uses: 
actions-rs/cargo@v1 121 | with: 122 | command: fmt 123 | args: --all -- --check 124 | 125 | lint: 126 | name: Clippy Lint 127 | runs-on: ubuntu-latest 128 | steps: 129 | - name: Install Protoc 130 | uses: arduino/setup-protoc@v3 131 | with: 132 | repo-token: ${{ secrets.GITHUB_TOKEN }} 133 | 134 | - name: Checkout sources 135 | uses: actions/checkout@v4 136 | 137 | - name: Install stable toolchain 138 | uses: actions-rs/toolchain@v1 139 | with: 140 | profile: minimal 141 | toolchain: stable 142 | override: true 143 | components: clippy,rustfmt 144 | 145 | - name: Rust cache 146 | uses: Swatinem/rust-cache@v2 147 | 148 | - name: Run cargo clippy 149 | uses: actions-rs/clippy-check@v1 150 | with: 151 | token: ${{ secrets.GITHUB_TOKEN }} 152 | args: --all-features 153 | -------------------------------------------------------------------------------- /.github/workflows/docs.yml: -------------------------------------------------------------------------------- 1 | name: Generate docs (main) 2 | on: 3 | push: 4 | branches: 5 | - main 6 | 7 | jobs: 8 | docs: 9 | name: Docs 10 | runs-on: ubuntu-latest 11 | steps: 12 | - name: Install Protoc 13 | uses: arduino/setup-protoc@v3 14 | with: 15 | repo-token: ${{ secrets.GITHUB_TOKEN }} 16 | 17 | - name: Checkout sources 18 | uses: actions/checkout@v4 19 | 20 | - name: Install stable toolchain 21 | uses: actions-rs/toolchain@v1 22 | with: 23 | profile: minimal 24 | toolchain: stable 25 | override: true 26 | components: rustfmt 27 | 28 | - name: Run cargo doc --no-deps 29 | uses: actions-rs/cargo@v1 30 | with: 31 | command: doc 32 | args: --no-deps 33 | 34 | - name: Deploy docs to gh-pages branch 35 | uses: peaceiris/actions-gh-pages@v4 36 | with: 37 | personal_token: ${{ secrets.GITHUB_TOKEN }} 38 | publish_dir: ./target/doc 39 | publish_branch: gh-pages 40 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 
1 | **/target 2 | **/*.rs.bk 3 | /Cargo.lock 4 | lcov.info 5 | .direnv 6 | -------------------------------------------------------------------------------- /.vscode/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "editor.tabSize": 2, 3 | "editor.formatOnSave": true, 4 | "files.associations": { 5 | "*.json5": "jsonc", 6 | }, 7 | "[json][jsonc]": { 8 | "editor.defaultFormatter": "vscode.json-language-features", 9 | "editor.indentSize": "tabSize", 10 | }, 11 | "[rust]": { 12 | "editor.tabSize": 4, 13 | }, 14 | "rust-analyzer.cargo.features": "all", 15 | "rust-analyzer.check.command": "clippy", 16 | "nix.serverPath": "nil", 17 | "nix.enableLanguageServer": true, 18 | "nix.serverSettings": { 19 | "nil": { 20 | "formatting": { 21 | "command": [ 22 | "nixpkgs-fmt" 23 | ] 24 | } 25 | } 26 | }, 27 | } 28 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing to `eventually` 2 | 3 | Thank you so much for considering contributing to `eventually`! :tada: 4 | 5 | - [Contributing to `eventually`](#contributing-to-eventually) 6 | - [What should I know before I get started?](#what-should-i-know-before-i-get-started) 7 | - [I want to contribute, but I don't know what can I contribute to...](#i-want-to-contribute-but-i-dont-know-what-can-i-contribute-to) 8 | - [Pull Requests](#pull-requests) 9 | - [How should my Pull Request look like?](#how-should-my-pull-request-look-like) 10 | - [Describe the scope of the PR](#describe-the-scope-of-the-pr) 11 | - [Git commit messages](#git-commit-messages) 12 | - [Rust styleguide](#rust-styleguide) 13 | 14 | ## What should I know before I get started? 15 | 16 | The current project structure is using a [Rust virtual workspace][workspace], 17 | where all the crates share the same dependencies and lock file. 
18 | (This might be changed in the future) 19 | 20 | The workspace has the following crates: 21 | 22 | - [`eventually`]\: the main crate, users should depend solely on this crate 23 | - [`eventually-macros`]\: the crate that contains procedural macros to make the development experience nicer for users, 24 | - [`eventually-postgres`]\: the crate that contains implementations of abstractions from `eventually` for PostgreSQL databases, 25 | 26 | When submitting a PR involving one or more of these crates, please make sure to use the **corresponding label**. 27 | 28 | ### I want to contribute, but I don't know what can I contribute to... 29 | 30 | If you want to contribute but you have no idea how, or what specific features are 31 | missing, please check the [issues] page: we usually have some issues that are marked as "good for 32 | beginners". 33 | 34 | Being an open-source project, we are always looking for new contributors, so no matter 35 | your level of Event Sourcing skills or language mastery, your PRs are always welcome :heart: 36 | 37 | ## Pull Requests 38 | 39 | Before opening a Pull Request, check the [Issues section][issues] for an issue that might describe the feature you're considering adding. 40 | 41 | If no such issue has been created, [feel free to create one][new-issue] and use the `rfc` label to discuss technical 42 | implementation before the code review process. You can also reach out to our [Gitter chat][gitter] to discuss new 43 | features or other technical aspects of the crate. 44 | 45 | In order to submit a PR, follow these steps: 46 | 47 | 1. **Fork** the repository 48 | 2. **Clone** your fork of the repository in your local machine 49 | 3. Add the upstream repository in your local machine copy: 50 | ```bash 51 | git remote add upstream git@github.com:get-eventually/eventually-rs 52 | git fetch upstream 53 | ``` 54 | 4. 
Create a new branch starting from `upstream/main`: 55 | ```bash 56 | git checkout -b "" --track upstream/main 57 | ``` 58 | 5. Do your magic :tada: 59 | 6. Once ready, open a PR pointing to `upstream/main` :+1: 60 | 61 | After the PR is created, wait for the CI pipeline to run and the test to pass. Whenever introducing new changes to the 62 | repository, make sure your changes are **covered** by **unit** or **integration tests**. 63 | 64 | When all PR badges are green, the review process can start. 65 | Maintainer and collaborators will try to finish the PR review as fast as possible. 66 | 67 | Once the PR has been approved, the maintainer or collaborators will **squash-merge** the PR onto `main`. 68 | 69 | ## How should my Pull Request look like? 70 | 71 | ### Describe the scope of the PR 72 | 73 | Please, include a meaningful PR description, detailing is the scope of the PR and the changes 74 | introduced to fulfill it. 75 | 76 | If an issue has been opened already, and if the technical discussion has already happened in the issue, you can avoid 77 | including a detailed PR description by linking the issue to the PR (e.g. `Closes #..` or `Fixes #..`). 78 | 79 | ### Git commit messages 80 | 81 | The project makes use of [Conventional Commits][conventional-commits] style for the Git commit messages. 82 | 83 | Please, make sure to follow the style specifications, and try to leave a clear commit history: this will make the review 84 | process easier for reviewers, as the review can be carried _commit-by-commit_. 85 | 86 | ### Rust styleguide 87 | 88 | As you might've guessed, `eventually` is a Rust project. 89 | 90 | The CI pipeline will check the code style by using `clippy` and `cargo check` 91 | every time a new commit has been pushed to an open PR. 92 | 93 | Make sure you run `rustfmt` **before committing any changes**, as failing to do so will most likely fail the CI pipeline steps. 
94 | 95 | [workspace]: https://doc.rust-lang.org/book/ch14-03-cargo-workspaces.html 96 | [`eventually`]: ./eventually 97 | [`eventually-macros`]: ./eventually-macros 98 | [`eventually-postgres`]: ./eventually-postgres 99 | [issues]: https://github.com/get-eventually/eventually-rs/issues 100 | [new-issue]: https://github.com/get-eventually/eventually-rs/issues/new 101 | [conventional-commits]: https://www.conventionalcommits.org/en/v1.0.0/ 102 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [workspace] 2 | resolver = "2" 3 | members = [ 4 | "eventually", 5 | "eventually-macros", 6 | "eventually-postgres", 7 | # Examples 8 | "examples/bank-accounting", 9 | "examples/light-switch", 10 | ] 11 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2019 Danilo Cianfrone 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 |
2 | 3 |
4 | Eventually 5 |
6 | 7 |
8 | 9 |
10 | 11 | Event Sourcing for Rust 12 | 13 |
14 | 15 |
16 | 17 |
18 | 19 | 20 | GitHub Workflow Status 22 | 23 | 24 | 25 | Codecov 27 | 28 | 29 | 30 | Crates.io 32 | 33 | 34 | 35 | latest main docs 37 | 38 | 39 | 40 | docs.rs docs 42 | 43 | 44 | 45 | GitHub license 47 | 48 |
49 | 50 |
51 | 52 | 53 | Collection of traits and other utilities to help you build your Event-sourced applications in Rust. 54 | 55 | ## Installation 56 | 57 | > ⚠️ **v0.5.0 is under active development**: Breaking changes are expected. If you are using `eventually` as a git dependency you should use a pinned version! 58 | 59 | Add `eventually` into your project dependencies: 60 | 61 | ```toml 62 | [dependencies] 63 | eventually = { version = "0.5.0", features = ["full"], git = "https://github.com/get-eventually/eventually-rs" } 64 | ``` 65 | 66 | ### Note on semantic versioning 67 | 68 | This library is **actively being developed**, and prior to `v1` release the following [Semantic versioning]() 69 | is being adopted: 70 | 71 | * Breaking changes are tagged with a new `MINOR` release 72 | * New features, patches and documentation are tagged with a new `PATCH` release 73 | 74 | ## What is Event Sourcing? 75 | 76 | Before diving into the crate's internals, you may be wondering what Event Sourcing is. 77 | 78 | From [eventstore.com](https://eventstore.com/) introduction: 79 | 80 | >Event Sourcing is an architectural pattern that is gaining popularity as a method for building modern systems. Unlike traditional databases which only store and update the current state of data, event-sourced systems store all changes as an immutable series of events in the order that they occurred and current state is derived from that event log. 81 | 82 | ## How does `eventually-rs` support Event Sourcing? 83 | 84 | `eventually` exposes all the necessary abstraction to model your 85 | Domain Entities (in lingo, _Aggregates_) using Domain Events, and 86 | to save these Events using an _Event Store_ (the append-only event log). 87 | 88 | For more information, [check out the crate documentation](https://docs.rs/eventually). 
89 | 90 | You can also take a look at the [`bank-accounting`](https://github.com/get-eventually/eventually-rs/tree/main/examples/bank-accounting) example, 91 | showcasing Event-sourced application for a generic (and simple) Bank Accounting bounded context. 92 | 93 | ### Event Store backends 94 | 95 | `eventually-rs` provides the necessary abstractions for modeling and interacting 96 | with an Event Store. 97 | 98 | These are the following officially-supported backend implementations: 99 | * [`eventually::event::store::InMemory`](./eventually/src/event/store.rs): simple inmemory Event Store implementation, using `std::collections::HashMap`, 100 | * [`eventually-postgres`](./eventually-postgres): Event Store and Aggregate Root Repository implementations for PostgreSQL databases. 101 | 102 | ## Contributing 103 | 104 | You want to contribute to `eventually-rs` but you don't know where to start? 105 | 106 | First of all, thank you for considering contributing ❤️ 107 | 108 | You can head over our [`CONTRIBUTING`](./CONTRIBUTING.md) section to know 109 | how to contribute to the project, and — in case you don't have a clear idea what 110 | to contribute — what is most needed needed from contributors. 111 | 112 | ## License 113 | 114 | This project is licensed under the [MIT license](LICENSE). 115 | 116 | ### Contribution 117 | 118 | Unless you explicitly state otherwise, any contribution intentionally submitted for inclusion in `eventually-rs` by you, shall be licensed as MIT, without any additional terms or conditions. 119 | -------------------------------------------------------------------------------- /codecov.yml: -------------------------------------------------------------------------------- 1 | ignore: 2 | # Example projects are ignored for the calculation of coverage, 3 | # as they don't impact the functionality of the crate. 
4 | - examples/**/* 5 | -------------------------------------------------------------------------------- /eventually-macros/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "eventually-macros" 3 | description = "Macros for eventually crate" 4 | version = "0.1.0" 5 | edition = "2021" 6 | authors = ["Danilo Cianfrone "] 7 | license = "MIT" 8 | readme = "../README.md" 9 | repository = "https://github.com/get-eventually/eventually-rs" 10 | 11 | categories = [ 12 | "rust-patterns", 13 | "web-programming", 14 | "asynchronous", 15 | "data-structures", 16 | ] 17 | keywords = ["architecture", "ddd", "event-sourcing", "cqrs", "es"] 18 | 19 | [lib] 20 | proc-macro = true 21 | 22 | [dependencies] 23 | syn = { version = "2.0.100", features = ["full"] } 24 | quote = "1.0.40" 25 | eventually = { path = "../eventually" } 26 | -------------------------------------------------------------------------------- /eventually-macros/src/lib.rs: -------------------------------------------------------------------------------- 1 | //! `eventually-macros` contains useful macros that provides 2 | //! different implementations of traits and functionalities from [eventually]. 3 | 4 | #![deny(unsafe_code, unused_qualifications, trivial_casts, missing_docs)] 5 | #![deny(clippy::all, clippy::pedantic, clippy::cargo)] 6 | 7 | use proc_macro::TokenStream; 8 | use quote::quote; 9 | use syn::punctuated::Punctuated; 10 | use syn::{parse_macro_input, Fields, ItemStruct, Token, Type}; 11 | 12 | /// Implements a newtype to use the [`eventually::aggregate::Root`] instance with 13 | /// user-defined [`eventually::aggregate::Aggregate`] types. 14 | /// 15 | /// # Context 16 | /// 17 | /// The eventually API uses [`aggregate::Root`][eventually::aggregate::Root] 18 | /// to manage the versioning and list of events to commit for an `Aggregate` instance. 
19 | /// Domain commands are to be implemented on the `aggregate::Root` instance, as it gives 20 | /// access to use `Root.record_that` or `Root.record_new` to record Domain Events. 21 | /// 22 | /// However, it's not possible to use `impl aggregate::Root` (`MyAggregateType` 23 | /// being an example of user-defined `Aggregate` type) outside the `eventually` crate (E0116). 24 | /// Therefore, a newtype that uses `aggregate::Root` is required. 25 | /// 26 | /// This attribute macro makes the implementation of a newtype easy, as it implements 27 | /// conversion traits from and to `aggregate::Root` and implements automatic deref 28 | /// through [`std::ops::Deref`] and [`std::ops::DerefMut`]. 29 | /// 30 | /// # Panics 31 | /// 32 | /// This method will panic if the Aggregate Root type is not provided as a macro parameter. 33 | #[proc_macro_attribute] 34 | pub fn aggregate_root(args: TokenStream, item: TokenStream) -> TokenStream { 35 | let mut item = parse_macro_input!(item as ItemStruct); 36 | let item_ident = item.ident.clone(); 37 | 38 | let aggregate_type: Type = 39 | parse_macro_input!(args with Punctuated::::parse_terminated) 40 | .into_iter() 41 | .next() 42 | .expect("the aggregate root type must be provided as macro parameter"); 43 | 44 | item.fields = Fields::Unnamed( 45 | syn::parse2(quote! { (eventually::aggregate::Root<#aggregate_type>) }).unwrap(), 46 | ); 47 | 48 | let result = quote! 
{ 49 | #item 50 | 51 | impl std::ops::Deref for #item_ident { 52 | type Target = eventually::aggregate::Root<#aggregate_type>; 53 | 54 | fn deref(&self) -> &Self::Target { 55 | &self.0 56 | } 57 | } 58 | 59 | impl std::ops::DerefMut for #item_ident { 60 | fn deref_mut(&mut self) -> &mut Self::Target { 61 | &mut self.0 62 | } 63 | } 64 | 65 | impl From> for #item_ident { 66 | fn from(root: eventually::aggregate::Root<#aggregate_type>) -> Self { 67 | Self(root) 68 | } 69 | } 70 | 71 | impl From<#item_ident> for eventually::aggregate::Root<#aggregate_type> { 72 | fn from(value: #item_ident) -> Self { 73 | value.0 74 | } 75 | } 76 | }; 77 | 78 | result.into() 79 | } 80 | -------------------------------------------------------------------------------- /eventually-postgres/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "eventually-postgres" 3 | description = "PostgreSQL-specific trait implementations and utilities for the eventually crate" 4 | version = "0.5.0" 5 | edition = "2021" 6 | authors = ["Danilo Cianfrone "] 7 | license = "MIT" 8 | readme = "../README.md" 9 | repository = "https://github.com/get-eventually/eventually-rs" 10 | 11 | categories = ["web-programming", "asynchronous"] 12 | keywords = ["postgres", "postgresql", "database", "ddd", "event-sourcing"] 13 | 14 | [dependencies] 15 | anyhow = "1.0.97" 16 | async-trait = "0.1.77" 17 | chrono = "0.4.40" 18 | eventually = { path = "../eventually", version = "0.5.0", features = [ 19 | "serde-json", 20 | ] } 21 | futures = "0.3.31" 22 | regex = "1.11.1" 23 | sqlx = { version = "0.8.3", features = [ 24 | "runtime-tokio-rustls", 25 | "postgres", 26 | "migrate", 27 | ] } 28 | thiserror = "2.0.12" 29 | 30 | [dev-dependencies] 31 | tokio = { version = "1.44.1", features = ["macros", "rt"] } 32 | eventually = { path = "../eventually", version = "0.5.0", features = [ 33 | "serde-json", 34 | ] } 35 | eventually-macros = { path = "../eventually-macros", 
version = "0.1.0" } 36 | serde = { version = "1.0.219", features = ["derive"] } 37 | rand = "0.9.0" 38 | testcontainers-modules = { version = "0.11.6", features = ["postgres"] } 39 | -------------------------------------------------------------------------------- /eventually-postgres/migrations/1_events.down.sql: -------------------------------------------------------------------------------- 1 | DROP TABLE events; 2 | DROP TABLE event_streams; 3 | -------------------------------------------------------------------------------- /eventually-postgres/migrations/1_events.up.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE event_streams ( 2 | event_stream_id TEXT NOT NULL PRIMARY KEY, 3 | "version" INTEGER NOT NULL CHECK ("version" > 0) 4 | ); 5 | 6 | CREATE TABLE events ( 7 | event_stream_id TEXT NOT NULL, 8 | "type" TEXT NOT NULL, 9 | "version" INTEGER NOT NULL CHECK ("version" > 0), 10 | "event" BYTEA NOT NULL, 11 | metadata JSONB, 12 | 13 | PRIMARY KEY (event_stream_id, "version"), 14 | FOREIGN KEY (event_stream_id) REFERENCES event_streams (event_stream_id) ON DELETE CASCADE 15 | ); 16 | 17 | CREATE INDEX event_stream_id_idx ON events (event_stream_id); 18 | 19 | CREATE PROCEDURE upsert_event_stream( 20 | _event_stream_id TEXT, 21 | _expected_version INTEGER, 22 | _new_version INTEGER 23 | ) 24 | LANGUAGE PLPGSQL 25 | AS $$ 26 | DECLARE 27 | current_event_stream_version INTEGER; 28 | BEGIN 29 | -- Retrieve the latest version for the target Event Stream. 
30 | SELECT es."version" 31 | INTO current_event_stream_version 32 | FROM event_streams es 33 | WHERE es.event_stream_id = _event_stream_id; 34 | 35 | IF (NOT FOUND AND _expected_version <> 0) OR (current_event_stream_version <> _expected_version) 36 | THEN 37 | RAISE EXCEPTION 'event stream version check failed, expected: %, got: %', _expected_version, current_event_stream_version; 38 | END IF; 39 | 40 | INSERT INTO event_streams (event_stream_id, "version") 41 | VALUES (_event_stream_id, _new_version) 42 | ON CONFLICT (event_stream_id) DO 43 | UPDATE SET "version" = _new_version; 44 | END; 45 | $$; 46 | 47 | CREATE FUNCTION upsert_event_stream_with_no_version_check( 48 | _event_stream_id TEXT, 49 | _new_version_offset INTEGER 50 | ) 51 | RETURNS INTEGER 52 | LANGUAGE PLPGSQL 53 | AS $$ 54 | DECLARE 55 | current_event_stream_version INTEGER; 56 | new_event_stream_version INTEGER; 57 | BEGIN 58 | -- Retrieve the latest version for the target Event Stream. 59 | SELECT es."version" 60 | INTO current_event_stream_version 61 | FROM event_streams es 62 | WHERE es.event_stream_id = _event_stream_id; 63 | 64 | IF NOT FOUND THEN 65 | current_event_stream_version := 0; 66 | END IF; 67 | 68 | new_event_stream_version := current_event_stream_version + _new_version_offset; 69 | 70 | INSERT INTO event_streams (event_stream_id, "version") 71 | VALUES (_event_stream_id, new_event_stream_version) 72 | ON CONFLICT (event_stream_id) DO 73 | UPDATE SET "version" = new_event_stream_version; 74 | 75 | RETURN new_event_stream_version; 76 | END; 77 | $$; 78 | -------------------------------------------------------------------------------- /eventually-postgres/migrations/2_aggregates.down.sql: -------------------------------------------------------------------------------- 1 | DROP TABLE aggregates; 2 | DROP PROCEDURE upsert_aggregate; 3 | -------------------------------------------------------------------------------- /eventually-postgres/migrations/2_aggregates.up.sql: 
-------------------------------------------------------------------------------- 1 | CREATE TABLE aggregates ( 2 | aggregate_id TEXT NOT NULL PRIMARY KEY REFERENCES event_streams (event_stream_id) ON DELETE CASCADE, 3 | "type" TEXT NOT NULL, 4 | "version" INTEGER NOT NULL CHECK ("version" > 0), 5 | "state" BYTEA NOT NULL 6 | ); 7 | 8 | CREATE PROCEDURE upsert_aggregate( 9 | _aggregate_id TEXT, 10 | _type TEXT, 11 | _expected_version INTEGER, 12 | _new_version INTEGER, 13 | _state BYTEA 14 | ) 15 | LANGUAGE PLPGSQL 16 | AS $$ 17 | DECLARE 18 | current_aggregate_version INTEGER; 19 | BEGIN 20 | -- Retrieve the latest version for the target aggregate. 21 | SELECT a."version" 22 | INTO current_aggregate_version 23 | FROM aggregates a 24 | WHERE a.aggregate_id = _aggregate_id; 25 | 26 | IF (NOT FOUND AND _expected_version <> 0) OR (current_aggregate_version <> _expected_version) 27 | THEN 28 | RAISE EXCEPTION 'aggregate version check failed, expected: %, got: %', _expected_version, current_aggregate_version; 29 | END IF; 30 | 31 | -- An Aggregate Root is also an Event Stream. 32 | INSERT INTO event_streams (event_stream_id, "version") 33 | VALUES (_aggregate_id, _new_version) 34 | ON CONFLICT (event_stream_id) DO 35 | UPDATE SET "version" = _new_version; 36 | 37 | INSERT INTO aggregates (aggregate_id, "type", "version", "state") 38 | VALUES (_aggregate_id, _type, _new_version, _state) 39 | ON CONFLICT (aggregate_id) DO 40 | UPDATE SET "version" = _new_version, "state" = _state; 41 | END; 42 | $$; 43 | -------------------------------------------------------------------------------- /eventually-postgres/src/aggregate.rs: -------------------------------------------------------------------------------- 1 | //! This module contains the implementation of the [`eventually::aggregate::Repository`] trait, 2 | //! to work specifically with `PostgreSQL` databases. 3 | //! 4 | //! Check out the [Repository] type for more information. 
5 | 6 | use std::marker::PhantomData; 7 | 8 | use anyhow::anyhow; 9 | use async_trait::async_trait; 10 | use eventually::aggregate::Aggregate; 11 | use eventually::version::Version; 12 | use eventually::{aggregate, serde, version}; 13 | use sqlx::{PgPool, Postgres, Row}; 14 | 15 | /// Implements the [`eventually::aggregate::Repository`] trait for 16 | /// `PostgreSQL` databases. 17 | #[derive(Debug, Clone)] 18 | pub struct Repository 19 | where 20 | T: Aggregate, 21 | ::Id: ToString, 22 | Serde: serde::Serde, 23 | EvtSerde: serde::Serde, 24 | { 25 | pool: PgPool, 26 | aggregate_serde: Serde, 27 | event_serde: EvtSerde, 28 | t: PhantomData, 29 | } 30 | 31 | impl Repository 32 | where 33 | T: Aggregate, 34 | ::Id: ToString, 35 | Serde: serde::Serde, 36 | EvtSerde: serde::Serde, 37 | { 38 | /// Runs the latest migrations necessary for the implementation to work, 39 | /// then returns a new [`Repository`] instance. 40 | /// 41 | /// # Errors 42 | /// 43 | /// An error is returned if the migrations fail to run. 44 | pub async fn new( 45 | pool: PgPool, 46 | aggregate_serde: Serde, 47 | event_serde: EvtSerde, 48 | ) -> Result { 49 | // Make sure the latest migrations are used before using the Repository instance. 
50 | crate::MIGRATIONS.run(&pool).await?; 51 | 52 | Ok(Self { 53 | pool, 54 | aggregate_serde, 55 | event_serde, 56 | t: PhantomData, 57 | }) 58 | } 59 | } 60 | 61 | impl Repository 62 | where 63 | T: Aggregate + Send + Sync, 64 | ::Id: ToString, 65 | Serde: serde::Serde + Send + Sync, 66 | EvtSerde: serde::Serde + Send + Sync, 67 | { 68 | async fn save_aggregate_state( 69 | &self, 70 | tx: &mut sqlx::Transaction<'_, Postgres>, 71 | aggregate_id: &str, 72 | expected_version: Version, 73 | root: &mut aggregate::Root, 74 | ) -> Result<(), aggregate::repository::SaveError> { 75 | let out_state = root.to_aggregate_type::(); 76 | let bytes_state = self 77 | .aggregate_serde 78 | .serialize(out_state) 79 | .map_err(|err| anyhow!("failed to serialize aggregate root state: {}", err))?; 80 | 81 | #[allow(clippy::cast_possible_truncation)] 82 | sqlx::query("CALL upsert_aggregate($1, $2, $3, $4, $5)") 83 | .bind(aggregate_id) 84 | .bind(T::type_name()) 85 | .bind(expected_version as i32) 86 | .bind(root.version() as i32) 87 | .bind(bytes_state) 88 | .execute(&mut **tx) 89 | .await 90 | .map_err(|err| match crate::check_for_conflict_error(&err) { 91 | Some(err) => aggregate::repository::SaveError::Conflict(err), 92 | None => match err 93 | .as_database_error() 94 | .and_then(sqlx::error::DatabaseError::code) 95 | { 96 | Some(code) if code == "40001" => version::ConflictError { 97 | expected: expected_version, 98 | actual: root.version(), 99 | } 100 | .into(), 101 | _ => anyhow!("failed to save aggregate state: {}", err).into(), 102 | }, 103 | })?; 104 | 105 | Ok(()) 106 | } 107 | } 108 | 109 | #[async_trait] 110 | impl aggregate::repository::Getter for Repository 111 | where 112 | T: Aggregate + Send + Sync, 113 | ::Id: ToString, 114 | Serde: serde::Serde + Send + Sync, 115 | EvtSerde: serde::Serde + Send + Sync, 116 | { 117 | async fn get(&self, id: &T::Id) -> Result, aggregate::repository::GetError> { 118 | let aggregate_id = id.to_string(); 119 | 120 | let row = 
sqlx::query( 121 | r#"SELECT version, state 122 | FROM aggregates 123 | WHERE aggregate_id = $1 AND "type" = $2"#, 124 | ) 125 | .bind(&aggregate_id) 126 | .bind(T::type_name()) 127 | .fetch_one(&self.pool) 128 | .await 129 | .map_err(|err| match err { 130 | sqlx::Error::RowNotFound => aggregate::repository::GetError::NotFound, 131 | _ => anyhow!("failed to fetch the aggregate state row: {}", err).into(), 132 | })?; 133 | 134 | let version: i32 = row 135 | .try_get("version") 136 | .map_err(|err| anyhow!("failed to get 'version' column from row: {}", err))?; 137 | 138 | let bytes_state: Vec = row 139 | .try_get("state") 140 | .map_err(|err| anyhow!("failed to get 'state' column from row: {}", err))?; 141 | 142 | let aggregate: T = self 143 | .aggregate_serde 144 | .deserialize(&bytes_state) 145 | .map_err(|err| { 146 | anyhow!( 147 | "failed to deserialize the aggregate state from the database row: {}", 148 | err 149 | ) 150 | })?; 151 | 152 | #[allow(clippy::cast_sign_loss)] 153 | Ok(aggregate::Root::rehydrate_from_state( 154 | version as Version, 155 | aggregate, 156 | )) 157 | } 158 | } 159 | 160 | #[async_trait] 161 | impl aggregate::repository::Saver for Repository 162 | where 163 | T: Aggregate + Send + Sync, 164 | ::Id: ToString, 165 | Serde: serde::Serde + Send + Sync, 166 | EvtSerde: serde::Serde + Send + Sync, 167 | { 168 | async fn save( 169 | &self, 170 | root: &mut aggregate::Root, 171 | ) -> Result<(), aggregate::repository::SaveError> { 172 | let events_to_commit = root.take_uncommitted_events(); 173 | 174 | if events_to_commit.is_empty() { 175 | return Ok(()); 176 | } 177 | 178 | let mut tx = self 179 | .pool 180 | .begin() 181 | .await 182 | .map_err(|err| anyhow!("failed to begin transaction: {}", err))?; 183 | 184 | sqlx::query("SET TRANSACTION ISOLATION LEVEL SERIALIZABLE DEFERRABLE") 185 | .execute(&mut *tx) 186 | .await 187 | .map_err(|err| anyhow!("failed to begin transaction: {}", err))?; 188 | 189 | let aggregate_id = 
root.aggregate_id().to_string(); 190 | let expected_root_version = root.version() - (events_to_commit.len() as Version); 191 | 192 | self.save_aggregate_state(&mut tx, &aggregate_id, expected_root_version, root) 193 | .await?; 194 | 195 | #[allow(clippy::cast_possible_truncation)] 196 | crate::event::append_domain_events( 197 | &mut tx, 198 | &self.event_serde, 199 | &aggregate_id, 200 | root.version() as i32, 201 | events_to_commit, 202 | ) 203 | .await 204 | .map_err(|err| anyhow!("failed to append aggregate root domain events: {}", err))?; 205 | 206 | tx.commit() 207 | .await 208 | .map_err(|err| anyhow!("failed to commit transaction: {}", err))?; 209 | 210 | Ok(()) 211 | } 212 | } 213 | -------------------------------------------------------------------------------- /eventually-postgres/src/event.rs: -------------------------------------------------------------------------------- 1 | use std::marker::PhantomData; 2 | use std::string::ToString; 3 | 4 | use anyhow::anyhow; 5 | use async_trait::async_trait; 6 | use chrono::Utc; 7 | use eventually::message::{Message, Metadata}; 8 | use eventually::version::Version; 9 | use eventually::{event, serde, version}; 10 | use futures::future::ready; 11 | use futures::{StreamExt, TryStreamExt}; 12 | use sqlx::postgres::PgRow; 13 | use sqlx::{PgPool, Postgres, Row, Transaction}; 14 | 15 | #[derive(Debug, thiserror::Error)] 16 | pub enum StreamError { 17 | #[error("failed to deserialize event from database: {0}")] 18 | DeserializeEvent(#[source] anyhow::Error), 19 | #[error("failed to get column '{name}' from result row: {error}")] 20 | ReadColumn { 21 | name: &'static str, 22 | #[source] 23 | error: sqlx::Error, 24 | }, 25 | #[error("db returned an error: {0}")] 26 | Database(#[source] sqlx::Error), 27 | } 28 | 29 | pub(crate) async fn append_domain_event( 30 | tx: &mut Transaction<'_, Postgres>, 31 | serde: &impl serde::Serializer, 32 | event_stream_id: &str, 33 | event_version: i32, 34 | new_event_stream_version: i32, 35 | 
event: event::Envelope, 36 | ) -> anyhow::Result<()> 37 | where 38 | Evt: Message, 39 | { 40 | let event_type = event.message.name(); 41 | let mut metadata = event.metadata; 42 | let serialized_event = serde 43 | .serialize(event.message) 44 | .map_err(|err| anyhow!("failed to serialize event message: {}", err))?; 45 | 46 | metadata.insert("Recorded-At".to_owned(), Utc::now().to_rfc3339()); 47 | metadata.insert( 48 | "Recorded-With-New-Version".to_owned(), 49 | new_event_stream_version.to_string(), 50 | ); 51 | 52 | sqlx::query( 53 | r#"INSERT INTO events (event_stream_id, "type", "version", event, metadata) VALUES ($1, $2, $3, $4, $5)"#, 54 | ) 55 | .bind(event_stream_id) 56 | .bind(event_type) 57 | .bind(event_version) 58 | .bind(serialized_event) 59 | .bind(sqlx::types::Json(metadata)) 60 | .execute(&mut **tx) 61 | .await?; 62 | 63 | Ok(()) 64 | } 65 | 66 | pub(crate) async fn append_domain_events( 67 | tx: &mut Transaction<'_, Postgres>, 68 | serde: &impl serde::Serializer, 69 | event_stream_id: &str, 70 | new_version: i32, 71 | events: Vec>, 72 | ) -> anyhow::Result<()> 73 | where 74 | Evt: Message, 75 | { 76 | #[allow(clippy::cast_possible_truncation, clippy::cast_possible_wrap)] 77 | let current_event_stream_version = new_version - (events.len() as i32); 78 | 79 | for (i, event) in events.into_iter().enumerate() { 80 | #[allow(clippy::cast_possible_truncation, clippy::cast_possible_wrap)] 81 | let event_version = current_event_stream_version + (i as i32) + 1; 82 | 83 | append_domain_event( 84 | tx, 85 | serde, 86 | event_stream_id, 87 | event_version, 88 | new_version, 89 | event, 90 | ) 91 | .await?; 92 | } 93 | 94 | Ok(()) 95 | } 96 | 97 | #[derive(Debug, Clone)] 98 | pub struct Store 99 | where 100 | Id: ToString + Clone, 101 | Serde: serde::Serde, 102 | { 103 | pool: PgPool, 104 | serde: Serde, 105 | id_type: PhantomData, 106 | evt_type: PhantomData, 107 | } 108 | 109 | impl Store 110 | where 111 | Id: ToString + Clone, 112 | Serde: serde::Serde, 113 | { 
114 | /// Runs the latest migrations necessary for the implementation to work, 115 | /// then returns a new [`Store`] instance. 116 | /// 117 | /// # Errors 118 | /// 119 | /// An error is returned if the migrations fail to run. 120 | pub async fn new(pool: PgPool, serde: Serde) -> Result { 121 | // Make sure the latest migrations are used before using the Store instance. 122 | crate::MIGRATIONS.run(&pool).await?; 123 | 124 | Ok(Self { 125 | pool, 126 | serde, 127 | id_type: PhantomData, 128 | evt_type: PhantomData, 129 | }) 130 | } 131 | } 132 | 133 | fn try_get_column(row: &PgRow, name: &'static str) -> Result 134 | where 135 | for<'a> T: sqlx::Type + sqlx::Decode<'a, Postgres>, 136 | { 137 | row.try_get(name) 138 | .map_err(|err| StreamError::ReadColumn { name, error: err }) 139 | } 140 | 141 | impl Store 142 | where 143 | Id: ToString + Clone + Send + Sync, 144 | Evt: Message + Send + Sync, 145 | Serde: serde::Serde + Send + Sync, 146 | { 147 | fn event_row_to_persisted_event( 148 | &self, 149 | stream_id: Id, 150 | row: &PgRow, 151 | ) -> Result, StreamError> { 152 | let version_column: i32 = try_get_column(row, "version")?; 153 | let event_column: Vec = try_get_column(row, "event")?; 154 | let metadata_column: sqlx::types::Json = try_get_column(row, "metadata")?; 155 | 156 | let deserialized_event = self 157 | .serde 158 | .deserialize(&event_column) 159 | .map_err(StreamError::DeserializeEvent)?; 160 | 161 | #[allow(clippy::cast_sign_loss)] 162 | Ok(event::Persisted { 163 | stream_id, 164 | version: version_column as Version, 165 | event: event::Envelope { 166 | message: deserialized_event, 167 | metadata: metadata_column.0, 168 | }, 169 | }) 170 | } 171 | } 172 | 173 | impl event::store::Streamer for Store 174 | where 175 | Id: ToString + Clone + Send + Sync, 176 | Evt: Message + Send + Sync, 177 | Serde: serde::Serde + Send + Sync, 178 | { 179 | type Error = StreamError; 180 | 181 | fn stream(&self, id: &Id, select: event::VersionSelect) -> event::Stream { 
182 | #[allow(clippy::cast_possible_truncation)] 183 | let from_version: i32 = match select { 184 | event::VersionSelect::All => 0, 185 | event::VersionSelect::From(v) => v as i32, 186 | }; 187 | 188 | let query = sqlx::query( 189 | r"SELECT version, event, metadata 190 | FROM events 191 | WHERE event_stream_id = $1 AND version >= $2 192 | ORDER BY version", 193 | ); 194 | 195 | let id = id.clone(); 196 | 197 | query 198 | .bind(id.to_string()) 199 | .bind(from_version) 200 | .fetch(&self.pool) 201 | .map_err(StreamError::Database) 202 | .and_then(move |row| ready(self.event_row_to_persisted_event(id.clone(), &row))) 203 | .boxed() 204 | } 205 | } 206 | 207 | #[async_trait] 208 | impl event::store::Appender for Store 209 | where 210 | Id: ToString + Clone + Send + Sync, 211 | Evt: Message + Send + Sync, 212 | Serde: serde::Serde + Send + Sync, 213 | { 214 | async fn append( 215 | &self, 216 | id: Id, 217 | version_check: version::Check, 218 | events: Vec>, 219 | ) -> Result { 220 | let mut tx = self 221 | .pool 222 | .begin() 223 | .await 224 | .map_err(|err| anyhow!("failed to begin transaction: {}", err))?; 225 | 226 | sqlx::query("SET TRANSACTION ISOLATION LEVEL SERIALIZABLE DEFERRABLE") 227 | .execute(&mut *tx) 228 | .await 229 | .map_err(|err| anyhow!("failed to begin transaction: {}", err))?; 230 | 231 | let string_id = id.to_string(); 232 | 233 | let new_version: i32 = match version_check { 234 | version::Check::Any => { 235 | #[allow(clippy::cast_possible_truncation, clippy::cast_possible_wrap)] 236 | let events_len = events.len() as i32; 237 | 238 | sqlx::query("SELECT * FROM upsert_event_stream_with_no_version_check($1, $2)") 239 | .bind(&string_id) 240 | .bind(events_len) 241 | .fetch_one(&mut *tx) 242 | .await 243 | .and_then(|row| row.try_get(0)) 244 | .map_err(|err| anyhow!("failed to upsert new event stream version: {}", err))? 
245 | }, 246 | version::Check::MustBe(v) => { 247 | let new_version = v + (events.len() as Version); 248 | 249 | #[allow(clippy::cast_possible_truncation)] 250 | sqlx::query("CALL upsert_event_stream($1, $2, $3)") 251 | .bind(&string_id) 252 | .bind(v as i32) 253 | .bind(new_version as i32) 254 | .execute(&mut *tx) 255 | .await 256 | .map_err(|err| match crate::check_for_conflict_error(&err) { 257 | Some(err) => event::store::AppendError::Conflict(err), 258 | None => match err 259 | .as_database_error() 260 | .and_then(sqlx::error::DatabaseError::code) 261 | { 262 | Some(code) if code == "40001" => { 263 | event::store::AppendError::Conflict(version::ConflictError { 264 | expected: v, 265 | actual: new_version, 266 | }) 267 | }, 268 | _ => event::store::AppendError::Internal(anyhow!( 269 | "failed to upsert new event stream version: {}", 270 | err 271 | )), 272 | }, 273 | }) 274 | .map(|_| new_version as i32)? 275 | }, 276 | }; 277 | 278 | append_domain_events(&mut tx, &self.serde, &string_id, new_version, events) 279 | .await 280 | .map_err(|err| anyhow!("failed to append new domain events: {}", err))?; 281 | 282 | tx.commit() 283 | .await 284 | .map_err(|err| anyhow!("failed to commit transaction: {}", err))?; 285 | 286 | #[allow(clippy::cast_sign_loss)] 287 | Ok(new_version as Version) 288 | } 289 | } 290 | -------------------------------------------------------------------------------- /eventually-postgres/src/lib.rs: -------------------------------------------------------------------------------- 1 | //! `eventually-postgres` contains different implementations of traits 2 | //! from the [eventually] crate that are specific for `PostgreSQL` databases. 3 | //! 4 | //! Check out the [`aggregate::Repository`] and [`event::Store`] implementations 5 | //! to know more. 
6 | 7 | #![deny(unsafe_code, unused_qualifications, trivial_casts)] 8 | #![deny(clippy::all, clippy::pedantic, clippy::cargo)] 9 | #![warn(missing_docs)] 10 | 11 | pub mod aggregate; 12 | pub mod event; 13 | 14 | pub(crate) static MIGRATIONS: sqlx::migrate::Migrator = sqlx::migrate!("./migrations"); 15 | 16 | use std::sync::LazyLock; 17 | 18 | use eventually::version::{ConflictError, Version}; 19 | use regex::Regex; 20 | 21 | static CONFLICT_ERROR_REGEX: LazyLock = LazyLock::new(|| { 22 | Regex::new(r"version check failed, expected: (?P\d), got: (?P\d)") 23 | .expect("regex compiles successfully") 24 | }); 25 | 26 | pub(crate) fn check_for_conflict_error(err: &sqlx::Error) -> Option { 27 | fn capture_to_version(captures: ®ex::Captures, name: &'static str) -> Version { 28 | let v: i32 = captures 29 | .name(name) 30 | .expect("field is captured") 31 | .as_str() 32 | .parse::() 33 | .expect("field should be a valid integer"); 34 | 35 | #[allow(clippy::cast_sign_loss)] 36 | { 37 | v as Version 38 | } 39 | } 40 | 41 | if let sqlx::Error::Database(ref pg_err) = err { 42 | return CONFLICT_ERROR_REGEX 43 | .captures(pg_err.message()) 44 | .map(|captures| ConflictError { 45 | actual: capture_to_version(&captures, "got"), 46 | expected: capture_to_version(&captures, "expected"), 47 | }); 48 | } 49 | 50 | None 51 | } 52 | -------------------------------------------------------------------------------- /eventually-postgres/tests/aggregate_repository.rs: -------------------------------------------------------------------------------- 1 | use eventually::aggregate::repository::{self, GetError, Getter, Saver}; 2 | use eventually::serde; 3 | use eventually_postgres::aggregate; 4 | use rand::Rng; 5 | use testcontainers_modules::postgres::Postgres; 6 | use testcontainers_modules::testcontainers::runners::AsyncRunner; 7 | 8 | mod setup; 9 | 10 | #[tokio::test] 11 | async fn it_works() { 12 | let container = Postgres::default() 13 | .start() 14 | .await 15 | .expect("the postgres 
container should start"); 16 | 17 | let (host, port) = futures::try_join!(container.get_host(), container.get_host_port_ipv4(5432)) 18 | .expect("the postgres container should have both a host and a port exposed"); 19 | 20 | println!("postgres container is running at {host}:{port}"); 21 | 22 | let pool = sqlx::PgPool::connect(&format!( 23 | "postgres://postgres:postgres@{}:{}/postgres", 24 | host, port, 25 | )) 26 | .await 27 | .expect("should be able to create a connection with the database"); 28 | 29 | let aggregate_repository = aggregate::Repository::new( 30 | pool, 31 | serde::Json::::default(), 32 | serde::Json::::default(), 33 | ) 34 | .await 35 | .unwrap(); 36 | 37 | let aggregate_id = setup::TestAggregateId(rand::rng().random::()); 38 | 39 | let result = aggregate_repository 40 | .get(&aggregate_id) 41 | .await 42 | .expect_err("should fail"); 43 | 44 | match result { 45 | GetError::NotFound => (), 46 | _ => panic!( 47 | "unexpected error received, should be 'not found': {:?}", 48 | result 49 | ), 50 | }; 51 | 52 | let mut root = setup::TestAggregateRoot::create(aggregate_id, "John Dee".to_owned()) 53 | .expect("aggregate root should be created"); 54 | 55 | // We also delete it just to cause more Domain Events in its Event Stream. 
56 | root.delete().unwrap(); 57 | 58 | aggregate_repository 59 | .save(&mut root) 60 | .await 61 | .expect("storing the new aggregate root should be successful"); 62 | 63 | let found_root = aggregate_repository 64 | .get(&aggregate_id) 65 | .await 66 | .map(setup::TestAggregateRoot::from) 67 | .expect("the aggregate root should be found successfully"); 68 | 69 | assert_eq!(found_root, root); 70 | } 71 | 72 | #[tokio::test] 73 | async fn it_detects_data_races_and_returns_conflict_error() { 74 | let container = Postgres::default() 75 | .start() 76 | .await 77 | .expect("the postgres container should start"); 78 | 79 | let (host, port) = futures::try_join!(container.get_host(), container.get_host_port_ipv4(5432)) 80 | .expect("the postgres container should have both a host and a port exposed"); 81 | 82 | println!("postgres container is running at {host}:{port}"); 83 | 84 | let pool = sqlx::PgPool::connect(&format!( 85 | "postgres://postgres:postgres@{}:{}/postgres", 86 | host, port, 87 | )) 88 | .await 89 | .expect("should be able to create a connection with the database"); 90 | 91 | let aggregate_repository = aggregate::Repository::new( 92 | pool, 93 | serde::Json::::default(), 94 | serde::Json::::default(), 95 | ) 96 | .await 97 | .unwrap(); 98 | 99 | let aggregate_id = setup::TestAggregateId(rand::rng().random::()); 100 | 101 | let mut root = setup::TestAggregateRoot::create(aggregate_id, "John Dee".to_owned()) 102 | .expect("aggregate root should be created"); 103 | 104 | // We also delete it just to cause more Domain Events in its Event Stream. 105 | root.delete().unwrap(); 106 | 107 | // We clone the Aggregate Root instance so that we have the same 108 | // uncommitted events list as the original instance. 
109 | let mut cloned_root = root.clone(); 110 | 111 | let result = futures::join!( 112 | aggregate_repository.save(&mut root), 113 | aggregate_repository.save(&mut cloned_root), 114 | ); 115 | 116 | match result { 117 | (Ok(()), Err(repository::SaveError::Conflict(_))) => (), 118 | (Err(repository::SaveError::Conflict(_)), Ok(())) => (), 119 | (first, second) => panic!( 120 | "invalid state detected, first: {:?}, second: {:?}", 121 | first, second 122 | ), 123 | }; 124 | } 125 | -------------------------------------------------------------------------------- /eventually-postgres/tests/event_store.rs: -------------------------------------------------------------------------------- 1 | use std::time::{SystemTime, UNIX_EPOCH}; 2 | 3 | use eventually::event::store::{self, AppendError, Appender, Streamer}; 4 | use eventually::event::{Persisted, VersionSelect}; 5 | use eventually::version::Version; 6 | use eventually::{serde, version}; 7 | use eventually_postgres::event; 8 | use futures::TryStreamExt; 9 | use rand::Rng; 10 | use testcontainers_modules::postgres::Postgres; 11 | use testcontainers_modules::testcontainers::runners::AsyncRunner; 12 | 13 | mod setup; 14 | 15 | #[tokio::test] 16 | async fn append_with_no_version_check_works() { 17 | let container = Postgres::default() 18 | .start() 19 | .await 20 | .expect("the postgres container should start"); 21 | 22 | let (host, port) = futures::try_join!(container.get_host(), container.get_host_port_ipv4(5432)) 23 | .expect("the postgres container should have both a host and a port exposed"); 24 | 25 | println!("postgres container is running at {host}:{port}"); 26 | 27 | let pool = sqlx::PgPool::connect(&format!( 28 | "postgres://postgres:postgres@{}:{}/postgres", 29 | host, port, 30 | )) 31 | .await 32 | .expect("should be able to create a connection with the database"); 33 | 34 | let event_store = event::Store::new(pool, serde::Json::::default()) 35 | .await 36 | .unwrap(); 37 | 38 | let id = rand::rng().random::(); 39 
| let event_stream_id = format!("test-event-stream-{}", id); 40 | 41 | let expected_events = vec![setup::TestDomainEvent::WasCreated { 42 | id: setup::TestAggregateId(id), 43 | name: "test something".to_owned(), 44 | at: SystemTime::now() 45 | .duration_since(UNIX_EPOCH) 46 | .unwrap() 47 | .as_millis(), 48 | } 49 | .into()]; 50 | 51 | let expected_persisted_events: Vec<_> = expected_events 52 | .clone() 53 | .into_iter() 54 | .enumerate() 55 | .map(|(i, event)| Persisted { 56 | event, 57 | stream_id: event_stream_id.clone(), 58 | version: (i + 1) as Version, 59 | }) 60 | .collect(); 61 | 62 | let expected_event_stream_version = expected_events.len() as Version; 63 | 64 | let new_event_stream_version = event_store 65 | .append( 66 | event_stream_id.clone(), 67 | version::Check::Any, 68 | expected_events, 69 | ) 70 | .await 71 | .expect("the event store should append the events"); 72 | 73 | assert_eq!(new_event_stream_version, expected_event_stream_version); 74 | 75 | let actual_persisted_events = event_store 76 | .stream(&event_stream_id, VersionSelect::All) 77 | .try_collect::>() 78 | .await 79 | .expect("the event store should stream the events back"); 80 | 81 | assert_eq!(actual_persisted_events, expected_persisted_events); 82 | } 83 | 84 | #[tokio::test] 85 | async fn it_works_with_version_check_for_conflict() { 86 | let container = Postgres::default() 87 | .start() 88 | .await 89 | .expect("the postgres container should start"); 90 | 91 | let (host, port) = futures::try_join!(container.get_host(), container.get_host_port_ipv4(5432)) 92 | .expect("the postgres container should have both a host and a port exposed"); 93 | 94 | println!("postgres container is running at {host}:{port}"); 95 | 96 | let pool = sqlx::PgPool::connect(&format!( 97 | "postgres://postgres:postgres@{}:{}/postgres", 98 | host, port, 99 | )) 100 | .await 101 | .expect("should be able to create a connection with the database"); 102 | 103 | let event_store = event::Store::new(pool, 
serde::Json::::default()) 104 | .await 105 | .unwrap(); 106 | 107 | let id = rand::rng().random::(); 108 | let event_stream_id = format!("test-event-stream-{}", id); 109 | 110 | let expected_events = vec![setup::TestDomainEvent::WasCreated { 111 | id: setup::TestAggregateId(id), 112 | name: "test something".to_owned(), 113 | at: SystemTime::now() 114 | .duration_since(UNIX_EPOCH) 115 | .unwrap() 116 | .as_millis(), 117 | } 118 | .into()]; 119 | 120 | let expected_persisted_events: Vec<_> = expected_events 121 | .clone() 122 | .into_iter() 123 | .enumerate() 124 | .map(|(i, event)| Persisted { 125 | event, 126 | stream_id: event_stream_id.clone(), 127 | version: (i + 1) as Version, 128 | }) 129 | .collect(); 130 | 131 | let expected_event_stream_version = expected_events.len() as Version; 132 | 133 | let new_event_stream_version = event_store 134 | .append( 135 | event_stream_id.clone(), 136 | version::Check::MustBe(0), 137 | expected_events, 138 | ) 139 | .await 140 | .expect("the event store should append the events"); 141 | 142 | assert_eq!(new_event_stream_version, expected_event_stream_version); 143 | 144 | let actual_persisted_events = event_store 145 | .stream(&event_stream_id, VersionSelect::All) 146 | .try_collect::>() 147 | .await 148 | .expect("the event store should stream the events back"); 149 | 150 | assert_eq!(actual_persisted_events, expected_persisted_events); 151 | 152 | // Appending twice the with an unexpected Event Stream version should 153 | // result in a version::ConflictError. 
154 | let error = event_store 155 | .append(event_stream_id.clone(), version::Check::MustBe(0), vec![]) 156 | .await 157 | .expect_err("the event store should have returned a conflict error"); 158 | 159 | if let AppendError::Conflict(err) = error { 160 | return assert_eq!( 161 | err, 162 | version::ConflictError { 163 | expected: 0, 164 | actual: new_event_stream_version, 165 | } 166 | ); 167 | } 168 | 169 | panic!("unexpected error received: {}", error); 170 | } 171 | 172 | #[tokio::test] 173 | async fn it_handles_concurrent_writes_to_the_same_stream() { 174 | let container = Postgres::default() 175 | .start() 176 | .await 177 | .expect("the postgres container should start"); 178 | 179 | let (host, port) = futures::try_join!(container.get_host(), container.get_host_port_ipv4(5432)) 180 | .expect("the postgres container should have both a host and a port exposed"); 181 | 182 | println!("postgres container is running at {host}:{port}"); 183 | 184 | let pool = sqlx::PgPool::connect(&format!( 185 | "postgres://postgres:postgres@{}:{}/postgres", 186 | host, port, 187 | )) 188 | .await 189 | .expect("should be able to create a connection with the database"); 190 | 191 | let event_store = event::Store::new(pool, serde::Json::::default()) 192 | .await 193 | .unwrap(); 194 | 195 | let id = rand::rng().random::(); 196 | let event_stream_id = format!("test-event-stream-{}", id); 197 | 198 | let expected_events = vec![setup::TestDomainEvent::WasCreated { 199 | id: setup::TestAggregateId(id), 200 | name: "test something".to_owned(), 201 | at: SystemTime::now() 202 | .duration_since(UNIX_EPOCH) 203 | .unwrap() 204 | .as_millis(), 205 | } 206 | .into()]; 207 | 208 | let result = futures::join!( 209 | event_store.append( 210 | event_stream_id.clone(), 211 | version::Check::MustBe(0), 212 | expected_events.clone(), 213 | ), 214 | event_store.append( 215 | event_stream_id.clone(), 216 | version::Check::MustBe(0), 217 | expected_events, 218 | ) 219 | ); 220 | 221 | match result { 
222 | (Ok(_), Err(store::AppendError::Conflict(_))) 223 | | (Err(store::AppendError::Conflict(_)), Ok(_)) => { 224 | // This is the expected scenario :) 225 | }, 226 | (first, second) => panic!( 227 | "invalid state detected, first: {:?}, second: {:?}", 228 | first, second 229 | ), 230 | }; 231 | } 232 | -------------------------------------------------------------------------------- /eventually-postgres/tests/setup/mod.rs: -------------------------------------------------------------------------------- 1 | use std::fmt::{Display, Formatter, Result as FmtResult}; 2 | use std::time::{SystemTime, UNIX_EPOCH}; 3 | 4 | use eventually::aggregate; 5 | use eventually::aggregate::Aggregate; 6 | use eventually::message::Message; 7 | use eventually_macros::aggregate_root; 8 | use serde::{Deserialize, Serialize}; 9 | 10 | #[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] 11 | pub struct TestAggregateId(pub i64); 12 | 13 | impl Display for TestAggregateId { 14 | fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult { 15 | write!(f, "test-aggregate:{}", self.0) 16 | } 17 | } 18 | 19 | #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] 20 | pub enum TestDomainEvent { 21 | WasCreated { 22 | id: TestAggregateId, 23 | name: String, 24 | at: u128, 25 | }, 26 | WasDeleted { 27 | id: TestAggregateId, 28 | }, 29 | } 30 | 31 | impl Message for TestDomainEvent { 32 | fn name(&self) -> &'static str { 33 | match self { 34 | TestDomainEvent::WasCreated { .. } => "TestDomainSomethingWasCreated", 35 | TestDomainEvent::WasDeleted { .. 
} => "TestDomainSomethingWasDeleted", 36 | } 37 | } 38 | } 39 | 40 | #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] 41 | pub struct TestAggregate { 42 | id: TestAggregateId, 43 | name: String, 44 | is_deleted: bool, 45 | } 46 | 47 | #[derive(Debug, PartialEq, Eq, thiserror::Error)] 48 | pub enum TestAggregateError { 49 | #[error("already exists")] 50 | AlreadyExists, 51 | #[error("not created yet")] 52 | NotCreatedYet, 53 | } 54 | 55 | impl Aggregate for TestAggregate { 56 | type Id = TestAggregateId; 57 | type Event = TestDomainEvent; 58 | type Error = TestAggregateError; 59 | 60 | fn type_name() -> &'static str { 61 | "TestAggregate" 62 | } 63 | 64 | fn aggregate_id(&self) -> &Self::Id { 65 | &self.id 66 | } 67 | 68 | fn apply(state: Option, event: Self::Event) -> Result { 69 | match (state, event) { 70 | (None, TestDomainEvent::WasCreated { id, name, .. }) => Ok(Self { 71 | id, 72 | name, 73 | is_deleted: false, 74 | }), 75 | (Some(_), TestDomainEvent::WasCreated { .. }) => Err(TestAggregateError::AlreadyExists), 76 | (Some(mut a), TestDomainEvent::WasDeleted { .. }) => { 77 | a.is_deleted = true; 78 | Ok(a) 79 | }, 80 | (None, TestDomainEvent::WasDeleted { .. }) => Err(TestAggregateError::NotCreatedYet), 81 | } 82 | } 83 | } 84 | 85 | #[aggregate_root(TestAggregate)] 86 | #[derive(Debug, Clone, PartialEq)] 87 | pub struct TestAggregateRoot; 88 | 89 | impl TestAggregateRoot { 90 | pub fn create(id: TestAggregateId, name: String) -> Result { 91 | let now = SystemTime::now() 92 | .duration_since(UNIX_EPOCH) 93 | .unwrap() 94 | .as_nanos(); 95 | 96 | Ok(aggregate::Root::::record_new( 97 | TestDomainEvent::WasCreated { name, id, at: now }.into(), 98 | )? 
99 | .into()) 100 | } 101 | 102 | pub fn delete(&mut self) -> Result<(), TestAggregateError> { 103 | let id = self.id; 104 | 105 | if !self.is_deleted { 106 | self.record_that(TestDomainEvent::WasDeleted { id }.into())?; 107 | } 108 | 109 | Ok(()) 110 | } 111 | } 112 | -------------------------------------------------------------------------------- /eventually/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "eventually" 3 | description = "Eventually is a minimalistic crate that exposes a few building blocks to build Event-sourced applications in Rust." 4 | version = "0.5.0" 5 | edition = "2021" 6 | authors = ["Danilo Cianfrone "] 7 | license = "MIT" 8 | readme = "../README.md" 9 | repository = "https://github.com/get-eventually/eventually-rs" 10 | 11 | categories = [ 12 | "rust-patterns", 13 | "web-programming", 14 | "asynchronous", 15 | "data-structures", 16 | ] 17 | keywords = ["architecture", "ddd", "event-sourcing", "cqrs", "es"] 18 | 19 | [features] 20 | default = [] 21 | tracing = ["dep:tracing"] 22 | serde-prost = ["dep:prost"] 23 | serde-json = ["dep:serde_json"] 24 | full = ["serde-prost", "serde-json", "tracing"] 25 | 26 | [dependencies] 27 | anyhow = "1.0.97" 28 | async-trait = "0.1.77" 29 | futures = "0.3.30" 30 | thiserror = "2.0.12" 31 | prost = { version = "0.13.5", optional = true } 32 | serde_json = { version = "1.0.114", optional = true } 33 | serde = { version = "1.0.197", features = ["derive"] } 34 | tracing = { version = "0.1.40", features = ["async-await"], optional = true } 35 | 36 | [dev-dependencies] 37 | lazy_static = "1.4.0" 38 | serde_json = "1.0.114" 39 | tokio = { version = "1.36.0", features = ["macros", "rt-multi-thread"] } 40 | -------------------------------------------------------------------------------- /eventually/src/aggregate/mod.rs: -------------------------------------------------------------------------------- 1 | //! 
Module containing support for the Aggregate pattern. 2 | //! 3 | //! ## What is an Aggregate? 4 | //! 5 | //! An [Aggregate] is the most important concept in your domain. 6 | //! 7 | //! It represents the entities your business domain is composed of, 8 | //! and the business logic your domain is exposing. 9 | //! 10 | //! For example: in an Order Management bounded-context (e.g. a 11 | //! microservice), the concepts of Order or Customer are two potential 12 | //! [Aggregate]s. 13 | //! 14 | //! Aggregates expose mutations with the concept of **commands**: 15 | //! from the previous example, an Order might expose some commands such as 16 | //! _"Add Order Item"_, or _"Remove Order Item"_, or _"Place Order"_ 17 | //! to close the transaction. 18 | //! 19 | //! In Event Sourcing, the Aggregate state is modified by the usage of 20 | //! **Domain Events**, which carry some or all the fields in the state 21 | //! in a certain logical meaning. 22 | //! 23 | //! As such, commands in Event Sourcing will **produce** Domain Events. 24 | //! 25 | //! Aggregates should provide a way to **fold** Domain Events on the 26 | //! current value of the state, to produce the next state. 27 | 28 | use crate::version::Version; 29 | use crate::{event, message}; 30 | 31 | pub mod repository; 32 | pub mod test; 33 | 34 | use futures::TryStreamExt; 35 | pub use repository::{EventSourced as EventSourcedRepository, Repository}; 36 | 37 | /// An Aggregate represents a Domain Model that, through an Aggregate [Root], 38 | /// acts as a _transactional boundary_. 39 | /// 40 | /// Aggregates are also used to enforce Domain invariants 41 | /// (i.e. certain constraints or rules that are unique to a specific Domain). 42 | /// 43 | /// Since this is an Event-sourced version of the Aggregate pattern, 44 | /// any change to the Aggregate state must be represented through 45 | /// a Domain Event, which is then applied to the current state 46 | /// using the [`Aggregate::apply`] method. 
47 | /// 48 | /// More on Aggregates can be found here: `` 49 | pub trait Aggregate: Sized + Send + Sync + Clone { 50 | /// The type used to uniquely identify the Aggregate. 51 | type Id: Send + Sync; 52 | 53 | /// The type of Domain Events that interest this Aggregate. 54 | /// Usually, this type should be an `enum`. 55 | type Event: message::Message + Send + Sync + Clone; 56 | 57 | /// The error type that can be returned by [`Aggregate::apply`] when 58 | /// mutating the Aggregate state. 59 | type Error: Send + Sync; 60 | 61 | /// A unique name identifier for this Aggregate type. 62 | fn type_name() -> &'static str; 63 | 64 | /// Returns the unique identifier for the Aggregate instance. 65 | fn aggregate_id(&self) -> &Self::Id; 66 | 67 | /// Mutates the state of an Aggregate through a Domain Event. 68 | /// 69 | /// # Errors 70 | /// 71 | /// The method can return an error if the event to apply is unexpected 72 | /// given the current state of the Aggregate. 73 | fn apply(state: Option, event: Self::Event) -> Result; 74 | } 75 | 76 | /// An Aggregate Root represents the Domain Entity object used to 77 | /// load and save an [Aggregate] from and to a [Repository], and 78 | /// to perform actions that may result in new Domain Events 79 | /// to change the state of the Aggregate. 80 | /// 81 | /// The Aggregate state and list of Domain Events recorded 82 | /// are handled by the [Root] object itself. 83 | /// 84 | /// ```text 85 | /// #[derive(Debug, Clone)] 86 | /// struct MyAggregate { 87 | /// // Here goes the state of the Aggregate. 88 | /// }; 89 | /// 90 | /// #[derive(Debug, Clone, PartialEq, Eq)] 91 | /// enum MyAggregateEvent { 92 | /// // Here we list the Domain Events for the Aggregate. 93 | /// EventHasHappened, 94 | /// } 95 | /// 96 | /// impl Aggregate for MyAggregate { 97 | /// type Id = i64; // Just for the sake of the example. 98 | /// type Event = MyAggregateEvent; 99 | /// type Error = (); // Just for the sake of the example. 
Use a proper error here. 100 | /// 101 | /// fn aggregate_id(&self) -> &Self::Id { 102 | /// todo!() 103 | /// } 104 | /// 105 | /// fn apply(this: Option, event: Self::Event) -> Result { 106 | /// todo!() 107 | /// } 108 | /// } 109 | /// 110 | /// // This type is necessary in order to create a new vtable 111 | /// // for the method implementations in the block below. 112 | /// #[derive(Debug, Clone)] 113 | /// struct MyAggregateRoot(Root) 114 | /// 115 | /// impl MyAggregateRoot { 116 | /// pub fn do_something() -> Result { 117 | /// // Here, we record a new Domain Event through the Root object. 118 | /// // 119 | /// // This will record the new Domain Event in a list of events to commit, 120 | /// // and call the `MyAggregate::apply` method to create the Aggregate state. 121 | /// Root::::record_new(MyAggregateEvent::EventHasHappened) 122 | /// .map(MyAggregateRoot) 123 | /// } 124 | /// } 125 | /// ``` 126 | #[derive(Debug, Clone, PartialEq)] 127 | #[must_use] 128 | pub struct Root 129 | where 130 | T: Aggregate, 131 | { 132 | aggregate: T, 133 | version: Version, 134 | recorded_events: Vec>, 135 | } 136 | 137 | impl std::ops::Deref for Root 138 | where 139 | T: Aggregate, 140 | { 141 | type Target = T; 142 | 143 | fn deref(&self) -> &Self::Target { 144 | &self.aggregate 145 | } 146 | } 147 | 148 | impl Root 149 | where 150 | T: Aggregate, 151 | { 152 | /// Returns the current version for the [Aggregate]. 153 | pub fn version(&self) -> Version { 154 | self.version 155 | } 156 | 157 | /// Returns the unique identifier of the [Aggregate]. 158 | pub fn aggregate_id(&self) -> &T::Id { 159 | self.aggregate.aggregate_id() 160 | } 161 | 162 | /// Maps the [Aggregate] value contained within [Root] 163 | /// to a different type, that can be converted through [From] trait. 164 | /// 165 | /// Useful to convert an [Aggregate] type to a data transfer object to use 166 | /// for database storage. 
167 | pub fn to_aggregate_type(&self) -> K 168 | where 169 | K: From, 170 | { 171 | K::from(self.aggregate.clone()) 172 | } 173 | 174 | /// Returns the list of uncommitted, recorded Domain [Event]s from the [Root] 175 | /// and resets the internal list to its default value. 176 | #[doc(hidden)] 177 | pub fn take_uncommitted_events(&mut self) -> Vec> { 178 | std::mem::take(&mut self.recorded_events) 179 | } 180 | 181 | /// Creates a new [Aggregate] [Root] instance by applying the specified 182 | /// Domain Event. 183 | /// 184 | /// Example of usage: 185 | /// ```text 186 | /// use eventually::{ 187 | /// event, 188 | /// aggregate::Root, 189 | /// aggregate, 190 | /// }; 191 | /// 192 | /// let my_aggregate_root = MyAggregateRoot::record_new( 193 | /// event::Envelope::from(MyDomainEvent { /* something */ }) 194 | /// )?; 195 | /// ``` 196 | /// 197 | /// # Errors 198 | /// 199 | /// The method can return an error if the event to apply is unexpected 200 | /// given the current state of the Aggregate. 201 | pub fn record_new(event: event::Envelope) -> Result { 202 | Ok(Root { 203 | version: 1, 204 | aggregate: T::apply(None, event.message.clone())?, 205 | recorded_events: vec![event], 206 | }) 207 | } 208 | 209 | /// Records a change to the [Aggregate] [Root], expressed by the specified 210 | /// Domain Event. 
211 | /// 212 | /// Example of usage: 213 | /// ```text 214 | /// use eventually::{ 215 | /// event, 216 | /// aggregate::Root, 217 | /// }; 218 | /// 219 | /// impl MyAggregateRoot { 220 | /// pub fn update_name(&mut self, name: String) -> Result<(), MyAggregateError> { 221 | /// if name.is_empty() { 222 | /// return Err(MyAggregateError::NameIsEmpty); 223 | /// } 224 | /// 225 | /// self.record_that( 226 | /// event::Envelope::from(MyAggergateEvent::NameWasChanged { name }) 227 | /// ) 228 | /// } 229 | /// } 230 | /// ``` 231 | /// 232 | /// # Errors 233 | /// 234 | /// The method can return an error if the event to apply is unexpected 235 | /// given the current state of the Aggregate. 236 | pub fn record_that(&mut self, event: event::Envelope) -> Result<(), T::Error> { 237 | self.aggregate = T::apply(Some(self.aggregate.clone()), event.message.clone())?; 238 | self.recorded_events.push(event); 239 | self.version += 1; 240 | 241 | Ok(()) 242 | } 243 | } 244 | 245 | /// List of possible errors that can be returned by [`Root::rehydrate_async`]. 246 | #[derive(Debug, thiserror::Error)] 247 | pub enum RehydrateError { 248 | /// Error returned during rehydration when the [Aggregate Root][Root] 249 | /// is applying a Domain Event using [`Aggregate::apply`]. 250 | /// 251 | /// This usually implies the Event Stream for the [Aggregate] 252 | /// contains corrupted or unexpected data. 253 | #[error("failed to apply domain event while rehydrating aggregate: {0}")] 254 | Domain(#[source] T), 255 | 256 | /// This error is returned by [`Root::rehydrate_async`] when the underlying 257 | /// [`futures::TryStream`] has returned an error. 258 | #[error("failed to rehydrate aggregate from event stream: {0}")] 259 | Inner(#[source] I), 260 | } 261 | 262 | impl Root 263 | where 264 | T: Aggregate, 265 | { 266 | /// Rehydrates an [Aggregate] Root from its state and version. 267 | /// Useful for [Repository] implementations outside the [EventSourcedRepository] one. 
268 | #[doc(hidden)] 269 | pub fn rehydrate_from_state(version: Version, aggregate: T) -> Root { 270 | Root { 271 | version, 272 | aggregate, 273 | recorded_events: Vec::default(), 274 | } 275 | } 276 | 277 | /// Rehydrates an [Aggregate Root][Root] from a stream of Domain Events. 278 | #[doc(hidden)] 279 | pub(crate) fn rehydrate( 280 | mut stream: impl Iterator>, 281 | ) -> Result>, T::Error> { 282 | stream.try_fold(None, |ctx: Option>, event| { 283 | let new_ctx_result = match ctx { 284 | None => Root::::rehydrate_from(event), 285 | Some(ctx) => ctx.apply_rehydrated_event(event), 286 | }; 287 | 288 | Ok(Some(new_ctx_result?)) 289 | }) 290 | } 291 | 292 | /// Rehydrates an [Aggregate Root][Root] from a stream of Domain Events. 293 | #[doc(hidden)] 294 | pub(crate) async fn rehydrate_async( 295 | stream: impl futures::TryStream, Error = Err>, 296 | ) -> Result>, RehydrateError> { 297 | stream 298 | .map_err(RehydrateError::Inner) 299 | .try_fold(None, |ctx: Option>, event| async { 300 | let new_ctx_result = match ctx { 301 | None => Root::::rehydrate_from(event), 302 | Some(ctx) => ctx.apply_rehydrated_event(event), 303 | }; 304 | 305 | Ok(Some(new_ctx_result.map_err(RehydrateError::Domain)?)) 306 | }) 307 | .await 308 | } 309 | 310 | /// Creates a new [Root] instance from a Domain [Event] 311 | /// while rehydrating an [Aggregate]. 312 | /// 313 | /// # Errors 314 | /// 315 | /// The method can return an error if the event to apply is unexpected 316 | /// given the current state of the Aggregate. 317 | #[doc(hidden)] 318 | pub(crate) fn rehydrate_from(event: event::Envelope) -> Result, T::Error> { 319 | Ok(Root { 320 | version: 1, 321 | aggregate: T::apply(None, event.message)?, 322 | recorded_events: Vec::default(), 323 | }) 324 | } 325 | 326 | /// Applies a new Domain [Event] to the [Root] while rehydrating 327 | /// an [Aggregate]. 
328 | /// 329 | /// # Errors 330 | /// 331 | /// The method can return an error if the event to apply is unexpected 332 | /// given the current state of the Aggregate. 333 | #[doc(hidden)] 334 | pub(crate) fn apply_rehydrated_event( 335 | mut self, 336 | event: event::Envelope, 337 | ) -> Result, T::Error> { 338 | self.aggregate = T::apply(Some(self.aggregate), event.message)?; 339 | self.version += 1; 340 | 341 | Ok(self) 342 | } 343 | } 344 | 345 | // The warnings are happening due to usage of the methods only inside #[cfg(test)] 346 | #[allow(dead_code)] 347 | #[doc(hidden)] 348 | #[cfg(test)] 349 | pub(crate) mod test_user_domain { 350 | use crate::{aggregate, message}; 351 | 352 | #[derive(Debug, Clone)] 353 | pub(crate) struct User { 354 | email: String, 355 | password: String, 356 | } 357 | 358 | #[derive(Debug, Clone, PartialEq, Eq)] 359 | pub(crate) enum UserEvent { 360 | WasCreated { email: String, password: String }, 361 | PasswordWasChanged { password: String }, 362 | } 363 | 364 | impl message::Message for UserEvent { 365 | fn name(&self) -> &'static str { 366 | match self { 367 | UserEvent::WasCreated { .. } => "UserWasCreated", 368 | UserEvent::PasswordWasChanged { .. 
} => "UserPasswordWasChanged", 369 | } 370 | } 371 | } 372 | 373 | #[derive(Debug, thiserror::Error)] 374 | pub(crate) enum UserError { 375 | #[error("provided email was empty")] 376 | EmptyEmail, 377 | #[error("provided password was empty")] 378 | EmptyPassword, 379 | #[error("user was not yet created")] 380 | NotYetCreated, 381 | #[error("user was already created")] 382 | AlreadyCreated, 383 | } 384 | 385 | impl aggregate::Aggregate for User { 386 | type Id = String; 387 | type Event = UserEvent; 388 | type Error = UserError; 389 | 390 | fn type_name() -> &'static str { 391 | "User" 392 | } 393 | 394 | fn aggregate_id(&self) -> &Self::Id { 395 | &self.email 396 | } 397 | 398 | fn apply(state: Option, event: Self::Event) -> Result { 399 | match state { 400 | None => match event { 401 | UserEvent::WasCreated { email, password } => Ok(User { email, password }), 402 | UserEvent::PasswordWasChanged { .. } => Err(UserError::NotYetCreated), 403 | }, 404 | Some(mut state) => match event { 405 | UserEvent::PasswordWasChanged { password } => { 406 | state.password = password; 407 | Ok(state) 408 | }, 409 | UserEvent::WasCreated { .. 
} => Err(UserError::AlreadyCreated), 410 | }, 411 | } 412 | } 413 | } 414 | 415 | impl aggregate::Root { 416 | pub(crate) fn create(email: String, password: String) -> Result { 417 | if email.is_empty() { 418 | return Err(UserError::EmptyEmail); 419 | } 420 | 421 | if password.is_empty() { 422 | return Err(UserError::EmptyPassword); 423 | } 424 | 425 | Self::record_new(UserEvent::WasCreated { email, password }.into()) 426 | } 427 | 428 | pub(crate) fn change_password(&mut self, password: String) -> Result<(), UserError> { 429 | if password.is_empty() { 430 | return Err(UserError::EmptyPassword); 431 | } 432 | 433 | self.record_that(UserEvent::PasswordWasChanged { password }.into())?; 434 | 435 | Ok(()) 436 | } 437 | } 438 | } 439 | 440 | #[allow(clippy::semicolon_if_nothing_returned)] // False positives :shrugs: 441 | #[cfg(test)] 442 | mod tests { 443 | use std::error::Error; 444 | 445 | use crate::aggregate::repository::{Getter, Saver}; 446 | use crate::aggregate::test_user_domain::{User, UserEvent}; 447 | use crate::event::store::EventStoreExt; 448 | use crate::{aggregate, event, version}; 449 | 450 | #[tokio::test] 451 | async fn repository_persists_new_aggregate_root() { 452 | let event_store = event::store::InMemory::::default(); 453 | let tracking_event_store = event_store.with_recorded_events_tracking(); 454 | let user_repository = 455 | aggregate::EventSourcedRepository::::from(tracking_event_store.clone()); 456 | 457 | let email = "test@email.com".to_owned(); 458 | let password = "not-a-secret".to_owned(); 459 | 460 | let mut user = aggregate::Root::::create(email.clone(), password.clone()) 461 | .expect("user should be created successfully"); 462 | 463 | user_repository 464 | .save(&mut user) 465 | .await 466 | .expect("user should be saved successfully"); 467 | 468 | let expected_events = vec![event::Persisted { 469 | stream_id: email.clone(), 470 | version: 1, 471 | event: event::Envelope::from(UserEvent::WasCreated { email, password }), 472 | }]; 473 
| 474 | assert_eq!(expected_events, tracking_event_store.recorded_events()); 475 | } 476 | 477 | #[tokio::test] 478 | async fn repository_retrieves_the_aggregate_root_and_stores_new_events() { 479 | let event_store = event::store::InMemory::::default(); 480 | let tracking_event_store = event_store.with_recorded_events_tracking(); 481 | let user_repository = 482 | aggregate::EventSourcedRepository::::from(tracking_event_store.clone()); 483 | 484 | let email = "test@email.com".to_owned(); 485 | let password = "not-a-secret".to_owned(); 486 | 487 | let mut user = aggregate::Root::::create(email.clone(), password.clone()) 488 | .expect("user should be created successfully"); 489 | 490 | user_repository 491 | .save(&mut user) 492 | .await 493 | .expect("user should be saved successfully"); 494 | 495 | // Reset the event recorded while storing the User for the first time. 496 | tracking_event_store.reset_recorded_events(); 497 | 498 | let mut user = user_repository 499 | .get(&email) 500 | .await 501 | .expect("user should be retrieved from the repository"); 502 | 503 | let new_password = "new-password".to_owned(); 504 | 505 | user.change_password(new_password.clone()) 506 | .expect("user password should be changed successfully"); 507 | 508 | user_repository 509 | .save(&mut user) 510 | .await 511 | .expect("new user version should be saved successfully"); 512 | 513 | let expected_events = vec![event::Persisted { 514 | stream_id: email.clone(), 515 | version: 2, 516 | event: event::Envelope::from(UserEvent::PasswordWasChanged { 517 | password: new_password, 518 | }), 519 | }]; 520 | 521 | assert_eq!(expected_events, tracking_event_store.recorded_events()); 522 | } 523 | 524 | #[tokio::test] 525 | async fn repository_returns_conflict_error_from_store_when_data_race_happens() { 526 | let event_store = event::store::InMemory::::default(); 527 | let user_repository = 528 | aggregate::EventSourcedRepository::::from(event_store.clone()); 529 | 530 | let email = 
"test@email.com".to_owned(); 531 | let password = "not-a-secret".to_owned(); 532 | 533 | let mut user = aggregate::Root::::create(email.clone(), password.clone()) 534 | .expect("user should be created successfully"); 535 | 536 | // We need to clone the User Aggregate Root instance to get the list 537 | // of uncommitted events from the Root context twice. 538 | let mut cloned_user = user.clone(); 539 | 540 | // Saving the first User to the Repository. 541 | user_repository 542 | .save(&mut user) 543 | .await 544 | .expect("user should be saved successfully"); 545 | 546 | // Simulating data race by duplicating the call to the Repository 547 | // with the same UserRoot instance that has already been committeed. 548 | let error = user_repository.save(&mut cloned_user).await.expect_err( 549 | "the repository should fail on the second .save() call with the cloned user", 550 | ); 551 | 552 | let error: Box = error.into(); 553 | 554 | // Have no idea how to fix this one... 555 | #[allow(clippy::redundant_closure_for_method_calls)] 556 | { 557 | assert!(error 558 | .source() 559 | .is_some_and(|src| src.is::())); 560 | } 561 | } 562 | } 563 | -------------------------------------------------------------------------------- /eventually/src/aggregate/repository.rs: -------------------------------------------------------------------------------- 1 | //! Module containing the definition of a [Repository], to fetch and store 2 | //! Aggregate Roots from a data store. 3 | //! 4 | //! If you are looking for the Event-sourced implementation of an Aggregate Repository, 5 | //! take a look at [`EventSourced`]. 6 | 7 | use std::fmt::Debug; 8 | use std::marker::PhantomData; 9 | 10 | use async_trait::async_trait; 11 | use futures::TryStreamExt; 12 | 13 | use crate::aggregate::Aggregate; 14 | use crate::{aggregate, event, version}; 15 | 16 | /// All possible errors returned by [`Getter::get`]. 
17 | #[derive(Debug, thiserror::Error)] 18 | pub enum GetError { 19 | /// Error returned when the [Aggregate Root][aggregate::Root] could not be found in the data store. 20 | #[error("failed to get aggregate root: not found")] 21 | NotFound, 22 | /// Error returned when the [Getter] implementation has encountered an error. 23 | #[error("failed to get aggregate root, an error occurred: {0}")] 24 | Internal(#[from] anyhow::Error), 25 | } 26 | 27 | /// Trait used to implement read access to a data store from which 28 | /// to load an [aggregate::Root] instance, given its id. 29 | #[async_trait] 30 | pub trait Getter: Send + Sync 31 | where 32 | T: Aggregate, 33 | { 34 | /// Loads an [aggregate::Root] instance from the data store, 35 | /// referenced by its unique identifier. 36 | async fn get(&self, id: &T::Id) -> Result, GetError>; 37 | } 38 | 39 | /// All possible errors returned by [`Saver::save`]. 40 | #[derive(Debug, thiserror::Error)] 41 | pub enum SaveError { 42 | /// Error returned when [`Saver::save`] encounters a conflict error while saving the new Aggregate Root. 43 | #[error("failed to save aggregate root: {0}")] 44 | Conflict(#[from] version::ConflictError), 45 | /// Error returned when the [Saver] implementation has encountered an error. 46 | #[error("failed to save aggregate root, an error occurred: {0}")] 47 | Internal(#[from] anyhow::Error), 48 | } 49 | 50 | /// Trait used to implement write access to a data store, which can be used 51 | /// to save the latest state of an [aggregate::Root] instance. 52 | #[async_trait] 53 | pub trait Saver: Send + Sync 54 | where 55 | T: Aggregate, 56 | { 57 | /// Saves a new version of an [aggregate::Root] instance to the data store. 58 | async fn save(&self, root: &mut aggregate::Root) -> Result<(), SaveError>; 59 | } 60 | 61 | /// A Repository is an object that allows to load and save 62 | /// an [Aggregate Root][aggregate::Root] from and to a persistent data store. 
63 | pub trait Repository: Getter + Saver + Send + Sync 64 | where 65 | T: Aggregate, 66 | { 67 | } 68 | 69 | impl Repository for R 70 | where 71 | T: Aggregate, 72 | R: Getter + Saver + Send + Sync, 73 | { 74 | } 75 | 76 | /// An Event-sourced implementation of the [Repository] interface. 77 | /// 78 | /// It uses an [Event Store][event::Store] instance to stream Domain Events 79 | /// for a particular Aggregate, and append uncommitted Domain Events 80 | /// recorded by an Aggregate Root. 81 | #[derive(Debug, Clone)] 82 | pub struct EventSourced 83 | where 84 | T: Aggregate, 85 | S: event::Store, 86 | { 87 | store: S, 88 | aggregate: PhantomData, 89 | } 90 | 91 | impl From for EventSourced 92 | where 93 | T: Aggregate, 94 | S: event::Store, 95 | { 96 | fn from(store: S) -> Self { 97 | Self { 98 | store, 99 | aggregate: PhantomData, 100 | } 101 | } 102 | } 103 | 104 | #[async_trait] 105 | impl Getter for EventSourced 106 | where 107 | T: Aggregate, 108 | T::Id: Clone, 109 | T::Error: std::error::Error + Send + Sync + 'static, 110 | S: event::Store, 111 | >::Error: 112 | std::error::Error + Send + Sync + 'static, 113 | { 114 | async fn get(&self, id: &T::Id) -> Result, GetError> { 115 | let stream = self 116 | .store 117 | .stream(id, event::VersionSelect::All) 118 | .map_ok(|persisted| persisted.event); 119 | 120 | let ctx = aggregate::Root::::rehydrate_async(stream) 121 | .await 122 | .map_err(anyhow::Error::from) 123 | .map_err(GetError::Internal)?; 124 | 125 | ctx.ok_or(GetError::NotFound) 126 | } 127 | } 128 | 129 | #[async_trait] 130 | impl Saver for EventSourced 131 | where 132 | T: Aggregate, 133 | T::Id: Clone, 134 | S: event::Store, 135 | { 136 | async fn save(&self, root: &mut aggregate::Root) -> Result<(), SaveError> { 137 | let events_to_commit = root.take_uncommitted_events(); 138 | let aggregate_id = root.aggregate_id(); 139 | 140 | if events_to_commit.is_empty() { 141 | return Ok(()); 142 | } 143 | 144 | let current_event_stream_version = 145 | 
root.version() - (events_to_commit.len() as version::Version); 146 | 147 | self.store 148 | .append( 149 | aggregate_id.clone(), 150 | version::Check::MustBe(current_event_stream_version), 151 | events_to_commit, 152 | ) 153 | .await 154 | .map_err(|err| match err { 155 | event::store::AppendError::Conflict(err) => SaveError::Conflict(err), 156 | event::store::AppendError::Internal(err) => SaveError::Internal(err), 157 | })?; 158 | 159 | Ok(()) 160 | } 161 | } 162 | -------------------------------------------------------------------------------- /eventually/src/aggregate/test.rs: -------------------------------------------------------------------------------- 1 | //! Module exposing a [Scenario] type to test [Aggregate]s using 2 | //! the [given-then-when canvas](https://www.agilealliance.org/glossary/gwt/). 3 | 4 | use std::fmt::Debug; 5 | use std::marker::PhantomData; 6 | use std::ops::Deref; 7 | use std::sync::Arc; 8 | 9 | use crate::aggregate::{Aggregate, Root}; 10 | use crate::event; 11 | 12 | /// A test scenario that can be used to test an [Aggregate] and [Aggregate Root][Root] 13 | /// using a [given-then-when canvas](https://www.agilealliance.org/glossary/gwt/) approach. 14 | #[derive(Clone, Copy)] 15 | pub struct Scenario(PhantomData) 16 | where 17 | T: Aggregate, 18 | T::Id: Clone, 19 | T::Event: Debug + PartialEq, 20 | T::Error: Debug; 21 | 22 | impl Scenario 23 | where 24 | T: Aggregate, 25 | T::Id: Clone, 26 | T::Event: Debug + PartialEq, 27 | T::Error: Debug, 28 | { 29 | /// Creates a new [Scenario] instance. 30 | #[must_use] 31 | pub fn new() -> Self { 32 | Self(PhantomData) 33 | } 34 | 35 | /// Specifies the precondition for the test [Scenario]. 36 | /// 37 | /// In other words, it can be used to specify all the Domain [Event][event::Envelope]s 38 | /// that make up the state of the [Aggregate Root][Root]. 
39 | #[must_use] 40 | pub fn given(self, events: Vec>) -> ScenarioGiven { 41 | ScenarioGiven { 42 | events, 43 | marker: PhantomData, 44 | } 45 | } 46 | 47 | /// Specifies the action/mutation to execute in this [Scenario]. 48 | /// 49 | /// Use this branch when testing actions/mutations that create new [Aggregate Root][Root] 50 | /// instances, i.e. with no prior Domain Events recorded. 51 | #[must_use] 52 | pub fn when(self, f: F) -> ScenarioWhen 53 | where 54 | R: From>, 55 | F: Fn() -> Result, 56 | { 57 | ScenarioWhen { 58 | mutate: f, 59 | marker: PhantomData, 60 | err_marker: PhantomData, 61 | root_marker: PhantomData, 62 | } 63 | } 64 | } 65 | 66 | impl Default for Scenario 67 | where 68 | T: Aggregate, 69 | T::Id: Clone, 70 | T::Event: Debug + PartialEq, 71 | T::Error: Debug, 72 | { 73 | fn default() -> Self { 74 | Self::new() 75 | } 76 | } 77 | 78 | #[doc(hidden)] 79 | pub struct ScenarioGiven 80 | where 81 | T: Aggregate, 82 | T::Id: Clone, 83 | T::Event: Debug + PartialEq, 84 | T::Error: Debug, 85 | { 86 | events: Vec>, 87 | marker: PhantomData, 88 | } 89 | 90 | impl ScenarioGiven 91 | where 92 | T: Aggregate, 93 | T::Id: Clone, 94 | T::Event: Debug + PartialEq, 95 | T::Error: Debug, 96 | { 97 | /// Specifies the action/mutation to execute in this [Scenario]. 98 | /// 99 | /// Use this branch when testing actions/mutations that modify the state 100 | /// of an [Aggregate Root][Root] that already exists, by specifying its 101 | /// current state using [`Scenario::given`]. 102 | /// 103 | /// # Panics 104 | /// 105 | /// Please note: as this method expects that an [Aggregate Root][Root] instance 106 | /// is available when executing the domain method, it will panic if a `Root` instance 107 | /// could not be obtained by rehydrating the [`Aggregate`] state through the events 108 | /// provided in [`Scenario::given`]. 
109 | #[must_use] 110 | pub fn when(self, f: F) -> ScenarioWhen Result, Err> 111 | where 112 | R: From>, 113 | F: Fn(&mut R) -> Result<(), Err>, 114 | { 115 | let events = Arc::new(self.events); 116 | 117 | ScenarioWhen { 118 | marker: PhantomData, 119 | err_marker: PhantomData, 120 | root_marker: PhantomData, 121 | mutate: move || -> Result { 122 | let mut root: R = Root::::rehydrate(events.iter().cloned()) 123 | .expect( 124 | "no error is expected when applying domain events from a 'given' clause", 125 | ) 126 | .expect("an aggregate root instance is expected, but none was produced") 127 | .into(); 128 | 129 | match f(&mut root) { 130 | Ok(()) => Ok(root), 131 | Err(err) => Err(err), 132 | } 133 | }, 134 | } 135 | } 136 | } 137 | 138 | #[doc(hidden)] 139 | pub struct ScenarioWhen 140 | where 141 | T: Aggregate, 142 | T::Event: Debug + PartialEq, 143 | R: From>, 144 | F: Fn() -> Result, 145 | { 146 | mutate: F, 147 | marker: PhantomData, 148 | err_marker: PhantomData, 149 | root_marker: PhantomData, 150 | } 151 | 152 | impl ScenarioWhen 153 | where 154 | T: Aggregate, 155 | T::Event: Debug + PartialEq, 156 | R: From> + Deref>, 157 | F: Fn() -> Result, 158 | { 159 | /// Specifies that the outcome of the [Scenario] is positive, and 160 | /// should result in the creation of the specified Domain Events. 161 | #[must_use] 162 | pub fn then(self, result: Vec>) -> ScenarioThen { 163 | ScenarioThen { 164 | mutate: self.mutate, 165 | expected: Ok(result), 166 | marker: PhantomData, 167 | } 168 | } 169 | 170 | /// Specified that the outcome of the [Scenario] is negative. 171 | /// 172 | /// Use this method to assert the specific Error value that the 173 | /// [Aggregate Root][Root] method should return. 
174 | #[must_use] 175 | pub fn then_error(self, err: Err) -> ScenarioThen { 176 | ScenarioThen { 177 | mutate: self.mutate, 178 | expected: Err(err), 179 | marker: PhantomData, 180 | } 181 | } 182 | } 183 | 184 | #[doc(hidden)] 185 | pub struct ScenarioThen 186 | where 187 | T: Aggregate, 188 | T::Event: Debug + PartialEq, 189 | R: From> + Deref>, 190 | F: Fn() -> Result, 191 | { 192 | mutate: F, 193 | expected: Result>, Err>, 194 | marker: PhantomData, 195 | } 196 | 197 | impl ScenarioThen 198 | where 199 | T: Aggregate, 200 | T::Event: Debug + PartialEq, 201 | R: From> + Deref>, 202 | F: Fn() -> Result, 203 | Err: PartialEq + Debug, 204 | { 205 | /// Runs the [Scenario] and performs the various assertion for the test. 206 | /// 207 | /// # Panics 208 | /// 209 | /// This method will panic if the assertions have not passed, making 210 | /// the test fail. 211 | pub fn assert(self) { 212 | let result = (self.mutate)().map(|root| root.recorded_events.clone()); 213 | assert_eq!(self.expected, result); 214 | } 215 | } 216 | -------------------------------------------------------------------------------- /eventually/src/command/mod.rs: -------------------------------------------------------------------------------- 1 | //! Module containing support for Domain [Command]s. 2 | //! 3 | //! Following the Domain-driven Design definition, a [Command] expresses the 4 | //! intent of an Actor (e.g. a Customer, a User, a System, etc.) to modify 5 | //! the state of the system in some way. 6 | //! 7 | //! To modify the state of the system through a [Command], you must 8 | //! implement a Command [Handler] which, in an Event-sourced system, 9 | //! should make use of an [Aggregate] to evaluate the validity of the Command 10 | //! submitted, and emit Domain [Event]s as a result (through the Event [Store]). 11 | //! 12 | //! Check out the type documentation exported in this module. 
13 | 14 | pub mod test; 15 | 16 | use std::future::Future; 17 | 18 | use async_trait::async_trait; 19 | 20 | use crate::message; 21 | 22 | /// A Command represents an intent by an Actor (e.g. a User, or a System) 23 | /// to mutate the state of the system. 24 | /// 25 | /// In an event-sourced system, a Command is represented as a [Message]. 26 | pub type Envelope = message::Envelope; 27 | 28 | /// A software component that is able to handle [Command]s of a certain type, 29 | /// and mutate the state as a result of the command handling, or fail. 30 | /// 31 | /// In an event-sourced system, the [Command] Handler 32 | /// should use an [Aggregate][crate::aggregate::Aggregate] to evaluate 33 | /// a [Command] to ensure business invariants are respected. 34 | #[async_trait] 35 | pub trait Handler: Send + Sync 36 | where 37 | T: message::Message, 38 | { 39 | /// The error type returned by the Handler while handling a [Command]. 40 | type Error: Send + Sync; 41 | 42 | /// Handles a [Command] and returns an error if the handling has failed. 43 | /// 44 | /// Since [Command]s are solely modifying the state of the system, 45 | /// they do not return anything to the caller but the result of the operation 46 | /// (expressed by a [Result] type). 
47 | async fn handle(&self, command: Envelope) -> Result<(), Self::Error>; 48 | } 49 | 50 | #[async_trait] 51 | impl Handler for F 52 | where 53 | T: message::Message + Send + Sync + 'static, 54 | Err: Send + Sync, 55 | F: Send + Sync + Fn(Envelope) -> Fut, 56 | Fut: Send + Sync + Future>, 57 | { 58 | type Error = Err; 59 | 60 | async fn handle(&self, command: Envelope) -> Result<(), Self::Error> { 61 | self(command).await 62 | } 63 | } 64 | 65 | #[cfg(test)] 66 | mod test_user_domain { 67 | use std::sync::Arc; 68 | 69 | use async_trait::async_trait; 70 | 71 | use crate::aggregate::test_user_domain::{User, UserEvent}; 72 | use crate::{aggregate, command, event, message}; 73 | 74 | struct UserService(Arc>); 75 | 76 | impl From for UserService 77 | where 78 | R: aggregate::Repository + 'static, 79 | { 80 | fn from(repository: R) -> Self { 81 | Self(Arc::new(repository)) 82 | } 83 | } 84 | 85 | struct CreateUser { 86 | email: String, 87 | password: String, 88 | } 89 | 90 | impl message::Message for CreateUser { 91 | fn name(&self) -> &'static str { 92 | "CreateUser" 93 | } 94 | } 95 | 96 | #[async_trait] 97 | impl command::Handler for UserService { 98 | type Error = anyhow::Error; 99 | 100 | async fn handle(&self, command: command::Envelope) -> Result<(), Self::Error> { 101 | let command = command.message; 102 | let mut user = aggregate::Root::::create(command.email, command.password)?; 103 | 104 | self.0.save(&mut user).await?; 105 | 106 | Ok(()) 107 | } 108 | } 109 | 110 | struct ChangeUserPassword { 111 | email: String, 112 | password: String, 113 | } 114 | 115 | impl message::Message for ChangeUserPassword { 116 | fn name(&self) -> &'static str { 117 | "ChangeUserPassword" 118 | } 119 | } 120 | 121 | #[async_trait] 122 | impl command::Handler for UserService { 123 | type Error = anyhow::Error; 124 | 125 | async fn handle( 126 | &self, 127 | command: command::Envelope, 128 | ) -> Result<(), Self::Error> { 129 | let command = command.message; 130 | 131 | let mut 
user = self.0.get(&command.email).await?; 132 | 133 | user.change_password(command.password)?; 134 | 135 | self.0.save(&mut user).await?; 136 | 137 | Ok(()) 138 | } 139 | } 140 | 141 | #[tokio::test] 142 | async fn it_creates_a_new_user_successfully() { 143 | command::test::Scenario 144 | .when(command::Envelope::from(CreateUser { 145 | email: "test@test.com".to_owned(), 146 | password: "not-a-secret".to_owned(), 147 | })) 148 | .then(vec![event::Persisted { 149 | stream_id: "test@test.com".to_owned(), 150 | version: 1, 151 | event: event::Envelope::from(UserEvent::WasCreated { 152 | email: "test@test.com".to_owned(), 153 | password: "not-a-secret".to_owned(), 154 | }), 155 | }]) 156 | .assert_on(|event_store| { 157 | UserService::from(aggregate::EventSourcedRepository::from(event_store)) 158 | }) 159 | .await; 160 | } 161 | 162 | #[tokio::test] 163 | async fn it_fails_to_create_an_user_if_it_still_exists() { 164 | command::test::Scenario 165 | .given(vec![event::Persisted { 166 | stream_id: "test@test.com".to_owned(), 167 | version: 1, 168 | event: event::Envelope::from(UserEvent::WasCreated { 169 | email: "test@test.com".to_owned(), 170 | password: "not-a-secret".to_owned(), 171 | }), 172 | }]) 173 | .when(command::Envelope::from(CreateUser { 174 | email: "test@test.com".to_owned(), 175 | password: "not-a-secret".to_owned(), 176 | })) 177 | .then_fails() 178 | .assert_on(|event_store| { 179 | UserService::from(aggregate::EventSourcedRepository::from(event_store)) 180 | }) 181 | .await; 182 | } 183 | 184 | #[tokio::test] 185 | async fn it_updates_the_password_of_an_existing_user() { 186 | command::test::Scenario 187 | .given(vec![event::Persisted { 188 | stream_id: "test@test.com".to_owned(), 189 | version: 1, 190 | event: event::Envelope::from(UserEvent::WasCreated { 191 | email: "test@test.com".to_owned(), 192 | password: "not-a-secret".to_owned(), 193 | }), 194 | }]) 195 | .when(command::Envelope::from(ChangeUserPassword { 196 | email: 
"test@test.com".to_owned(), 197 | password: "new-password".to_owned(), 198 | })) 199 | .then(vec![event::Persisted { 200 | stream_id: "test@test.com".to_owned(), 201 | version: 2, 202 | event: event::Envelope::from(UserEvent::PasswordWasChanged { 203 | password: "new-password".to_owned(), 204 | }), 205 | }]) 206 | .assert_on(|event_store| { 207 | UserService::from(aggregate::EventSourcedRepository::from(event_store)) 208 | }) 209 | .await; 210 | } 211 | 212 | #[tokio::test] 213 | async fn it_fails_to_update_the_password_if_the_user_does_not_exist() { 214 | command::test::Scenario 215 | .when(command::Envelope::from(ChangeUserPassword { 216 | email: "test@test.com".to_owned(), 217 | password: "new-password".to_owned(), 218 | })) 219 | .then_fails() 220 | .assert_on(|event_store| { 221 | UserService::from(aggregate::EventSourcedRepository::from(event_store)) 222 | }) 223 | .await; 224 | } 225 | } 226 | -------------------------------------------------------------------------------- /eventually/src/command/test.rs: -------------------------------------------------------------------------------- 1 | //! Module exposing a test [Scenario] type to write Domain [Command][command::Envelope]s 2 | //! test cases using the [given-then-when canvas](https://www.agilealliance.org/glossary/gwt/). 3 | 4 | use std::fmt::Debug; 5 | use std::hash::Hash; 6 | 7 | use crate::event::store::{Appender, EventStoreExt}; 8 | use crate::{command, event, message, version}; 9 | 10 | /// A test scenario that can be used to test a [Command][command::Envelope] [Handler][command::Handler] 11 | /// using a [given-then-when canvas](https://www.agilealliance.org/glossary/gwt/) approach. 12 | pub struct Scenario; 13 | 14 | impl Scenario { 15 | /// Sets the precondition state of the system for the [Scenario], which 16 | /// is expressed by a list of Domain [Event][event::Envelope]s in an Event-sourced system. 
17 | #[must_use] 18 | pub fn given(self, events: Vec>) -> ScenarioGiven 19 | where 20 | Evt: message::Message, 21 | { 22 | ScenarioGiven { given: events } 23 | } 24 | 25 | /// Specifies the [Command][command::Envelope] to test in the [Scenario], in the peculiar case 26 | /// of having a clean system. 27 | /// 28 | /// This is a shortcut for: 29 | /// ```text 30 | /// Scenario::given(vec![]).when(...) 31 | /// ``` 32 | #[must_use] 33 | pub fn when(self, command: command::Envelope) -> ScenarioWhen 34 | where 35 | Evt: message::Message, 36 | Cmd: message::Message, 37 | { 38 | ScenarioWhen { 39 | given: Vec::default(), 40 | when: command, 41 | } 42 | } 43 | } 44 | 45 | #[doc(hidden)] 46 | pub struct ScenarioGiven 47 | where 48 | Evt: message::Message, 49 | { 50 | given: Vec>, 51 | } 52 | 53 | impl ScenarioGiven 54 | where 55 | Evt: message::Message, 56 | { 57 | /// Specifies the [Command][command::Envelope] to test in the [Scenario]. 58 | #[must_use] 59 | pub fn when(self, command: command::Envelope) -> ScenarioWhen 60 | where 61 | Cmd: message::Message, 62 | { 63 | ScenarioWhen { 64 | given: self.given, 65 | when: command, 66 | } 67 | } 68 | } 69 | 70 | #[doc(hidden)] 71 | pub struct ScenarioWhen 72 | where 73 | Evt: message::Message, 74 | Cmd: message::Message, 75 | { 76 | given: Vec>, 77 | when: command::Envelope, 78 | } 79 | 80 | impl ScenarioWhen 81 | where 82 | Evt: message::Message, 83 | Cmd: message::Message, 84 | { 85 | /// Sets the expectation on the result of the [Scenario] to be positive 86 | /// and produce a specified list of Domain [Event]s. 87 | #[must_use] 88 | pub fn then(self, events: Vec>) -> ScenarioThen { 89 | ScenarioThen { 90 | given: self.given, 91 | when: self.when, 92 | case: ScenarioThenCase::Produces(events), 93 | } 94 | } 95 | 96 | /// Sets the expectation on the result of the [Scenario] to return an error. 
97 | #[must_use] 98 | pub fn then_fails(self) -> ScenarioThen { 99 | ScenarioThen { 100 | given: self.given, 101 | when: self.when, 102 | case: ScenarioThenCase::Fails, 103 | } 104 | } 105 | } 106 | 107 | enum ScenarioThenCase 108 | where 109 | Evt: message::Message, 110 | { 111 | Produces(Vec>), 112 | Fails, 113 | } 114 | 115 | #[doc(hidden)] 116 | pub struct ScenarioThen 117 | where 118 | Evt: message::Message, 119 | Cmd: message::Message, 120 | { 121 | given: Vec>, 122 | when: command::Envelope, 123 | case: ScenarioThenCase, 124 | } 125 | 126 | impl ScenarioThen 127 | where 128 | Id: Clone + Eq + Hash + Send + Sync + Debug, 129 | Evt: message::Message + Clone + PartialEq + Send + Sync + Debug, 130 | Cmd: message::Message, 131 | { 132 | /// Executes the whole [Scenario] by constructing a Command [Handler][command::Handler] 133 | /// with the provided closure function and running the specified assertions. 134 | /// 135 | /// # Panics 136 | /// 137 | /// The method panics if the assertion fails. 
138 | pub async fn assert_on(self, handler_factory: F) 139 | where 140 | F: Fn(event::store::Tracking, Id, Evt>) -> H, 141 | H: command::Handler, 142 | { 143 | let event_store = event::store::InMemory::::default(); 144 | let tracking_event_store = event_store.clone().with_recorded_events_tracking(); 145 | 146 | for event in self.given { 147 | event_store 148 | .append( 149 | event.stream_id, 150 | version::Check::MustBe(event.version - 1), 151 | vec![event.event], 152 | ) 153 | .await 154 | .expect("domain event in 'given' should be inserted in the event store"); 155 | } 156 | 157 | let handler = handler_factory(tracking_event_store.clone()); 158 | let result = handler.handle(self.when).await; 159 | 160 | match self.case { 161 | ScenarioThenCase::Produces(events) => { 162 | let recorded_events = tracking_event_store.recorded_events(); 163 | assert_eq!(events, recorded_events); 164 | }, 165 | ScenarioThenCase::Fails => assert!(result.is_err()), 166 | } 167 | } 168 | } 169 | -------------------------------------------------------------------------------- /eventually/src/event/mod.rs: -------------------------------------------------------------------------------- 1 | //! Module `event` contains types and abstractions helpful for working 2 | //! with Domain Events. 3 | 4 | pub mod store; 5 | use std::fmt::Debug; 6 | 7 | use futures::stream::BoxStream; 8 | use serde::{Deserialize, Serialize}; 9 | 10 | pub use crate::event::store::Store; 11 | use crate::{message, version}; 12 | 13 | /// An Event is a [Message][message::Message] carring the information about a Domain Event, 14 | /// an occurrence in the system lifetime that is relevant for the Domain 15 | /// that is being implemented. 16 | pub type Envelope = message::Envelope; 17 | 18 | /// An [Event] that has been persisted to the Event [Store]. 
19 | #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] 20 | pub struct Persisted 21 | where 22 | Evt: message::Message, 23 | { 24 | /// The id of the Event Stream the persisted Event belongs to. 25 | pub stream_id: Id, 26 | 27 | /// The version of the Event Stream when this Event has been recorded. 28 | /// 29 | /// This value is used for optimistic concurrency checks, to avoid 30 | /// data races in parallel command evaluations. 31 | /// 32 | /// Check the [Version][version::Version] type and module documentation for more info. 33 | pub version: version::Version, 34 | 35 | /// The actual Domain Event carried by this envelope. 36 | pub event: Envelope, 37 | } 38 | 39 | /// Specifies the slice of the Event Stream to select when calling [`Store::stream`]. 40 | #[derive(Debug, Clone, Copy, PartialEq, Eq)] 41 | pub enum VersionSelect { 42 | /// Selects all [Event][Envelope]s in the Event [Stream]. 43 | All, 44 | 45 | /// Selects all [Event][Envelope]s in the Event [Stream] starting from the [Event] 46 | /// with the specified [Version][version::Version]. 47 | From(version::Version), 48 | } 49 | 50 | /// Stream is a stream of [Persisted] Domain Events. 51 | pub type Stream<'a, Id, Evt, Err> = BoxStream<'a, Result, Err>>; 52 | -------------------------------------------------------------------------------- /eventually/src/event/store.rs: -------------------------------------------------------------------------------- 1 | //! Contains implementations of the [`event::Store`] trait and connected abstractions, 2 | //! such as the [`std::collections::HashMap`]'s based [`InMemory`] Event Store implementation. 
3 | 4 | use std::collections::HashMap; 5 | use std::convert::Infallible; 6 | use std::hash::Hash; 7 | use std::sync::{Arc, RwLock}; 8 | 9 | use async_trait::async_trait; 10 | use futures::stream::{iter, StreamExt}; 11 | 12 | use crate::{event, message, version}; 13 | 14 | /// Interface used to stream [Persisted][event::Persisted] Domain Events 15 | /// from an Event Store to an application. 16 | pub trait Streamer: Send + Sync 17 | where 18 | StreamId: Send + Sync, 19 | Event: message::Message + Send + Sync, 20 | { 21 | /// The error type returned by the Store during a [`stream`] call. 22 | type Error: Send + Sync; 23 | 24 | /// Opens an Event Stream, effectively streaming all Domain Events 25 | /// of an Event Stream back in the application. 26 | fn stream( 27 | &self, 28 | id: &StreamId, 29 | select: event::VersionSelect, 30 | ) -> event::Stream; 31 | } 32 | 33 | /// All possible error types returned by [`Appender::append`]. 34 | #[derive(Debug, thiserror::Error)] 35 | pub enum AppendError { 36 | /// Error returned when [`Appender::append`] encounters a conflict error 37 | /// while appending the new Domain Events. 38 | #[error("failed to append new domain events: {0}")] 39 | Conflict(#[from] version::ConflictError), 40 | /// Error returned when the [Appender] implementation has encountered an error. 41 | #[error("failed to append new domain events, an error occurred: {0}")] 42 | Internal(#[from] anyhow::Error), 43 | } 44 | 45 | #[async_trait] 46 | /// Interface used to append new Domain Events in an Event Store. 47 | pub trait Appender: Send + Sync 48 | where 49 | StreamId: Send + Sync, 50 | Event: message::Message + Send + Sync, 51 | { 52 | /// Appens new Domain Events to the specified Event Stream. 53 | /// 54 | /// The result of this operation is the new [Version][version::Version] 55 | /// of the Event Stream with the specified Domain Events added to it. 
56 | async fn append( 57 | &self, 58 | id: StreamId, 59 | version_check: version::Check, 60 | events: Vec>, 61 | ) -> Result; 62 | } 63 | 64 | /// An [Event][event::Envelope] Store, used to store Domain Events in Event Streams -- a stream 65 | /// of Domain Events -- and retrieve them. 66 | /// 67 | /// Each Event Stream is represented by a unique Stream identifier. 68 | pub trait Store: 69 | Streamer + Appender + Send + Sync 70 | where 71 | StreamId: Send + Sync, 72 | Event: message::Message + Send + Sync, 73 | { 74 | } 75 | 76 | impl Store for T 77 | where 78 | T: Streamer + Appender + Send + Sync, 79 | StreamId: Send + Sync, 80 | Event: message::Message + Send + Sync, 81 | { 82 | } 83 | 84 | #[derive(Debug)] 85 | struct InMemoryBackend 86 | where 87 | Evt: message::Message, 88 | { 89 | event_streams: HashMap>>, 90 | } 91 | 92 | impl Default for InMemoryBackend 93 | where 94 | Evt: message::Message, 95 | { 96 | fn default() -> Self { 97 | Self { 98 | event_streams: HashMap::default(), 99 | } 100 | } 101 | } 102 | 103 | /// In-memory implementation of [`event::Store`] trait, 104 | /// backed by a thread-safe [`std::collections::HashMap`]. 
#[derive(Debug, Clone)]
pub struct InMemory<Id, Evt>
where
    Evt: message::Message,
{
    backend: Arc<RwLock<InMemoryBackend<Id, Evt>>>,
}

impl<Id, Evt> Default for InMemory<Id, Evt>
where
    Evt: message::Message,
{
    fn default() -> Self {
        Self {
            backend: Arc::default(),
        }
    }
}

impl<Id, Evt> Streamer<Id, Evt> for InMemory<Id, Evt>
where
    Id: Clone + Eq + Hash + Send + Sync,
    Evt: message::Message + Clone + Send + Sync,
{
    type Error = Infallible;

    fn stream(&self, id: &Id, select: event::VersionSelect) -> event::Stream<Id, Evt, Self::Error> {
        let backend = self
            .backend
            .read()
            .expect("acquire read lock on event store backend");

        let events = backend
            .event_streams
            .get(id)
            .cloned()
            .unwrap_or_default() // NOTE: the new Vec is empty, so there will be no memory allocation!
            .into_iter()
            .filter(move |evt| match select {
                event::VersionSelect::All => true,
                event::VersionSelect::From(v) => evt.version >= v,
            });

        iter(events).map(Ok).boxed()
    }
}

#[async_trait]
impl<Id, Evt> Appender<Id, Evt> for InMemory<Id, Evt>
where
    Id: Clone + Eq + Hash + Send + Sync,
    Evt: message::Message + Clone + Send + Sync,
{
    async fn append(
        &self,
        id: Id,
        version_check: version::Check,
        events: Vec<event::Envelope<Evt>>,
    ) -> Result<version::Version, AppendError> {
        let mut backend = self
            .backend
            .write()
            .expect("acquire write lock on event store backend");

        // The current stream version is the version of the last persisted
        // event, or 0 (Version::default) for a non-existing/empty stream.
        let last_event_stream_version = backend
            .event_streams
            .get(&id)
            .and_then(|events| events.last())
            .map(|event| event.version)
            .unwrap_or_default();

        // Optimistic concurrency check: reject the append if the caller's
        // expected version does not match the actual stream version.
        if let version::Check::MustBe(expected) = version_check {
            if last_event_stream_version != expected {
                return Err(AppendError::Conflict(version::ConflictError {
                    expected,
                    actual: last_event_stream_version,
                }));
            }
        }

let mut persisted_events: Vec> = events 186 | .into_iter() 187 | .enumerate() 188 | .map(|(i, event)| event::Persisted { 189 | stream_id: id.clone(), 190 | version: last_event_stream_version + (i as u64) + 1, 191 | event, 192 | }) 193 | .collect(); 194 | 195 | let new_last_event_stream_version = persisted_events 196 | .last() 197 | .map(|evt| evt.version) 198 | .unwrap_or_default(); 199 | 200 | backend 201 | .event_streams 202 | .entry(id) 203 | .and_modify(|events| events.append(&mut persisted_events)) 204 | .or_insert_with(|| persisted_events); 205 | 206 | Ok(new_last_event_stream_version) 207 | } 208 | } 209 | 210 | /// Decorator type for an [`event::Store`] implementation that tracks the list of 211 | /// recorded Domain Events through it. 212 | /// 213 | /// Useful for testing purposes, i.e. asserting that Domain Events written throguh 214 | /// this Event Store instance are the ones expected. 215 | #[derive(Debug, Clone)] 216 | pub struct Tracking 217 | where 218 | T: Store + Send + Sync, 219 | StreamId: Send + Sync, 220 | Event: message::Message + Send + Sync, 221 | { 222 | store: T, 223 | 224 | #[allow(clippy::type_complexity)] // It is a complex type but still readable. 225 | events: Arc>>>, 226 | } 227 | 228 | impl Tracking 229 | where 230 | T: Store + Send + Sync, 231 | StreamId: Clone + Send + Sync, 232 | Event: message::Message + Clone + Send + Sync, 233 | { 234 | /// Returns the list of recoded Domain Events through this decorator so far. 235 | /// 236 | /// # Panics 237 | /// 238 | /// Since the internal data is thread-safe through an [`RwLock`], this method 239 | /// could potentially panic while attempting to get a read-only lock on the data recorded. 240 | pub fn recorded_events(&self) -> Vec> { 241 | self.events 242 | .read() 243 | .expect("acquire lock on recorded events list") 244 | .clone() 245 | } 246 | 247 | /// Resets the list of recorded Domain Events through this decorator. 
248 | /// 249 | /// # Panics 250 | /// 251 | /// Since the internal data is thread-safe through an [`RwLock`], this method 252 | /// could potentially panic while attempting to get a read-write lock to empty the internal store. 253 | pub fn reset_recorded_events(&self) { 254 | self.events 255 | .write() 256 | .expect("acquire lock on recorded events list") 257 | .clear(); 258 | } 259 | } 260 | 261 | impl Streamer for Tracking 262 | where 263 | T: Store + Send + Sync, 264 | StreamId: Clone + Send + Sync, 265 | Event: message::Message + Clone + Send + Sync, 266 | { 267 | type Error = >::Error; 268 | 269 | fn stream( 270 | &self, 271 | id: &StreamId, 272 | select: event::VersionSelect, 273 | ) -> event::Stream { 274 | self.store.stream(id, select) 275 | } 276 | } 277 | 278 | #[async_trait] 279 | impl Appender for Tracking 280 | where 281 | T: Store + Send + Sync, 282 | StreamId: Clone + Send + Sync, 283 | Event: message::Message + Clone + Send + Sync, 284 | { 285 | async fn append( 286 | &self, 287 | id: StreamId, 288 | version_check: version::Check, 289 | events: Vec>, 290 | ) -> Result { 291 | let new_version = self 292 | .store 293 | .append(id.clone(), version_check, events.clone()) 294 | .await?; 295 | 296 | let events_size = events.len(); 297 | let previous_version = new_version - (events_size as version::Version); 298 | 299 | let mut persisted_events = events 300 | .into_iter() 301 | .enumerate() 302 | .map(|(i, event)| event::Persisted { 303 | stream_id: id.clone(), 304 | version: previous_version + (i as version::Version) + 1, 305 | event, 306 | }) 307 | .collect(); 308 | 309 | self.events 310 | .write() 311 | .expect("acquire lock on recorded events list") 312 | .append(&mut persisted_events); 313 | 314 | Ok(new_version) 315 | } 316 | } 317 | 318 | /// Extension trait that can be used to pull in supertypes implemented 319 | /// in this module. 
320 | pub trait EventStoreExt: Store + Send + Sync + Sized 321 | where 322 | StreamId: Clone + Send + Sync, 323 | Event: message::Message + Clone + Send + Sync, 324 | { 325 | /// Returns a [`Tracking`] instance that decorates the original [`event::Store`] 326 | /// instanca this method has been called on. 327 | fn with_recorded_events_tracking(self) -> Tracking { 328 | Tracking { 329 | store: self, 330 | events: Arc::default(), 331 | } 332 | } 333 | } 334 | 335 | impl EventStoreExt for T 336 | where 337 | T: Store + Send + Sync, 338 | StreamId: Clone + Send + Sync, 339 | Event: message::Message + Clone + Send + Sync, 340 | { 341 | } 342 | 343 | #[allow(clippy::semicolon_if_nothing_returned)] // False positives :shrugs: 344 | #[cfg(test)] 345 | mod test { 346 | use std::sync::LazyLock; 347 | 348 | use futures::TryStreamExt; 349 | 350 | use super::*; 351 | use crate::event; 352 | use crate::event::store::{Appender, Streamer}; 353 | use crate::message::tests::StringMessage; 354 | use crate::version::Version; 355 | 356 | const STREAM_ID: &str = "stream:test"; 357 | 358 | static EVENTS: LazyLock>> = LazyLock::new(|| { 359 | vec![ 360 | event::Envelope::from(StringMessage("event-1")), 361 | event::Envelope::from(StringMessage("event-2")), 362 | event::Envelope::from(StringMessage("event-3")), 363 | ] 364 | }); 365 | 366 | #[tokio::test] 367 | async fn it_works() { 368 | let event_store = InMemory::<&'static str, StringMessage>::default(); 369 | 370 | let new_event_stream_version = event_store 371 | .append(STREAM_ID, version::Check::MustBe(0), EVENTS.clone()) 372 | .await 373 | .expect("append should not fail"); 374 | 375 | let expected_version = EVENTS.len() as Version; 376 | assert_eq!(expected_version, new_event_stream_version); 377 | 378 | let expected_events = EVENTS 379 | .clone() 380 | .into_iter() 381 | .enumerate() 382 | .map(|(i, event)| event::Persisted { 383 | stream_id: STREAM_ID, 384 | version: (i as Version) + 1, 385 | event, 386 | }) 387 | .collect::>(); 
388 | 389 | let event_stream: Vec<_> = event_store 390 | .stream(&STREAM_ID, event::VersionSelect::All) 391 | .try_collect() 392 | .await 393 | .expect("opening an event stream should not fail"); 394 | 395 | assert_eq!(expected_events, event_stream); 396 | } 397 | 398 | #[tokio::test] 399 | async fn tracking_store_works() { 400 | let event_store = InMemory::<&'static str, StringMessage>::default(); 401 | let tracking_event_store = event_store.with_recorded_events_tracking(); 402 | 403 | tracking_event_store 404 | .append(STREAM_ID, version::Check::MustBe(0), EVENTS.clone()) 405 | .await 406 | .expect("append should not fail"); 407 | 408 | let event_stream: Vec<_> = tracking_event_store 409 | .stream(&STREAM_ID, event::VersionSelect::All) 410 | .try_collect() 411 | .await 412 | .expect("opening an event stream should not fail"); 413 | 414 | assert_eq!(event_stream, tracking_event_store.recorded_events()); 415 | } 416 | 417 | #[tokio::test] 418 | async fn version_conflict_checks_work_as_expected() { 419 | let event_store = InMemory::<&'static str, StringMessage>::default(); 420 | 421 | let append_error = event_store 422 | .append(STREAM_ID, version::Check::MustBe(3), EVENTS.clone()) 423 | .await 424 | .expect_err("the event stream version should be zero"); 425 | 426 | if let AppendError::Conflict(err) = append_error { 427 | return assert_eq!( 428 | version::ConflictError { 429 | expected: 3, 430 | actual: 0, 431 | }, 432 | err 433 | ); 434 | } 435 | 436 | panic!("expected conflict error, received: {append_error}") 437 | } 438 | } 439 | -------------------------------------------------------------------------------- /eventually/src/lib.rs: -------------------------------------------------------------------------------- 1 | //! `eventually` is a crate that helps you apply different patterns to your Rust 2 | //! application domain code, such as: Event Sourcing, Aggregate Root, Outbox Pattern, 3 | //! and so on. 
4 | 5 | #![deny(unsafe_code, unused_qualifications, trivial_casts, missing_docs)] 6 | #![deny(clippy::all, clippy::pedantic, clippy::cargo)] 7 | 8 | pub mod aggregate; 9 | pub mod command; 10 | pub mod event; 11 | pub mod message; 12 | pub mod query; 13 | pub mod serde; 14 | #[cfg(feature = "tracing")] 15 | pub mod tracing; 16 | pub mod version; 17 | -------------------------------------------------------------------------------- /eventually/src/message.rs: -------------------------------------------------------------------------------- 1 | //! This module contains the definition of a [Message] type, which 2 | //! can be used to describe some sort of domain value such as a [Domain Event][crate::event::Envelope], 3 | //! a [Domain Command][crate::command::Envelope], and so on. 4 | 5 | use std::collections::HashMap; 6 | 7 | use serde::{Deserialize, Serialize}; 8 | 9 | /// Represents a piece of domain data that occurs in the system. 10 | /// 11 | /// Each Message has a specific name to it, which should ideally be 12 | /// unique within the domain you're operating in. Example: a Domain Event 13 | /// that represents when an Order was created can have a `name()`: `"OrderWasCreated"`. 14 | pub trait Message { 15 | /// Returns the domain name of the [Message]. 16 | fn name(&self) -> &'static str; 17 | } 18 | 19 | /// Optional metadata to attach to an [Envelope] to provide additional context 20 | /// to the [Message] carried out. 21 | pub type Metadata = HashMap; 22 | 23 | /// Represents a [Message] packaged for persistance and/or processing by other 24 | /// parts of the system. 25 | /// 26 | /// It carries both the actual message (i.e. a payload) and some optional [Metadata]. 27 | #[derive(Debug, Clone, Serialize, Deserialize)] 28 | pub struct Envelope 29 | where 30 | T: Message, 31 | { 32 | /// The message payload. 33 | pub message: T, 34 | /// Optional metadata to provide additional context to the message. 
35 | pub metadata: Metadata, 36 | } 37 | 38 | impl Envelope 39 | where 40 | T: Message, 41 | { 42 | /// Adds a new entry in the [Envelope]'s [Metadata]. 43 | #[must_use] 44 | pub fn with_metadata(mut self, key: String, value: String) -> Self { 45 | self.metadata.insert(key, value); 46 | self 47 | } 48 | } 49 | 50 | impl From for Envelope 51 | where 52 | T: Message, 53 | { 54 | fn from(message: T) -> Self { 55 | Envelope { 56 | message, 57 | metadata: Metadata::default(), 58 | } 59 | } 60 | } 61 | 62 | impl PartialEq for Envelope 63 | where 64 | T: Message + PartialEq, 65 | { 66 | fn eq(&self, other: &Envelope) -> bool { 67 | self.message == other.message 68 | } 69 | } 70 | 71 | #[cfg(test)] 72 | pub(crate) mod tests { 73 | use super::*; 74 | 75 | #[derive(Debug, Clone, Copy, PartialEq, Eq)] 76 | pub(crate) struct StringMessage(pub(crate) &'static str); 77 | 78 | impl Message for StringMessage { 79 | fn name(&self) -> &'static str { 80 | "string_payload" 81 | } 82 | } 83 | 84 | #[test] 85 | fn message_with_metadata_does_not_affect_equality() { 86 | let message = Envelope { 87 | message: StringMessage("hello"), 88 | metadata: Metadata::default(), 89 | }; 90 | 91 | let new_message = message 92 | .clone() 93 | .with_metadata("hello_world".into(), "test".into()) 94 | .with_metadata("test_number".into(), 1.to_string()); 95 | 96 | println!("Message: {message:?}"); 97 | println!("New message: {new_message:?}"); 98 | 99 | // Metadata does not affect equality of message. 100 | assert_eq!(message, new_message); 101 | } 102 | } 103 | -------------------------------------------------------------------------------- /eventually/src/query.rs: -------------------------------------------------------------------------------- 1 | //! Module `query` contains types and helpful abstractions to model Domain Queries 2 | //! and implement Domain Query Handlers. 
3 | 4 | use async_trait::async_trait; 5 | use futures::Future; 6 | 7 | use crate::message; 8 | 9 | /// A [Message][message::Message] carrying the Domain Query itself as payload 10 | /// and other relevant information as metadata. 11 | pub type Envelope = message::Envelope; 12 | 13 | /// An Handler describes an implementation that is able to handle specific [Queries][Envelope]. 14 | /// 15 | /// The Handler evaluates the Domain Query and produces a **result**, here described 16 | /// through the [Output][Handler::Output] associated type. 17 | #[async_trait] 18 | pub trait Handler: Send + Sync 19 | where 20 | T: message::Message + Send + Sync, 21 | { 22 | /// The result type the Handler produces when evaluating a Query. 23 | type Output: Send + Sync; 24 | /// The error type returned by the Handler when Query evaluation fails. 25 | type Error: Send + Sync; 26 | 27 | /// Evaluates the [Query][Envelope] provided and returns a result type, 28 | /// described by the [Output][Handler::Output] parameter. 29 | /// 30 | /// # Errors 31 | /// 32 | /// As the Handler can fail to evaluate the Query, an [Error][Handler::Error] 33 | /// can be returned instead. 34 | async fn handle(&self, query: Envelope) -> Result; 35 | } 36 | 37 | #[async_trait] 38 | impl Handler for F 39 | where 40 | T: message::Message + Send + Sync + 'static, 41 | R: Send + Sync, 42 | Err: Send + Sync, 43 | F: Send + Sync + Fn(Envelope) -> Fut, 44 | Fut: Send + Sync + Future>, 45 | { 46 | type Output = R; 47 | type Error = Err; 48 | 49 | async fn handle(&self, command: Envelope) -> Result { 50 | self(command).await 51 | } 52 | } 53 | -------------------------------------------------------------------------------- /eventually/src/serde.rs: -------------------------------------------------------------------------------- 1 | //! This module provides traits and implementations for serialization and 2 | //! deserialization, allowing you to convert Rust data structures to and from 3 | //! 
different formats like JSON, Protobuf, etc. 4 | 5 | use std::fmt::Display; 6 | use std::marker::PhantomData; 7 | 8 | use anyhow::anyhow; 9 | #[cfg(feature = "serde-prost")] 10 | use prost::bytes::Bytes; 11 | #[cfg(feature = "serde-json")] 12 | use serde::{Deserialize, Serialize}; 13 | 14 | /// A serializer interface that can be used to serialize a Rust data type 15 | /// into a specific wire format as a byte array. 16 | pub trait Serializer: Send + Sync { 17 | /// Serializes the given value into the protocol supported by this implementation. 18 | /// 19 | /// # Errors 20 | /// 21 | /// An error ([`anyhow::Error`]) is returned in case the serialization could not 22 | /// succeed as expected. 23 | fn serialize(&self, value: T) -> anyhow::Result>; 24 | } 25 | 26 | /// A deserializer interface that can be used to deserialize a byte array 27 | /// into an instance of a specific Rust data type from a specific wire format. 28 | pub trait Deserializer: Send + Sync { 29 | /// Deserializes the given value from a message encoded in the wire format 30 | /// supported by this implementation. 31 | /// 32 | /// # Errors 33 | /// 34 | /// An error ([`anyhow::Error`]) is returned in case the deserialization could not 35 | /// succeed as expected. 36 | fn deserialize(&self, data: &[u8]) -> anyhow::Result; 37 | } 38 | 39 | /// [Serializer] and [Deserializer] that can be used to serialize into and deserialize 40 | /// from a given type into a specific wire format, such as JSON, Protobuf, etc. 41 | pub trait Serde: Serializer + Deserializer + Send + Sync {} 42 | 43 | impl Serde for S where S: Serializer + Deserializer {} 44 | 45 | /// Implements the [Serde] trait to translate between two different types, 46 | /// and using the specified [Serde] for serialization and deserialization 47 | /// using the new `Out` type. 
48 | #[derive(Clone, Copy)] 49 | pub struct Convert 50 | where 51 | In: Send + Sync, 52 | Out: Send + Sync, 53 | S: Serde + Send + Sync, 54 | { 55 | serde: S, 56 | inn: PhantomData, 57 | out: PhantomData, 58 | } 59 | 60 | impl Convert 61 | where 62 | In: Send + Sync, 63 | Out: Send + Sync, 64 | S: Serde + Send + Sync, 65 | { 66 | /// Creates a new [Convert] serde instance. 67 | pub fn new(serde: S) -> Self { 68 | Self { 69 | serde, 70 | inn: PhantomData, 71 | out: PhantomData, 72 | } 73 | } 74 | } 75 | 76 | impl Serializer for Convert 77 | where 78 | In: TryFrom + Send + Sync, 79 | Out: TryFrom + Send + Sync, 80 | >::Error: Display, 81 | S: Serde + Send + Sync, 82 | { 83 | fn serialize(&self, value: In) -> anyhow::Result> { 84 | self.serde.serialize( 85 | value 86 | .try_into() 87 | .map_err(|err| anyhow!("failed to convert type values: {}", err))?, 88 | ) 89 | } 90 | } 91 | 92 | impl Deserializer for Convert 93 | where 94 | In: TryFrom + Send + Sync, 95 | Out: TryFrom + Send + Sync, 96 | >::Error: Display, 97 | S: Serde + Send + Sync, 98 | { 99 | fn deserialize(&self, data: &[u8]) -> anyhow::Result { 100 | let inn = self.serde.deserialize(data)?; 101 | 102 | inn.try_into() 103 | .map_err(|err| anyhow!("failed to convert type values: {}", err)) 104 | } 105 | } 106 | 107 | /// Implements the [Serializer] and [Deserializer] traits, which use the [serde] crate 108 | /// to serialize and deserialize a message into JSON. 
109 | #[cfg(feature = "serde-json")] 110 | #[derive(Debug, Clone, Copy)] 111 | pub struct Json(PhantomData) 112 | where 113 | T: Serialize + Send + Sync, 114 | for<'d> T: Deserialize<'d>; 115 | 116 | #[cfg(feature = "serde-json")] 117 | impl Default for Json 118 | where 119 | T: Serialize + Send + Sync, 120 | for<'d> T: Deserialize<'d>, 121 | { 122 | fn default() -> Self { 123 | Self(PhantomData) 124 | } 125 | } 126 | 127 | #[cfg(feature = "serde-json")] 128 | impl Serializer for Json 129 | where 130 | T: Serialize + Send + Sync, 131 | for<'d> T: Deserialize<'d>, 132 | { 133 | fn serialize(&self, value: T) -> anyhow::Result> { 134 | serde_json::to_vec(&value) 135 | .map_err(|err| anyhow!("failed to serialize value to json: {}", err)) 136 | } 137 | } 138 | 139 | #[cfg(feature = "serde-json")] 140 | impl Deserializer for Json 141 | where 142 | T: Serialize + Send + Sync, 143 | for<'d> T: Deserialize<'d>, 144 | { 145 | fn deserialize(&self, data: &[u8]) -> anyhow::Result { 146 | serde_json::from_slice(data) 147 | .map_err(|err| anyhow!("failed to deserialize value from json: {}", err)) 148 | } 149 | } 150 | 151 | /// Implements the [Serde] trait which serializes and deserializes 152 | /// the message using Protobuf format through the [`prost::Message`] trait. 
153 | #[cfg(feature = "serde-prost")] 154 | #[derive(Debug, Clone, Copy, Default)] 155 | pub struct Protobuf(PhantomData) 156 | where 157 | T: prost::Message + Default; 158 | 159 | #[cfg(feature = "serde-prost")] 160 | impl Serializer for Protobuf 161 | where 162 | T: prost::Message + Default, 163 | { 164 | fn serialize(&self, value: T) -> anyhow::Result> { 165 | Ok(value.encode_to_vec()) 166 | } 167 | } 168 | 169 | #[cfg(feature = "serde-prost")] 170 | impl Deserializer for Protobuf 171 | where 172 | T: prost::Message + Default, 173 | { 174 | fn deserialize(&self, data: &[u8]) -> anyhow::Result { 175 | let buf = Bytes::copy_from_slice(data); 176 | 177 | T::decode(buf) 178 | .map_err(|err| anyhow!("failed to deserialize protobuf message into value: {}", err)) 179 | } 180 | } 181 | 182 | /// Implementation of [Serde] traits that uses [ProtoJson](https://protobuf.dev/programming-guides/proto3/#json) 183 | /// as wire protocol. 184 | #[cfg(feature = "serde-prost")] 185 | #[cfg(feature = "serde-json")] 186 | #[derive(Clone, Copy, Default)] 187 | pub struct ProtoJson(PhantomData) 188 | where 189 | T: prost::Message + Serialize + Default, 190 | for<'de> T: Deserialize<'de>; 191 | 192 | #[cfg(feature = "serde-prost")] 193 | #[cfg(feature = "serde-json")] 194 | impl Serializer for ProtoJson 195 | where 196 | T: prost::Message + Serialize + Default, 197 | for<'de> T: Deserialize<'de>, 198 | { 199 | fn serialize(&self, value: T) -> anyhow::Result> { 200 | Json::::default().serialize(value) 201 | } 202 | } 203 | 204 | #[cfg(feature = "serde-prost")] 205 | #[cfg(feature = "serde-json")] 206 | impl Deserializer for ProtoJson 207 | where 208 | T: prost::Message + Serialize + Default, 209 | for<'de> T: Deserialize<'de>, 210 | { 211 | fn deserialize(&self, data: &[u8]) -> anyhow::Result { 212 | Json::::default().deserialize(data) 213 | } 214 | } 215 | -------------------------------------------------------------------------------- /eventually/src/tracing.rs: 
-------------------------------------------------------------------------------- 1 | //! Module containing some extension traits to support code instrumentation 2 | //! using the `tracing` crate. 3 | 4 | use std::fmt::Debug; 5 | use std::marker::PhantomData; 6 | 7 | use async_trait::async_trait; 8 | use tracing::instrument; 9 | 10 | use crate::aggregate::Aggregate; 11 | use crate::version::{self, Version}; 12 | use crate::{aggregate, event, message}; 13 | 14 | /// [`aggregate::Repository`] type wrapper that provides instrumentation 15 | /// features through the `tracing` crate. 16 | #[derive(Debug, Clone)] 17 | pub struct InstrumentedAggregateRepository 18 | where 19 | T: Aggregate + Debug, 20 | ::Id: Debug, 21 | ::Event: Debug, 22 | Inner: aggregate::Repository, 23 | { 24 | inner: Inner, 25 | t: PhantomData, 26 | } 27 | 28 | #[async_trait] 29 | impl aggregate::repository::Getter for InstrumentedAggregateRepository 30 | where 31 | T: Aggregate + Debug, 32 | ::Id: Debug, 33 | ::Event: Debug, 34 | Inner: aggregate::Repository, 35 | { 36 | #[allow(clippy::blocks_in_conditions)] // NOTE(ar3s3ru): seems to be a false positive. 37 | #[instrument(name = "aggregate::repository::Getter.get", ret, err, skip(self))] 38 | async fn get(&self, id: &T::Id) -> Result, aggregate::repository::GetError> { 39 | self.inner.get(id).await 40 | } 41 | } 42 | 43 | #[async_trait] 44 | impl aggregate::repository::Saver for InstrumentedAggregateRepository 45 | where 46 | T: Aggregate + Debug, 47 | ::Id: Debug, 48 | ::Event: Debug, 49 | Inner: aggregate::Repository, 50 | { 51 | #[allow(clippy::blocks_in_conditions)] // NOTE(ar3s3ru): seems to be a false positive. 
52 | #[instrument(name = "aggregate::repository::Saver.save", ret, err, skip(self))] 53 | async fn save( 54 | &self, 55 | root: &mut aggregate::Root, 56 | ) -> Result<(), aggregate::repository::SaveError> { 57 | self.inner.save(root).await 58 | } 59 | } 60 | 61 | /// Extension trait for any [`aggregate::Repository`] type to provide 62 | /// instrumentation features through the `tracing` crate. 63 | pub trait AggregateRepositoryExt: aggregate::Repository + Sized 64 | where 65 | T: Aggregate + Debug, 66 | ::Id: Debug, 67 | ::Event: Debug, 68 | { 69 | /// Returns an instrumented version of the [`aggregate::Repository`] instance. 70 | fn with_tracing(self) -> InstrumentedAggregateRepository { 71 | InstrumentedAggregateRepository { 72 | inner: self, 73 | t: PhantomData, 74 | } 75 | } 76 | } 77 | 78 | impl AggregateRepositoryExt for R 79 | where 80 | R: aggregate::Repository, 81 | T: Aggregate + Debug, 82 | ::Id: Debug, 83 | ::Event: Debug, 84 | { 85 | } 86 | 87 | /// [`event::Store`] type wrapper that provides instrumentation 88 | /// features through the `tracing` crate. 
89 | #[derive(Debug, Clone)] 90 | pub struct InstrumentedEventStore 91 | where 92 | T: event::Store + Send + Sync, 93 | StreamId: Debug + Send + Sync, 94 | Event: message::Message + Debug + Send + Sync, 95 | { 96 | store: T, 97 | stream_id: PhantomData, 98 | event: PhantomData, 99 | } 100 | 101 | impl event::store::Streamer 102 | for InstrumentedEventStore 103 | where 104 | T: event::Store + Send + Sync, 105 | StreamId: Debug + Send + Sync, 106 | Event: message::Message + Debug + Send + Sync, 107 | { 108 | type Error = >::Error; 109 | 110 | #[instrument(name = "event::Store.stream", skip(self))] 111 | fn stream( 112 | &self, 113 | id: &StreamId, 114 | select: event::VersionSelect, 115 | ) -> event::Stream { 116 | self.store.stream(id, select) 117 | } 118 | } 119 | 120 | #[async_trait] 121 | impl event::store::Appender 122 | for InstrumentedEventStore 123 | where 124 | T: event::Store + Send + Sync, 125 | StreamId: Debug + Send + Sync, 126 | Event: message::Message + Debug + Send + Sync, 127 | { 128 | #[allow(clippy::blocks_in_conditions)] // NOTE(ar3s3ru): seems to be a false positive. 129 | #[instrument(name = "event::Store.append", ret, err, skip(self))] 130 | async fn append( 131 | &self, 132 | id: StreamId, 133 | version_check: version::Check, 134 | events: Vec>, 135 | ) -> Result { 136 | self.store.append(id, version_check, events).await 137 | } 138 | } 139 | 140 | /// Extension trait for any [`event::Store`] type to provide 141 | /// instrumentation features through the `tracing` crate. 142 | pub trait EventStoreExt: event::Store + Sized 143 | where 144 | StreamId: Debug + Send + Sync, 145 | Event: message::Message + Debug + Send + Sync, 146 | { 147 | /// Returns an instrumented version of the [`event::Store`] instance. 
148 | fn with_tracing(self) -> InstrumentedEventStore { 149 | InstrumentedEventStore { 150 | store: self, 151 | stream_id: PhantomData, 152 | event: PhantomData, 153 | } 154 | } 155 | } 156 | 157 | impl EventStoreExt for T 158 | where 159 | T: event::Store + Send + Sync, 160 | StreamId: Debug + Send + Sync, 161 | Event: message::Message + Debug + Send + Sync, 162 | { 163 | } 164 | -------------------------------------------------------------------------------- /eventually/src/version.rs: -------------------------------------------------------------------------------- 1 | //! Contains the types necessary for Optimistic Locking through versioning. 2 | 3 | /// A version used for Optimistic Locking. 4 | /// 5 | /// Used by the [`aggregate::Root`][crate::aggregate::Root] to avoid concurrency issues, 6 | /// and [`event::Store`][crate::event::Store] to implement stream-local ordering to the messages. 7 | pub type Version = u64; 8 | 9 | /// Used to set a specific expectation during an operation 10 | /// that mutates some sort of resource (e.g. an [Event Stream][crate::event::Stream]) 11 | /// that supports versioning. 12 | /// 13 | /// It allows for optimistic locking, avoiding data races 14 | /// when modifying the same resource at the same time. 15 | #[derive(Debug, Clone, Copy, PartialEq, Eq)] 16 | pub enum Check { 17 | /// Disables any kind of optimistic locking check, allowing 18 | /// for any [Version] to be used compared to the new one. 19 | Any, 20 | /// Expects that the previous [Version] used for the operation 21 | /// must have the value specified. 22 | MustBe(Version), 23 | } 24 | 25 | /// This error is returned by a function when a version conflict error has 26 | /// been detected. 27 | #[derive(Debug, Clone, Copy, PartialEq, Eq, thiserror::Error)] 28 | #[error("conflict error detected, expected version was: {expected}, found: {actual}")] 29 | pub struct ConflictError { 30 | /// The [Version] value that was expected when calling the function that failed. 
31 | pub expected: Version, 32 | 33 | /// The actual [Version] value, which mismatch caused this error. 34 | pub actual: Version, 35 | } 36 | -------------------------------------------------------------------------------- /examples/bank-accounting/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "bank-accounting" 3 | version = "0.1.0" 4 | edition = "2021" 5 | readme = "README.md" 6 | publish = false 7 | 8 | [dependencies] 9 | anyhow = "1.0.97" 10 | async-trait = "0.1.77" 11 | eventually = { path = "../../eventually", features = [ 12 | "serde-prost", 13 | "tracing", 14 | ] } 15 | eventually-macros = { path = "../../eventually-macros" } 16 | eventually-postgres = { path = "../../eventually-postgres" } 17 | opentelemetry = "0.21.0" 18 | opentelemetry-otlp = "0.14.0" 19 | opentelemetry_sdk = { version = "0.21.2", features = ["rt-tokio"] } 20 | prost = "0.13.5" 21 | rust_decimal = "1.34.3" 22 | sqlx = { version = "0.8.3", features = ["runtime-tokio-rustls", "postgres"] } 23 | thiserror = "2.0.12" 24 | tokio = { version = "1.36.0", features = ["macros", "rt-multi-thread"] } 25 | tonic = { version = "0.12.3", features = ["gzip", "transport"] } 26 | tonic-health = "0.12.3" 27 | tonic-reflection = "0.12.3" 28 | tower = "0.4.13" 29 | tracing = "0.1.40" 30 | tracing-opentelemetry = "0.22.0" 31 | tracing-subscriber = { version = "0.3.18", features = [ 32 | "fmt", 33 | "std", 34 | "registry", 35 | "env-filter", 36 | ] } 37 | 38 | [dev-dependencies] 39 | 40 | [build-dependencies] 41 | tonic-build = { version = "0.12.3", features = ["prost"] } 42 | -------------------------------------------------------------------------------- /examples/bank-accounting/README.md: -------------------------------------------------------------------------------- 1 | # Example: Bank Accounting application 2 | 3 | In this folder you can find an example Event-sourced application for a generic (and simple) Bank Accounting bounded 
context, implemented using the `eventually` crate. 4 | 5 | This example application should be useful for people interested in: 6 | 1. Having a possible reference as to model a Domain Layer using the crate, 7 | 2. Starting out from a reference package and code structure, 8 | 3. Modeling Business Processes using Projections/Event Subscriptions. 9 | -------------------------------------------------------------------------------- /examples/bank-accounting/build.rs: -------------------------------------------------------------------------------- 1 | use std::env; 2 | use std::path::PathBuf; 3 | 4 | fn main() { 5 | let out_dir = PathBuf::from(env::var("OUT_DIR").unwrap()); 6 | 7 | tonic_build::configure() 8 | .file_descriptor_set_path(out_dir.join("bankaccouting_descriptor.bin")) 9 | .build_server(true) 10 | .build_client(false) 11 | .compile_protos( 12 | &["proto/bank_accounting.proto", "proto/bank_account.proto"], 13 | &["proto"], 14 | ) 15 | .unwrap(); 16 | } 17 | -------------------------------------------------------------------------------- /examples/bank-accounting/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "3" 2 | services: 3 | event-store: 4 | image: postgres:13 5 | restart: on-failure 6 | command: 7 | ["postgres", "-c", "log_statement=all", "-c", "log_destination=stderr"] 8 | ports: 9 | - "5432:5432" 10 | environment: 11 | POSTGRES_PASSWORD: password 12 | 13 | jaeger: 14 | image: jaegertracing/all-in-one:latest 15 | ports: 16 | - "6831:6831/udp" 17 | - "6832:6832/udp" 18 | - "16686:16686" 19 | -------------------------------------------------------------------------------- /examples/bank-accounting/proto/bank_account.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package bankaccount; 4 | 5 | message Transaction { 6 | string id = 1; 7 | string beneficiary_account_id = 2; 8 | float amount = 3; 9 | }; 10 | 11 | message 
Event { 12 | message WasOpened { 13 | string id = 1; 14 | string account_holder_id = 2; 15 | float initial_balance = 3; 16 | }; 17 | 18 | message DepositWasRecorded { 19 | float amount = 1; 20 | }; 21 | 22 | message TransferWasSent { 23 | Transaction transaction = 1; 24 | optional string msg = 2; 25 | }; 26 | 27 | message TransferWasReceived { 28 | Transaction transaction = 1; 29 | optional string msg = 2; 30 | }; 31 | 32 | message TransferWasDeclined { 33 | string transaction_id = 1; 34 | optional string reason = 2; 35 | }; 36 | 37 | message TransferWasConfirmed { 38 | string transaction_id = 1; 39 | }; 40 | 41 | message WasClosed {}; 42 | 43 | message WasReopened { 44 | float reopening_balance = 1; 45 | }; 46 | 47 | oneof event { 48 | WasOpened was_opened = 1; 49 | DepositWasRecorded deposit_was_recorded = 2; 50 | TransferWasSent transfer_was_sent = 3; 51 | TransferWasReceived transfer_was_received = 4; 52 | TransferWasConfirmed transfer_was_confimed = 5; 53 | TransferWasDeclined transfer_was_declined = 6; 54 | WasClosed was_closed = 7; 55 | WasReopened was_reopened = 8; 56 | }; 57 | }; 58 | -------------------------------------------------------------------------------- /examples/bank-accounting/proto/bank_accounting.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package bankaccounting; 4 | 5 | service BankAccounting { 6 | rpc OpenBankAccount(OpenBankAccountRequest) returns (OpenBankAccountResponse) {} 7 | rpc DepositInBankAccount(DepositInBankAccountRequest) returns (DepositInBankAccountResponse) {} 8 | } 9 | 10 | message OpenBankAccountRequest { 11 | string bank_account_id = 1; 12 | string bank_account_holder_id = 2; 13 | float opening_balance = 3; 14 | } 15 | 16 | message OpenBankAccountResponse {} 17 | 18 | message DepositInBankAccountRequest { 19 | string bank_account_id = 1; 20 | float amount = 2; 21 | } 22 | 23 | message DepositInBankAccountResponse {} 24 | 
-------------------------------------------------------------------------------- /examples/bank-accounting/src/application.rs: -------------------------------------------------------------------------------- 1 | use std::sync::Arc; 2 | 3 | use async_trait::async_trait; 4 | use eventually::{aggregate, command, message}; 5 | use rust_decimal::Decimal; 6 | 7 | use crate::domain::{ 8 | BankAccount, BankAccountHolderId, BankAccountId, BankAccountRoot, Transaction, 9 | }; 10 | 11 | #[derive(Clone)] 12 | pub struct Service { 13 | repository: Arc>, 14 | } 15 | 16 | impl From for Service 17 | where 18 | R: aggregate::Repository + 'static, 19 | { 20 | fn from(repository: R) -> Self { 21 | Self { 22 | repository: Arc::new(repository), 23 | } 24 | } 25 | } 26 | 27 | #[derive(Debug, Clone, PartialEq, Eq)] 28 | pub struct OpenBankAccount { 29 | pub bank_account_id: BankAccountId, 30 | pub bank_account_holder_id: BankAccountHolderId, 31 | pub opening_balance: Option, 32 | } 33 | 34 | impl message::Message for OpenBankAccount { 35 | fn name(&self) -> &'static str { 36 | "OpenBankAccount" 37 | } 38 | } 39 | 40 | #[async_trait] 41 | impl command::Handler for Service { 42 | type Error = anyhow::Error; 43 | 44 | async fn handle(&self, command: command::Envelope) -> Result<(), Self::Error> { 45 | let command = command.message; 46 | 47 | let mut bank_account = BankAccountRoot::open( 48 | command.bank_account_id, 49 | command.bank_account_holder_id, 50 | command.opening_balance, 51 | )?; 52 | 53 | self.repository.save(&mut bank_account).await?; 54 | 55 | Ok(()) 56 | } 57 | } 58 | 59 | #[derive(Debug, Clone, PartialEq, Eq)] 60 | pub struct DepositInBankAccount { 61 | pub bank_account_id: BankAccountId, 62 | pub amount: Decimal, 63 | } 64 | 65 | impl message::Message for DepositInBankAccount { 66 | fn name(&self) -> &'static str { 67 | "DepositInBankAccount" 68 | } 69 | } 70 | 71 | #[async_trait] 72 | impl command::Handler for Service { 73 | type Error = anyhow::Error; 74 | 75 | async fn 
handle( 76 | &self, 77 | command: command::Envelope, 78 | ) -> Result<(), Self::Error> { 79 | let command = command.message; 80 | 81 | let mut bank_account: BankAccountRoot = 82 | self.repository.get(&command.bank_account_id).await?.into(); 83 | 84 | bank_account.deposit(command.amount)?; 85 | 86 | self.repository.save(&mut bank_account).await?; 87 | 88 | Ok(()) 89 | } 90 | } 91 | 92 | #[derive(Debug, Clone, PartialEq, Eq)] 93 | pub struct SendTransferToBankAccount { 94 | pub bank_account_id: BankAccountId, 95 | pub transaction: Transaction, 96 | pub message: Option, 97 | } 98 | 99 | impl message::Message for SendTransferToBankAccount { 100 | fn name(&self) -> &'static str { 101 | "SendTransferToBankAccount" 102 | } 103 | } 104 | 105 | #[async_trait] 106 | impl command::Handler for Service { 107 | type Error = anyhow::Error; 108 | 109 | async fn handle( 110 | &self, 111 | command: command::Envelope, 112 | ) -> Result<(), Self::Error> { 113 | let command = command.message; 114 | 115 | let mut bank_account: BankAccountRoot = 116 | self.repository.get(&command.bank_account_id).await?.into(); 117 | 118 | bank_account.send_transfer(command.transaction, command.message)?; 119 | 120 | self.repository.save(&mut bank_account).await?; 121 | 122 | Ok(()) 123 | } 124 | } 125 | 126 | #[cfg(test)] 127 | mod test { 128 | use eventually::{command, event}; 129 | use rust_decimal::Decimal; 130 | 131 | use crate::application; 132 | use crate::domain::{BankAccountEvent, BankAccountRepository, Transaction}; 133 | 134 | #[tokio::test] 135 | async fn open_bank_account_works_when_bank_account_has_just_been_opened_for_the_first_time() { 136 | command::test::Scenario 137 | .when( 138 | application::OpenBankAccount { 139 | bank_account_id: "account-test".to_owned(), 140 | bank_account_holder_id: "dani".to_owned(), 141 | opening_balance: Some(Decimal::new(1000, 2)), // 10,00 142 | } 143 | .into(), 144 | ) 145 | .then(vec![event::Persisted { 146 | stream_id: "account-test".to_owned(), 147 | 
version: 1, 148 | event: BankAccountEvent::WasOpened { 149 | id: "account-test".to_owned(), 150 | account_holder_id: "dani".to_owned(), 151 | initial_balance: Some(Decimal::new(1000, 2)), 152 | } 153 | .into(), 154 | }]) 155 | .assert_on(|event_store| { 156 | application::Service::from(BankAccountRepository::from(event_store)) 157 | }) 158 | .await; 159 | } 160 | 161 | #[tokio::test] 162 | async fn open_bank_account_fails_if_the_account_already_exists() { 163 | command::test::Scenario 164 | .given(vec![event::Persisted { 165 | stream_id: "account-test".to_owned(), 166 | version: 1, 167 | event: BankAccountEvent::WasOpened { 168 | id: "account-test".to_owned(), 169 | account_holder_id: "dani".to_owned(), 170 | initial_balance: Some(Decimal::new(1000, 2)), 171 | } 172 | .into(), 173 | }]) 174 | .when( 175 | application::OpenBankAccount { 176 | bank_account_id: "account-test".to_owned(), 177 | bank_account_holder_id: "dani".to_owned(), 178 | opening_balance: Some(Decimal::new(1000, 2)), // 10,00 179 | } 180 | .into(), 181 | ) 182 | .then_fails() 183 | .assert_on(|event_store| { 184 | application::Service::from(BankAccountRepository::from(event_store)) 185 | }) 186 | .await; 187 | } 188 | 189 | #[tokio::test] 190 | async fn deposit_money_fails_on_unexisting_bank_account() { 191 | command::test::Scenario 192 | .when( 193 | application::DepositInBankAccount { 194 | bank_account_id: "account-test".to_owned(), 195 | amount: Decimal::new(2000, 2), // 20,00 196 | } 197 | .into(), 198 | ) 199 | .then_fails() 200 | .assert_on(|event_store| { 201 | application::Service::from(BankAccountRepository::from(event_store)) 202 | }) 203 | .await; 204 | } 205 | 206 | #[tokio::test] 207 | async fn deposit_money_on_existing_bank_account_works_when_amount_is_positive() { 208 | command::test::Scenario 209 | .given(vec![event::Persisted { 210 | stream_id: "account-test".to_owned(), 211 | version: 1, 212 | event: BankAccountEvent::WasOpened { 213 | id: "account-test".to_owned(), 214 | 
account_holder_id: "dani".to_owned(), 215 | initial_balance: Some(Decimal::new(1000, 2)), 216 | } 217 | .into(), 218 | }]) 219 | .when( 220 | application::DepositInBankAccount { 221 | bank_account_id: "account-test".to_owned(), 222 | amount: Decimal::new(2000, 2), // 20,00 223 | } 224 | .into(), 225 | ) 226 | .then(vec![event::Persisted { 227 | stream_id: "account-test".to_owned(), 228 | version: 2, 229 | event: BankAccountEvent::DepositWasRecorded { 230 | amount: Decimal::new(2000, 2), // 20,00 231 | } 232 | .into(), 233 | }]) 234 | .assert_on(|event_store| { 235 | application::Service::from(BankAccountRepository::from(event_store)) 236 | }) 237 | .await; 238 | } 239 | 240 | #[tokio::test] 241 | async fn deposit_money_on_existing_bank_account_fails_when_amount_is_negative() { 242 | command::test::Scenario 243 | .given(vec![event::Persisted { 244 | stream_id: "account-test".to_owned(), 245 | version: 1, 246 | event: BankAccountEvent::WasOpened { 247 | id: "account-test".to_owned(), 248 | account_holder_id: "dani".to_owned(), 249 | initial_balance: Some(Decimal::new(1000, 2)), 250 | } 251 | .into(), 252 | }]) 253 | .when( 254 | application::DepositInBankAccount { 255 | bank_account_id: "account-test".to_owned(), 256 | amount: Decimal::new(-2000, 2), // -20,00 257 | } 258 | .into(), 259 | ) 260 | .then_fails() 261 | .assert_on(|event_store| { 262 | application::Service::from(BankAccountRepository::from(event_store)) 263 | }) 264 | .await; 265 | } 266 | 267 | #[tokio::test] 268 | async fn deposit_money_with_zero_amount_in_open_bank_account_fails() { 269 | command::test::Scenario 270 | .given(vec![event::Persisted { 271 | stream_id: "account-test".to_owned(), 272 | version: 1, 273 | event: BankAccountEvent::WasOpened { 274 | id: "account-test".to_owned(), 275 | account_holder_id: "dani".to_owned(), 276 | initial_balance: Some(Decimal::new(1000, 2)), 277 | } 278 | .into(), 279 | }]) 280 | .when( 281 | application::DepositInBankAccount { 282 | bank_account_id: 
"account-test".to_owned(), 283 | amount: Decimal::new(0, 0), 284 | } 285 | .into(), 286 | ) 287 | .then_fails() 288 | .assert_on(|event_store| { 289 | application::Service::from(BankAccountRepository::from(event_store)) 290 | }) 291 | .await; 292 | } 293 | 294 | #[tokio::test] 295 | async fn deposit_money_on_existing_bank_account_fails_when_account_is_closed() { 296 | command::test::Scenario 297 | .given(vec![ 298 | event::Persisted { 299 | stream_id: "account-test".to_owned(), 300 | version: 1, 301 | event: BankAccountEvent::WasOpened { 302 | id: "account-test".to_owned(), 303 | account_holder_id: "dani".to_owned(), 304 | initial_balance: Some(Decimal::new(1000, 2)), 305 | } 306 | .into(), 307 | }, 308 | event::Persisted { 309 | stream_id: "account-test".to_owned(), 310 | version: 2, 311 | event: BankAccountEvent::WasClosed.into(), 312 | }, 313 | ]) 314 | .when( 315 | application::DepositInBankAccount { 316 | bank_account_id: "account-test".to_owned(), 317 | amount: Decimal::new(2000, 2), // 20,00 318 | } 319 | .into(), 320 | ) 321 | .then_fails() 322 | .assert_on(|event_store| { 323 | application::Service::from(BankAccountRepository::from(event_store)) 324 | }) 325 | .await; 326 | } 327 | 328 | #[tokio::test] 329 | async fn send_transfer_fails_if_bank_account_does_not_exist() { 330 | command::test::Scenario 331 | .when( 332 | application::SendTransferToBankAccount { 333 | bank_account_id: "sender".to_owned(), 334 | transaction: Transaction { 335 | id: "transaction".to_owned(), 336 | beneficiary_account_id: "receiver".to_owned(), 337 | amount: Decimal::new(2000, 2), 338 | }, 339 | message: None, 340 | } 341 | .into(), 342 | ) 343 | .then_fails() 344 | .assert_on(|event_store| { 345 | application::Service::from(BankAccountRepository::from(event_store)) 346 | }) 347 | .await; 348 | } 349 | 350 | #[tokio::test] 351 | async fn send_transfer_fails_if_bank_account_does_not_have_sufficient_funds() { 352 | command::test::Scenario 353 | .given(vec![ 354 | event::Persisted 
{ 355 | stream_id: "sender".to_owned(), 356 | version: 1, 357 | event: BankAccountEvent::WasOpened { 358 | id: "sender".to_owned(), 359 | account_holder_id: "sender-name".to_owned(), 360 | initial_balance: Some(Decimal::new(1_000, 0)), 361 | } 362 | .into(), 363 | }, 364 | event::Persisted { 365 | stream_id: "receiver".to_owned(), 366 | version: 1, 367 | event: BankAccountEvent::WasOpened { 368 | id: "receiver".to_owned(), 369 | account_holder_id: "receiver-name".to_owned(), 370 | initial_balance: None, 371 | } 372 | .into(), 373 | }, 374 | ]) 375 | .when( 376 | application::SendTransferToBankAccount { 377 | bank_account_id: "sender".to_owned(), 378 | transaction: Transaction { 379 | id: "transaction".to_owned(), 380 | beneficiary_account_id: "receiver".to_owned(), 381 | amount: Decimal::new(2_000, 0), 382 | }, 383 | message: None, 384 | } 385 | .into(), 386 | ) 387 | .then_fails() 388 | .assert_on(|event_store| { 389 | application::Service::from(BankAccountRepository::from(event_store)) 390 | }) 391 | .await; 392 | } 393 | 394 | #[tokio::test] 395 | async fn send_transfer_works_if_bank_account_has_sufficient_funds() { 396 | command::test::Scenario 397 | .given(vec![ 398 | event::Persisted { 399 | stream_id: "sender".to_owned(), 400 | version: 1, 401 | event: BankAccountEvent::WasOpened { 402 | id: "sender".to_owned(), 403 | account_holder_id: "sender-name".to_owned(), 404 | initial_balance: Some(Decimal::new(1_000, 0)), 405 | } 406 | .into(), 407 | }, 408 | event::Persisted { 409 | stream_id: "receiver".to_owned(), 410 | version: 1, 411 | event: BankAccountEvent::WasOpened { 412 | id: "receiver".to_owned(), 413 | account_holder_id: "receiver-name".to_owned(), 414 | initial_balance: None, 415 | } 416 | .into(), 417 | }, 418 | ]) 419 | .when( 420 | application::SendTransferToBankAccount { 421 | bank_account_id: "sender".to_owned(), 422 | transaction: Transaction { 423 | id: "transaction".to_owned(), 424 | beneficiary_account_id: "receiver".to_owned(), 425 | amount: 
Decimal::new(500, 0), 426 | }, 427 | message: None, 428 | } 429 | .into(), 430 | ) 431 | .then(vec![event::Persisted { 432 | stream_id: "sender".to_owned(), 433 | version: 2, 434 | event: BankAccountEvent::TransferWasSent { 435 | transaction: Transaction { 436 | id: "transaction".to_owned(), 437 | beneficiary_account_id: "receiver".to_owned(), 438 | amount: Decimal::new(500, 0), 439 | }, 440 | message: None, 441 | } 442 | .into(), 443 | }]) 444 | .assert_on(|event_store| { 445 | application::Service::from(BankAccountRepository::from(event_store)) 446 | }) 447 | .await; 448 | } 449 | } 450 | -------------------------------------------------------------------------------- /examples/bank-accounting/src/domain.rs: -------------------------------------------------------------------------------- 1 | use std::collections::HashMap; 2 | 3 | use eventually::aggregate; 4 | use eventually::message::Message; 5 | use eventually_macros::aggregate_root; 6 | use rust_decimal::Decimal; 7 | 8 | pub type BankAccountRepository = aggregate::EventSourcedRepository; 9 | 10 | pub type TransactionId = String; 11 | 12 | #[derive(Debug, Clone, PartialEq, Eq)] 13 | pub struct Transaction { 14 | pub id: TransactionId, 15 | pub beneficiary_account_id: BankAccountId, 16 | pub amount: Decimal, 17 | } 18 | 19 | pub type BankAccountHolderId = String; 20 | pub type BankAccountId = String; 21 | 22 | #[derive(Debug, Clone, PartialEq, Eq)] 23 | pub enum BankAccountEvent { 24 | WasOpened { 25 | id: BankAccountId, 26 | account_holder_id: BankAccountHolderId, 27 | initial_balance: Option, 28 | }, 29 | DepositWasRecorded { 30 | amount: Decimal, 31 | }, 32 | TransferWasSent { 33 | transaction: Transaction, 34 | message: Option, 35 | }, 36 | TransferWasReceived { 37 | transaction: Transaction, 38 | message: Option, 39 | }, 40 | TransferWasDeclined { 41 | transaction_id: TransactionId, 42 | reason: Option, 43 | }, 44 | TransferWasConfirmed { 45 | transaction_id: TransactionId, 46 | }, 47 | WasClosed, 48 | 
WasReopened { 49 | reopening_balance: Option, 50 | }, 51 | } 52 | 53 | impl Message for BankAccountEvent { 54 | fn name(&self) -> &'static str { 55 | match self { 56 | BankAccountEvent::WasOpened { .. } => "BankAccountWasOpened", 57 | BankAccountEvent::DepositWasRecorded { .. } => "BankAccountDepositWasRecorded", 58 | BankAccountEvent::TransferWasSent { .. } => "BankAccountTransferWasSent", 59 | BankAccountEvent::TransferWasReceived { .. } => "BankAccountTransferWasReceived", 60 | BankAccountEvent::TransferWasDeclined { .. } => "BankAccountTransferWasDeclined", 61 | BankAccountEvent::TransferWasConfirmed { .. } => "BankAccountTransferWasConfirmed", 62 | BankAccountEvent::WasClosed => "BankAccountWasClosed", 63 | BankAccountEvent::WasReopened { .. } => "BankAccountWasReopened", 64 | } 65 | } 66 | } 67 | 68 | #[derive(Debug, Clone, PartialEq, Eq, thiserror::Error)] 69 | pub enum BankAccountError { 70 | #[error("bank account has not been opened yet")] 71 | NotOpenedYet, 72 | #[error("bank account has already been opened")] 73 | AlreadyOpened, 74 | #[error("empty id provided for the new bank account")] 75 | EmptyAccountId, 76 | #[error("empty account holder id provided for the new bank account")] 77 | EmptyAccountHolderId, 78 | #[error("a deposit was attempted with negative import")] 79 | NegativeDepositAttempted, 80 | #[error("no money to deposit has been specified")] 81 | NoMoneyDeposited, 82 | #[error("transfer could not be sent due to insufficient funds")] 83 | InsufficientFunds, 84 | #[error("transfer transaction was destined to a different recipient: {0}")] 85 | WrongTransactionRecipient(BankAccountId), 86 | #[error("the account is closed")] 87 | Closed, 88 | #[error("bank account has already been closed")] 89 | AlreadyClosed, 90 | } 91 | 92 | #[derive(Debug, Clone)] 93 | pub struct BankAccount { 94 | id: BankAccountId, 95 | current_balance: Decimal, 96 | pending_transactions: HashMap, 97 | is_closed: bool, 98 | } 99 | 100 | impl aggregate::Aggregate for 
BankAccount { 101 | type Id = BankAccountId; 102 | type Event = BankAccountEvent; 103 | type Error = BankAccountError; 104 | 105 | fn type_name() -> &'static str { 106 | "BankAccount" 107 | } 108 | 109 | fn aggregate_id(&self) -> &Self::Id { 110 | &self.id 111 | } 112 | 113 | fn apply(state: Option, event: Self::Event) -> Result { 114 | match state { 115 | Option::None => match event { 116 | BankAccountEvent::WasOpened { 117 | id, 118 | initial_balance, 119 | .. 120 | } => Ok(BankAccount { 121 | id, 122 | current_balance: initial_balance.unwrap_or_default(), 123 | pending_transactions: HashMap::default(), 124 | is_closed: false, 125 | }), 126 | _ => Err(BankAccountError::NotOpenedYet), 127 | }, 128 | Some(mut account) => match event { 129 | BankAccountEvent::DepositWasRecorded { amount } => { 130 | account.current_balance += amount; 131 | Ok(account) 132 | }, 133 | BankAccountEvent::TransferWasReceived { transaction, .. } => { 134 | account.current_balance += transaction.amount; 135 | Ok(account) 136 | }, 137 | BankAccountEvent::TransferWasSent { transaction, .. } => { 138 | account.current_balance -= transaction.amount; 139 | account 140 | .pending_transactions 141 | .insert(transaction.id.clone(), transaction); 142 | Ok(account) 143 | }, 144 | BankAccountEvent::TransferWasConfirmed { transaction_id } => { 145 | account.pending_transactions.remove(&transaction_id); 146 | Ok(account) 147 | }, 148 | BankAccountEvent::TransferWasDeclined { transaction_id, .. 
} => { 149 | if let Some(transaction) = account.pending_transactions.remove(&transaction_id) 150 | { 151 | account.current_balance += transaction.amount; 152 | } 153 | 154 | Ok(account) 155 | }, 156 | BankAccountEvent::WasClosed => { 157 | account.is_closed = true; 158 | account.current_balance = Decimal::default(); 159 | Ok(account) 160 | }, 161 | BankAccountEvent::WasReopened { reopening_balance } => { 162 | account.is_closed = false; 163 | account.current_balance = reopening_balance.unwrap_or_default(); 164 | Ok(account) 165 | }, 166 | BankAccountEvent::WasOpened { .. } => Err(BankAccountError::AlreadyOpened), 167 | }, 168 | } 169 | } 170 | } 171 | 172 | #[aggregate_root(BankAccount)] 173 | #[derive(Debug, Clone)] 174 | pub struct BankAccountRoot; 175 | 176 | impl BankAccountRoot { 177 | pub fn open( 178 | id: BankAccountId, 179 | account_holder_id: BankAccountHolderId, 180 | opening_balance: Option, 181 | ) -> Result { 182 | if id.is_empty() { 183 | return Err(BankAccountError::EmptyAccountId); 184 | } 185 | 186 | if account_holder_id.is_empty() { 187 | return Err(BankAccountError::EmptyAccountHolderId); 188 | } 189 | 190 | aggregate::Root::::record_new( 191 | BankAccountEvent::WasOpened { 192 | id, 193 | account_holder_id, 194 | initial_balance: opening_balance, 195 | } 196 | .into(), 197 | ) 198 | .map(Self) 199 | } 200 | 201 | pub fn deposit(&mut self, money: Decimal) -> Result<(), BankAccountError> { 202 | if self.is_closed { 203 | return Err(BankAccountError::Closed); 204 | } 205 | 206 | if money.is_sign_negative() { 207 | return Err(BankAccountError::NegativeDepositAttempted); 208 | } 209 | 210 | if money.is_zero() { 211 | return Err(BankAccountError::NoMoneyDeposited); 212 | } 213 | 214 | self.record_that(BankAccountEvent::DepositWasRecorded { amount: money }.into()) 215 | } 216 | 217 | pub fn send_transfer( 218 | &mut self, 219 | mut transaction: Transaction, 220 | message: Option, 221 | ) -> Result<(), BankAccountError> { 222 | if self.is_closed { 223 
| return Err(BankAccountError::Closed); 224 | } 225 | 226 | // NOTE: transaction amounts should be positive, so they can be subtracted 227 | // when applied to a Bank Account. 228 | if transaction.amount.is_sign_negative() { 229 | transaction.amount.set_sign_positive(true); 230 | } 231 | 232 | if self.current_balance < transaction.amount { 233 | return Err(BankAccountError::InsufficientFunds); 234 | } 235 | 236 | let transaction_already_pending = self.pending_transactions.contains_key(&transaction.id); 237 | if transaction_already_pending { 238 | return Ok(()); 239 | } 240 | 241 | self.record_that( 242 | BankAccountEvent::TransferWasSent { 243 | message, 244 | transaction, 245 | } 246 | .into(), 247 | ) 248 | } 249 | 250 | pub fn receive_transfer( 251 | &mut self, 252 | transaction: Transaction, 253 | message: Option, 254 | ) -> Result<(), BankAccountError> { 255 | if self.is_closed { 256 | return Err(BankAccountError::Closed); 257 | } 258 | 259 | if self.id != transaction.beneficiary_account_id { 260 | return Err(BankAccountError::WrongTransactionRecipient( 261 | transaction.beneficiary_account_id, 262 | )); 263 | } 264 | 265 | self.record_that( 266 | BankAccountEvent::TransferWasReceived { 267 | transaction, 268 | message, 269 | } 270 | .into(), 271 | ) 272 | } 273 | 274 | pub fn record_transfer_success( 275 | &mut self, 276 | transaction_id: TransactionId, 277 | ) -> Result<(), BankAccountError> { 278 | let is_transaction_recorded = self.pending_transactions.contains_key(&transaction_id); 279 | if !is_transaction_recorded { 280 | // TODO: return error 281 | } 282 | 283 | self.record_that(BankAccountEvent::TransferWasConfirmed { transaction_id }.into()) 284 | } 285 | 286 | pub fn close(&mut self) -> Result<(), BankAccountError> { 287 | if self.is_closed { 288 | return Err(BankAccountError::AlreadyClosed); 289 | } 290 | 291 | self.record_that(BankAccountEvent::WasClosed.into()) 292 | } 293 | 294 | pub fn reopen(&mut self, reopening_balance: Option) -> Result<(), 
BankAccountError> { 295 | if !self.is_closed { 296 | return Err(BankAccountError::AlreadyOpened); 297 | } 298 | 299 | self.record_that(BankAccountEvent::WasReopened { reopening_balance }.into()) 300 | } 301 | } 302 | -------------------------------------------------------------------------------- /examples/bank-accounting/src/grpc.rs: -------------------------------------------------------------------------------- 1 | use async_trait::async_trait; 2 | use eventually::command::Handler; 3 | use eventually::version; 4 | use rust_decimal::prelude::FromPrimitive; 5 | use rust_decimal::Decimal; 6 | use tracing::instrument; 7 | 8 | use crate::domain::BankAccountError; 9 | use crate::{application, proto}; 10 | 11 | #[derive(Clone)] 12 | pub struct BankAccountingApi { 13 | application_service: application::Service, 14 | } 15 | 16 | impl From for BankAccountingApi { 17 | fn from(application_service: application::Service) -> Self { 18 | Self { 19 | application_service, 20 | } 21 | } 22 | } 23 | 24 | #[async_trait] 25 | impl proto::bank_accounting_server::BankAccounting for BankAccountingApi { 26 | #[instrument(skip(self))] 27 | async fn open_bank_account( 28 | &self, 29 | request: tonic::Request, 30 | ) -> Result, tonic::Status> { 31 | let request = request.into_inner(); 32 | 33 | self.application_service 34 | .handle( 35 | application::OpenBankAccount { 36 | bank_account_id: request.bank_account_id, 37 | bank_account_holder_id: request.bank_account_holder_id, 38 | opening_balance: Decimal::from_f32(request.opening_balance), 39 | } 40 | .into(), 41 | ) 42 | .await 43 | .map(|_| tonic::Response::new(proto::OpenBankAccountResponse {})) 44 | .map_err(|e| { 45 | use BankAccountError::*; 46 | 47 | let bank_error = as_error::(&e); 48 | let conflict_error = as_error::(&e); 49 | 50 | if let Some(EmptyAccountId | EmptyAccountHolderId) = bank_error { 51 | tonic::Status::invalid_argument(e.to_string()) 52 | } else if conflict_error.is_some() { 53 | 
tonic::Status::already_exists(AlreadyOpened.to_string()) 54 | } else { 55 | tonic::Status::internal(e.to_string()) 56 | } 57 | }) 58 | } 59 | 60 | #[instrument(skip(self))] 61 | async fn deposit_in_bank_account( 62 | &self, 63 | request: tonic::Request, 64 | ) -> Result, tonic::Status> { 65 | let request = request.into_inner(); 66 | 67 | self.application_service 68 | .handle( 69 | application::DepositInBankAccount { 70 | bank_account_id: request.bank_account_id, 71 | amount: Decimal::from_f32(request.amount).ok_or_else(|| { 72 | tonic::Status::invalid_argument("amount should be more than 0") 73 | })?, 74 | } 75 | .into(), 76 | ) 77 | .await 78 | .map(|_| tonic::Response::new(proto::DepositInBankAccountResponse {})) 79 | .map_err(|e| { 80 | use BankAccountError::*; 81 | 82 | let bank_error = as_error::(&e); 83 | let conflict_error = as_error::(&e); 84 | 85 | if let Some(Closed | NegativeDepositAttempted) = bank_error { 86 | tonic::Status::failed_precondition(e.to_string()) 87 | } else if let Some(NoMoneyDeposited) = bank_error { 88 | tonic::Status::invalid_argument(e.to_string()) 89 | } else if let Some(e) = conflict_error { 90 | tonic::Status::failed_precondition(e.to_string()) 91 | } else { 92 | tonic::Status::internal(e.to_string()) 93 | } 94 | }) 95 | } 96 | } 97 | 98 | fn as_error(e: &anyhow::Error) -> Option<&T> 99 | where 100 | T: std::error::Error + Send + Sync + 'static, 101 | { 102 | e.source().and_then(move |e| e.downcast_ref::()) 103 | } 104 | -------------------------------------------------------------------------------- /examples/bank-accounting/src/lib.rs: -------------------------------------------------------------------------------- 1 | pub mod application; 2 | pub mod domain; 3 | pub mod grpc; 4 | pub mod postgres; 5 | pub mod serde; 6 | pub mod tracing; 7 | 8 | #[allow(unused_qualifications)] 9 | #[allow(clippy::all)] // Cannot really check the sanity of generated code :shrugs: 10 | pub mod proto { 11 | tonic::include_proto!("bankaccounting"); 
12 | tonic::include_proto!("bankaccount"); 13 | 14 | pub const FILE_DESCRIPTOR_SET: &[u8] = 15 | tonic::include_file_descriptor_set!("bankaccouting_descriptor"); 16 | } 17 | -------------------------------------------------------------------------------- /examples/bank-accounting/src/main.rs: -------------------------------------------------------------------------------- 1 | use std::time::Duration; 2 | 3 | use anyhow::anyhow; 4 | use bank_accounting::domain::{BankAccountEvent, BankAccountRepository}; 5 | use bank_accounting::{application, grpc, proto}; 6 | use eventually::serde; 7 | use eventually::tracing::{AggregateRepositoryExt, EventStoreExt}; 8 | use eventually_postgres::event; 9 | 10 | #[tokio::main] 11 | async fn main() -> anyhow::Result<()> { 12 | bank_accounting::tracing::initialize("bank-accounting")?; 13 | 14 | let pool = bank_accounting::postgres::connect().await?; 15 | 16 | let bank_account_event_serde = serde::Convert::::new( 17 | serde::Protobuf::::default(), 18 | ); 19 | 20 | let bank_account_event_store = event::Store::new(pool, bank_account_event_serde) 21 | .await? 
22 | .with_tracing(); 23 | 24 | let bank_account_repository = 25 | BankAccountRepository::from(bank_account_event_store.clone()).with_tracing(); 26 | 27 | let application_service = application::Service::from(bank_account_repository); 28 | 29 | tracing::info!("Service is starting up..."); 30 | 31 | let addr = "0.0.0.0:10437" 32 | .parse() 33 | .map_err(|e| anyhow!("failed to parse grpc address: {}", e))?; 34 | 35 | let (_, health_svc) = tonic_health::server::health_reporter(); 36 | 37 | let reflection_svc = tonic_reflection::server::Builder::configure() 38 | .register_encoded_file_descriptor_set(proto::FILE_DESCRIPTOR_SET) 39 | .register_encoded_file_descriptor_set(tonic_health::pb::FILE_DESCRIPTOR_SET) 40 | .build_v1() 41 | .map_err(|e| anyhow!("failed to build grpc reflection service: {}", e))?; 42 | 43 | let bank_accounting_svc = proto::bank_accounting_server::BankAccountingServer::new( 44 | grpc::BankAccountingApi::from(application_service), 45 | ); 46 | 47 | let layer = tower::ServiceBuilder::new() 48 | .timeout(Duration::from_secs(5)) 49 | .into_inner(); 50 | 51 | tonic::transport::Server::builder() 52 | .trace_fn(|r| tracing::info_span!("server", uri = r.uri().to_string())) 53 | .layer(layer) 54 | .add_service(health_svc) 55 | .add_service(reflection_svc) 56 | .add_service(bank_accounting_svc) 57 | .serve(addr) 58 | .await 59 | .map_err(|e| anyhow!("tonic server exited with error: {}", e))?; 60 | 61 | Ok(()) 62 | } 63 | -------------------------------------------------------------------------------- /examples/bank-accounting/src/postgres.rs: -------------------------------------------------------------------------------- 1 | use sqlx::postgres::{PgConnectOptions, PgSslMode}; 2 | use sqlx::{ConnectOptions, PgPool}; 3 | use tracing::log::LevelFilter; 4 | 5 | pub async fn connect() -> anyhow::Result { 6 | Ok(PgPool::connect_with( 7 | PgConnectOptions::new() 8 | .host( 9 | std::env::var("DATABASE_HOST") 10 | .expect("env var DATABASE_HOST is required") 11 | 
.as_ref(), 12 | ) 13 | .port(5432) 14 | .username("postgres") 15 | .password("password") 16 | .ssl_mode(PgSslMode::Disable) 17 | .log_statements(LevelFilter::Debug), 18 | ) 19 | .await?) 20 | } 21 | -------------------------------------------------------------------------------- /examples/bank-accounting/src/serde.rs: -------------------------------------------------------------------------------- 1 | use rust_decimal::prelude::{FromPrimitive, ToPrimitive}; 2 | use rust_decimal::Decimal; 3 | 4 | use crate::domain::BankAccountEvent; 5 | use crate::proto::Event as ProtoBankAccountEvent; 6 | use crate::{domain, proto}; 7 | 8 | impl From for proto::Transaction { 9 | fn from(tx: domain::Transaction) -> Self { 10 | Self { 11 | id: tx.id, 12 | beneficiary_account_id: tx.beneficiary_account_id, 13 | amount: tx.amount.to_f32().unwrap(), 14 | } 15 | } 16 | } 17 | 18 | impl From for ProtoBankAccountEvent { 19 | fn from(event: BankAccountEvent) -> Self { 20 | Self { 21 | event: Some(match event { 22 | BankAccountEvent::WasOpened { 23 | id, 24 | account_holder_id, 25 | initial_balance, 26 | } => proto::event::Event::WasOpened(proto::event::WasOpened { 27 | id, 28 | account_holder_id, 29 | initial_balance: initial_balance.unwrap_or_default().to_f32().unwrap(), 30 | }), 31 | BankAccountEvent::DepositWasRecorded { amount } => { 32 | proto::event::Event::DepositWasRecorded(proto::event::DepositWasRecorded { 33 | amount: amount.to_f32().unwrap(), 34 | }) 35 | }, 36 | BankAccountEvent::TransferWasSent { 37 | transaction, 38 | message, 39 | } => proto::event::Event::TransferWasSent(proto::event::TransferWasSent { 40 | transaction: Some(transaction.into()), 41 | msg: message, 42 | }), 43 | BankAccountEvent::TransferWasReceived { 44 | transaction, 45 | message, 46 | } => proto::event::Event::TransferWasReceived(proto::event::TransferWasReceived { 47 | transaction: Some(transaction.into()), 48 | msg: message, 49 | }), 50 | BankAccountEvent::TransferWasConfirmed { transaction_id } => { 51 
| proto::event::Event::TransferWasConfimed(proto::event::TransferWasConfirmed { 52 | transaction_id, 53 | }) 54 | }, 55 | BankAccountEvent::TransferWasDeclined { 56 | transaction_id, 57 | reason, 58 | } => proto::event::Event::TransferWasDeclined(proto::event::TransferWasDeclined { 59 | transaction_id, 60 | reason, 61 | }), 62 | BankAccountEvent::WasClosed => { 63 | proto::event::Event::WasClosed(proto::event::WasClosed {}) 64 | }, 65 | BankAccountEvent::WasReopened { reopening_balance } => { 66 | proto::event::Event::WasReopened(proto::event::WasReopened { 67 | reopening_balance: reopening_balance.unwrap_or_default().to_f32().unwrap(), 68 | }) 69 | }, 70 | }), 71 | } 72 | } 73 | } 74 | 75 | impl From for BankAccountEvent { 76 | fn from(proto: ProtoBankAccountEvent) -> Self { 77 | match proto.event.expect("event is a required field") { 78 | proto::event::Event::WasOpened(proto::event::WasOpened { 79 | id, 80 | account_holder_id, 81 | initial_balance, 82 | }) => BankAccountEvent::WasOpened { 83 | id, 84 | account_holder_id, 85 | initial_balance: Some(Decimal::from_f32(initial_balance).unwrap()), 86 | }, 87 | proto::event::Event::DepositWasRecorded(proto::event::DepositWasRecorded { 88 | amount, 89 | }) => BankAccountEvent::DepositWasRecorded { 90 | amount: Decimal::from_f32(amount).unwrap(), 91 | }, 92 | // TODO: fill these as more implementations are added to the service. 
93 | proto::event::Event::TransferWasSent(_) => todo!(), 94 | proto::event::Event::TransferWasReceived(_) => todo!(), 95 | proto::event::Event::TransferWasConfimed(_) => todo!(), 96 | proto::event::Event::TransferWasDeclined(_) => todo!(), 97 | proto::event::Event::WasClosed(_) => todo!(), 98 | proto::event::Event::WasReopened(_) => todo!(), 99 | } 100 | } 101 | } 102 | -------------------------------------------------------------------------------- /examples/bank-accounting/src/tracing.rs: -------------------------------------------------------------------------------- 1 | use anyhow::anyhow; 2 | use opentelemetry::KeyValue; 3 | use opentelemetry_sdk::{trace, Resource}; 4 | use tracing_subscriber::prelude::*; 5 | use tracing_subscriber::EnvFilter; 6 | 7 | pub fn initialize(service_name: &'static str) -> anyhow::Result<()> { 8 | let tracer = opentelemetry_otlp::new_pipeline() 9 | .tracing() 10 | .with_exporter(opentelemetry_otlp::new_exporter().tonic()) 11 | .with_trace_config( 12 | opentelemetry_sdk::trace::config() 13 | .with_sampler(trace::Sampler::AlwaysOn) 14 | .with_id_generator(trace::RandomIdGenerator::default()) 15 | .with_resource(Resource::new([KeyValue::new("service.name", service_name)])), 16 | ) 17 | .install_batch(opentelemetry_sdk::runtime::Tokio) 18 | .map_err(|e| anyhow!("failed to initialize OTLP tracer: {}", e))?; 19 | 20 | let filter_layer = EnvFilter::try_from_default_env() 21 | .or_else(|_| EnvFilter::try_new("info")) 22 | .map_err(|e| anyhow!("failed to initialize env filter: {}", e))?; 23 | 24 | tracing_subscriber::registry() 25 | .with(tracing_subscriber::fmt::layer()) 26 | .with(tracing_opentelemetry::layer().with_tracer(tracer)) 27 | .with(filter_layer) 28 | .try_init() 29 | .map_err(|e| anyhow!("failed to initialize subscribers: {}", e))?; 30 | 31 | Ok(()) 32 | } 33 | -------------------------------------------------------------------------------- /examples/light-switch/Cargo.toml: 
-------------------------------------------------------------------------------- 1 | [package] 2 | name = "light_switch" 3 | version = "0.1.0" 4 | edition = "2021" 5 | readme = "README.md" 6 | publish = false 7 | 8 | [dependencies] 9 | serde_json = { version = "1.0.114", optional = true } 10 | serde = { version = "1.0.197", features = ["derive"] } 11 | tokio = { version = "1.36.0", features = ["macros", "rt-multi-thread"] } 12 | thiserror = { version = "2.0.12" } 13 | anyhow = "1.0.97" 14 | async-trait = "0.1.77" 15 | eventually = { features = ["serde-json"], path="../../eventually" } 16 | -------------------------------------------------------------------------------- /examples/light-switch/README.md: -------------------------------------------------------------------------------- 1 | # Example: Light Switch application 2 | 3 | This example application is a bare minimum example of the core functionality. 4 | Persistence is not enabled, nor is logging, nor any kind of user interface. It 5 | just starts, runs some commands, queries the final state and prints it. 6 | 7 | It models a "light switch". You can "install" the switch, then turn it "off" or 8 | "on" using commands. You can issue a query to get the current state of a switch. 
9 | -------------------------------------------------------------------------------- /examples/light-switch/src/application.rs: -------------------------------------------------------------------------------- 1 | use eventually::aggregate; 2 | 3 | use crate::domain::LightSwitch; 4 | pub type LightSwitchRepo = aggregate::EventSourcedRepository; 5 | 6 | #[derive(Clone)] 7 | pub struct LightSwitchService 8 | where 9 | R: aggregate::Repository, 10 | { 11 | pub light_switch_repository: R, 12 | } 13 | 14 | impl From for LightSwitchService 15 | where 16 | R: aggregate::Repository, 17 | { 18 | fn from(light_switch_repository: R) -> Self { 19 | Self { 20 | light_switch_repository, 21 | } 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /examples/light-switch/src/commands/install_light_switch.rs: -------------------------------------------------------------------------------- 1 | use async_trait::async_trait; 2 | use eventually::{aggregate, command, message}; 3 | 4 | use crate::application::LightSwitchService; 5 | use crate::domain::{LightSwitch, LightSwitchId, LightSwitchRoot}; 6 | 7 | #[derive(Debug, Clone, PartialEq, Eq)] 8 | pub struct InstallLightSwitch { 9 | pub id: LightSwitchId, 10 | } 11 | 12 | impl message::Message for InstallLightSwitch { 13 | fn name(&self) -> &'static str { 14 | "InstallLightSwitch" 15 | } 16 | } 17 | 18 | #[async_trait] 19 | impl command::Handler for LightSwitchService 20 | where 21 | R: aggregate::Repository, 22 | { 23 | type Error = anyhow::Error; 24 | async fn handle( 25 | &self, 26 | command: command::Envelope, 27 | ) -> Result<(), Self::Error> { 28 | let command = command.message; 29 | let mut light_switch = LightSwitchRoot::install(command.id)?; 30 | self.light_switch_repository.save(&mut light_switch).await?; 31 | Ok(()) 32 | } 33 | } 34 | -------------------------------------------------------------------------------- /examples/light-switch/src/commands/mod.rs: 
-------------------------------------------------------------------------------- 1 | pub mod install_light_switch; 2 | pub mod turn_light_switch_off; 3 | pub mod turn_light_switch_on; 4 | -------------------------------------------------------------------------------- /examples/light-switch/src/commands/turn_light_switch_off.rs: -------------------------------------------------------------------------------- 1 | use async_trait::async_trait; 2 | use eventually::{aggregate, command, message}; 3 | 4 | use crate::application::LightSwitchService; 5 | use crate::domain::{LightSwitch, LightSwitchId, LightSwitchRoot}; 6 | 7 | #[derive(Debug, Clone, PartialEq, Eq)] 8 | pub struct TurnLightSwitchOff { 9 | pub id: LightSwitchId, 10 | } 11 | 12 | impl message::Message for TurnLightSwitchOff { 13 | fn name(&self) -> &'static str { 14 | "TurnLightSwitchOff" 15 | } 16 | } 17 | 18 | #[async_trait] 19 | impl command::Handler for LightSwitchService 20 | where 21 | R: aggregate::Repository, 22 | { 23 | type Error = anyhow::Error; 24 | async fn handle( 25 | &self, 26 | command: command::Envelope, 27 | ) -> Result<(), Self::Error> { 28 | let command = command.message; 29 | let mut root: LightSwitchRoot = self.light_switch_repository.get(&command.id).await?.into(); 30 | let _ = root.turn_off(command.id)?; 31 | self.light_switch_repository.save(&mut root).await?; 32 | Ok(()) 33 | } 34 | } 35 | -------------------------------------------------------------------------------- /examples/light-switch/src/commands/turn_light_switch_on.rs: -------------------------------------------------------------------------------- 1 | use async_trait::async_trait; 2 | use eventually::{aggregate, command, message}; 3 | 4 | use crate::application::LightSwitchService; 5 | use crate::domain::{LightSwitch, LightSwitchId, LightSwitchRoot}; 6 | 7 | #[derive(Debug, Clone, PartialEq, Eq)] 8 | pub struct TurnLightSwitchOn { 9 | pub id: LightSwitchId, 10 | } 11 | 12 | impl message::Message for TurnLightSwitchOn { 13 
| fn name(&self) -> &'static str { 14 | "TurnLightSwitchOn" 15 | } 16 | } 17 | 18 | #[async_trait] 19 | impl command::Handler for LightSwitchService 20 | where 21 | R: aggregate::Repository, 22 | { 23 | type Error = anyhow::Error; 24 | async fn handle( 25 | &self, 26 | command: command::Envelope, 27 | ) -> Result<(), Self::Error> { 28 | let command = command.message; 29 | let mut root: LightSwitchRoot = self.light_switch_repository.get(&command.id).await?.into(); 30 | let _ = root.turn_on(command.id)?; 31 | self.light_switch_repository.save(&mut root).await?; 32 | Ok(()) 33 | } 34 | } 35 | -------------------------------------------------------------------------------- /examples/light-switch/src/domain.rs: -------------------------------------------------------------------------------- 1 | use eventually::{aggregate, message}; 2 | 3 | pub type LightSwitchId = String; 4 | 5 | #[derive(Debug, Clone, PartialEq, Eq, thiserror::Error)] 6 | pub enum LightSwitchError { 7 | #[error("Light switch has not yet been installed")] 8 | NotYetInstalled, 9 | #[error("Light switch has already been installed")] 10 | AlreadyInstalled, 11 | #[error("Light switch is already on")] 12 | AlreadyOn, 13 | #[error("Light switch is already off")] 14 | AlreadyOff, 15 | } 16 | 17 | // events 18 | #[derive(Debug, Clone, Eq, PartialEq)] 19 | pub struct Installed { 20 | id: LightSwitchId, 21 | } 22 | 23 | #[derive(Debug, Clone, Eq, PartialEq)] 24 | pub struct SwitchedOn { 25 | id: LightSwitchId, 26 | } 27 | 28 | #[derive(Debug, Clone, Eq, PartialEq)] 29 | pub struct SwitchedOff { 30 | id: LightSwitchId, 31 | } 32 | 33 | #[derive(Debug, Clone, Eq, PartialEq)] 34 | pub enum LightSwitchEvent { 35 | Installed(Installed), 36 | SwitchedOn(SwitchedOn), 37 | SwitchedOff(SwitchedOff), 38 | } 39 | 40 | impl message::Message for LightSwitchEvent { 41 | fn name(&self) -> &'static str { 42 | match self { 43 | LightSwitchEvent::SwitchedOn(_) => "SwitchedOn", 44 | LightSwitchEvent::SwitchedOff(_) => 
"SwitchedOff", 45 | LightSwitchEvent::Installed(_) => "Installed", 46 | } 47 | } 48 | } 49 | 50 | // aggregate 51 | #[derive(Debug, Clone, Eq, PartialEq)] 52 | pub enum LightSwitchState { 53 | On, 54 | Off, 55 | } 56 | 57 | #[derive(Debug, Clone)] 58 | pub struct LightSwitch { 59 | id: LightSwitchId, 60 | state: LightSwitchState, 61 | } 62 | 63 | impl aggregate::Aggregate for LightSwitch { 64 | type Id = LightSwitchId; 65 | type Event = LightSwitchEvent; 66 | type Error = LightSwitchError; 67 | 68 | fn type_name() -> &'static str { 69 | "LightSwitch" 70 | } 71 | 72 | fn aggregate_id(&self) -> &Self::Id { 73 | &self.id 74 | } 75 | 76 | fn apply(state: Option, event: Self::Event) -> Result { 77 | match state { 78 | None => match event { 79 | LightSwitchEvent::Installed(installed) => Ok(LightSwitch { 80 | id: installed.id, 81 | state: LightSwitchState::Off, 82 | }), 83 | LightSwitchEvent::SwitchedOn(_) | LightSwitchEvent::SwitchedOff(_) => { 84 | Err(LightSwitchError::NotYetInstalled) 85 | }, 86 | }, 87 | Some(mut light_switch) => match event { 88 | LightSwitchEvent::Installed(_) => Err(LightSwitchError::AlreadyInstalled), 89 | LightSwitchEvent::SwitchedOn(_) => match light_switch.state { 90 | LightSwitchState::On => Err(LightSwitchError::AlreadyOn), 91 | LightSwitchState::Off => { 92 | light_switch.state = LightSwitchState::On; 93 | Ok(light_switch) 94 | }, 95 | }, 96 | LightSwitchEvent::SwitchedOff(_) => match light_switch.state { 97 | LightSwitchState::On => { 98 | light_switch.state = LightSwitchState::Off; 99 | Ok(light_switch) 100 | }, 101 | LightSwitchState::Off => Err(LightSwitchError::AlreadyOff), 102 | }, 103 | }, 104 | } 105 | } 106 | } 107 | 108 | // root 109 | #[derive(Debug, Clone)] 110 | pub struct LightSwitchRoot(aggregate::Root); 111 | 112 | // NOTE: The trait implementations for From, Deref and DerefMut below are 113 | // implemented manually for demonstration purposes, but most would prefer to have them 114 | // auto-generated at compile time by 
using the [`eventually_macros::aggregate_root`] macro 115 | impl From> for LightSwitchRoot { 116 | fn from(root: eventually::aggregate::Root) -> Self { 117 | Self(root) 118 | } 119 | } 120 | impl From for eventually::aggregate::Root { 121 | fn from(value: LightSwitchRoot) -> Self { 122 | value.0 123 | } 124 | } 125 | impl std::ops::Deref for LightSwitchRoot { 126 | type Target = eventually::aggregate::Root; 127 | fn deref(&self) -> &Self::Target { 128 | &self.0 129 | } 130 | } 131 | impl std::ops::DerefMut for LightSwitchRoot { 132 | fn deref_mut(&mut self) -> &mut Self::Target { 133 | &mut self.0 134 | } 135 | } 136 | 137 | impl LightSwitchRoot { 138 | pub fn install(id: LightSwitchId) -> Result { 139 | aggregate::Root::::record_new( 140 | LightSwitchEvent::Installed(Installed { id }).into(), 141 | ) 142 | .map(Self) 143 | } 144 | pub fn turn_on(&mut self, id: LightSwitchId) -> Result<(), LightSwitchError> { 145 | self.record_that(LightSwitchEvent::SwitchedOn(SwitchedOn { id }).into()) 146 | } 147 | pub fn turn_off(&mut self, id: LightSwitchId) -> Result<(), LightSwitchError> { 148 | self.record_that(LightSwitchEvent::SwitchedOff(SwitchedOff { id }).into()) 149 | } 150 | pub fn get_switch_state(&self) -> Result { 151 | Ok(self.state.clone()) 152 | } 153 | } 154 | -------------------------------------------------------------------------------- /examples/light-switch/src/main.rs: -------------------------------------------------------------------------------- 1 | mod application; 2 | mod commands; 3 | mod domain; 4 | mod queries; 5 | use application::{LightSwitchRepo, LightSwitchService}; 6 | use commands::install_light_switch::InstallLightSwitch; 7 | use commands::turn_light_switch_off::TurnLightSwitchOff; 8 | use commands::turn_light_switch_on::TurnLightSwitchOn; 9 | use domain::{LightSwitchEvent, LightSwitchId}; 10 | use eventually::{command, event, query}; 11 | use queries::get_switch_state::GetSwitchState; 12 | 13 | #[tokio::main] 14 | async fn main() -> 
Result<(), anyhow::Error> { 15 | let store = event::store::InMemory::::default(); 16 | let repo = LightSwitchRepo::from(store.clone()); 17 | let svc = LightSwitchService::from(repo); 18 | 19 | let cmd = InstallLightSwitch { 20 | id: "Switch1".to_string(), 21 | } 22 | .into(); 23 | command::Handler::handle(&svc, cmd).await?; 24 | println!("Installed Switch1"); 25 | 26 | let cmd = TurnLightSwitchOn { 27 | id: "Switch1".to_string(), 28 | } 29 | .into(); 30 | command::Handler::handle(&svc, cmd).await?; 31 | println!("Turned Switch1 On"); 32 | 33 | let cmd = TurnLightSwitchOff { 34 | id: "Switch1".to_string(), 35 | } 36 | .into(); 37 | command::Handler::handle(&svc, cmd).await?; 38 | println!("Turned Switch1 Off"); 39 | 40 | let query = GetSwitchState { 41 | id: "Switch1".to_string(), 42 | } 43 | .into(); 44 | let state = query::Handler::handle(&svc, query).await?; 45 | println!("Switch1 is currently: {:?}", state); 46 | Ok(()) 47 | } 48 | -------------------------------------------------------------------------------- /examples/light-switch/src/queries/get_switch_state.rs: -------------------------------------------------------------------------------- 1 | use async_trait::async_trait; 2 | use eventually::{aggregate, message, query}; 3 | 4 | use crate::application::LightSwitchService; 5 | use crate::domain::{LightSwitch, LightSwitchId, LightSwitchRoot, LightSwitchState}; 6 | 7 | #[derive(Debug, Clone, PartialEq, Eq)] 8 | pub struct GetSwitchState { 9 | pub id: LightSwitchId, 10 | } 11 | 12 | impl message::Message for GetSwitchState { 13 | fn name(&self) -> &'static str { 14 | "GetSwitch" 15 | } 16 | } 17 | 18 | #[async_trait] 19 | impl query::Handler for LightSwitchService 20 | where 21 | R: aggregate::Repository, 22 | { 23 | type Error = anyhow::Error; 24 | type Output = LightSwitchState; 25 | 26 | async fn handle( 27 | &self, 28 | query: query::Envelope, 29 | ) -> Result { 30 | let query = query.message; 31 | let root: LightSwitchRoot = 
self.light_switch_repository.get(&query.id).await?.into(); 32 | let s = root.get_switch_state()?; 33 | Ok(s) 34 | } 35 | } 36 | -------------------------------------------------------------------------------- /examples/light-switch/src/queries/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod get_switch_state; 2 | -------------------------------------------------------------------------------- /flake.lock: -------------------------------------------------------------------------------- 1 | { 2 | "nodes": { 3 | "flake-utils": { 4 | "inputs": { 5 | "systems": "systems" 6 | }, 7 | "locked": { 8 | "lastModified": 1731533236, 9 | "narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=", 10 | "owner": "numtide", 11 | "repo": "flake-utils", 12 | "rev": "11707dc2f618dd54ca8739b309ec4fc024de578b", 13 | "type": "github" 14 | }, 15 | "original": { 16 | "owner": "numtide", 17 | "repo": "flake-utils", 18 | "type": "github" 19 | } 20 | }, 21 | "nixpkgs": { 22 | "locked": { 23 | "lastModified": 1742504295, 24 | "narHash": "sha256-wcwHucozGLQWE78Hh+ZjTIHzjlv1sgIYAiovlBr8c6U=", 25 | "owner": "NixOS", 26 | "repo": "nixpkgs", 27 | "rev": "91b0d2a1466aee5d8d6d802989a2fbb9324126d4", 28 | "type": "github" 29 | }, 30 | "original": { 31 | "owner": "NixOS", 32 | "ref": "master", 33 | "repo": "nixpkgs", 34 | "type": "github" 35 | } 36 | }, 37 | "root": { 38 | "inputs": { 39 | "flake-utils": "flake-utils", 40 | "nixpkgs": "nixpkgs", 41 | "rust-overlay": "rust-overlay" 42 | } 43 | }, 44 | "rust-overlay": { 45 | "inputs": { 46 | "nixpkgs": [ 47 | "nixpkgs" 48 | ] 49 | }, 50 | "locked": { 51 | "lastModified": 1742437918, 52 | "narHash": "sha256-Vflb6KJVDikFcM9E231mRN88uk4+jo7BWtaaQMifthI=", 53 | "owner": "oxalica", 54 | "repo": "rust-overlay", 55 | "rev": "f03085549609e49c7bcbbee86a1949057d087199", 56 | "type": "github" 57 | }, 58 | "original": { 59 | "owner": "oxalica", 60 | "repo": "rust-overlay", 61 | "type": "github" 62 | } 63 | }, 
64 | "systems": { 65 | "locked": { 66 | "lastModified": 1681028828, 67 | "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", 68 | "owner": "nix-systems", 69 | "repo": "default", 70 | "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", 71 | "type": "github" 72 | }, 73 | "original": { 74 | "owner": "nix-systems", 75 | "repo": "default", 76 | "type": "github" 77 | } 78 | } 79 | }, 80 | "root": "root", 81 | "version": 7 82 | } 83 | -------------------------------------------------------------------------------- /flake.nix: -------------------------------------------------------------------------------- 1 | { 2 | inputs = { 3 | nixpkgs.url = "github:NixOS/nixpkgs/master"; 4 | flake-utils.url = "github:numtide/flake-utils"; 5 | 6 | rust-overlay.url = "github:oxalica/rust-overlay"; 7 | rust-overlay.inputs.nixpkgs.follows = "nixpkgs"; 8 | rust-overlay.inputs.flake-utils.follows = "flake-utils"; 9 | }; 10 | 11 | outputs = { nixpkgs, flake-utils, rust-overlay, ... }: 12 | flake-utils.lib.eachDefaultSystem 13 | (system: 14 | let 15 | pkgs = import nixpkgs { 16 | inherit system; 17 | overlays = [ (import rust-overlay) ]; 18 | }; 19 | 20 | protobuf = pkgs.protobuf3_24; 21 | in 22 | with pkgs; 23 | { 24 | devShells.default = with pkgs; mkShell { 25 | packages = [ 26 | niv 27 | nixpkgs-fmt 28 | pkg-config 29 | openssl 30 | rust-bin.nightly.latest.default 31 | protobuf 32 | ] ++ lib.optionals stdenv.isDarwin [ 33 | darwin.apple_sdk.frameworks.SystemConfiguration 34 | ]; 35 | 36 | PROTOC = "${protobuf}/bin/protoc"; 37 | }; 38 | } 39 | ); 40 | } 41 | -------------------------------------------------------------------------------- /renovate.json5: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://docs.renovatebot.com/renovate-schema.json", 3 | "extends": [ 4 | "config:recommended", 5 | "group:allNonMajor" 6 | ], 7 | "branchPrefix": "chore/renovate-", 8 | "rebaseWhen": "behind-base-branch", 9 | "nix": { 10 | 
"enabled": true 11 | }, 12 | "lockFileMaintenance": { 13 | "enabled": true, 14 | "recreateWhen": "always", 15 | "rebaseStalePrs": true 16 | }, 17 | "packageRules": [ 18 | // Group all Github Actions updates in a single PR. 19 | { 20 | "matchManagers": [ 21 | "github-actions" 22 | ] 23 | } 24 | ] 25 | } 26 | -------------------------------------------------------------------------------- /resources/logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/get-eventually/eventually-rs/cd5a96533cc81ea86a15818df207113ac9936567/resources/logo.png -------------------------------------------------------------------------------- /rustfmt.toml: -------------------------------------------------------------------------------- 1 | match_block_trailing_comma = true 2 | reorder_imports = true 3 | group_imports = "StdExternalCrate" 4 | imports_granularity = "Module" 5 | ignore = ["third_party"] 6 | --------------------------------------------------------------------------------