├── .gitattributes ├── .githooks └── pre-commit ├── .github ├── dependabot.yml └── workflows │ ├── ci.yml │ └── coverage.yml ├── .gitignore ├── CHANGELOG.md ├── Cargo.lock ├── Cargo.toml ├── Dockerfile ├── LICENSE ├── README.md ├── bin └── githooks ├── buildspec_test.yml ├── demo ├── Cargo.toml ├── Dockerfile ├── README.md ├── cqrs-demo.postman_collection.json ├── curl │ ├── DepositMoney.json │ ├── WithdrawMoney.json │ ├── WriteCheck.json │ ├── lambda_payload.json │ ├── test_api.sh │ └── test_lambda.sh ├── db │ └── init.sql ├── docker-compose.yml └── src │ ├── command_extractor.rs │ ├── config.rs │ ├── domain │ ├── aggregate.rs │ ├── commands.rs │ ├── events.rs │ └── mod.rs │ ├── lambda_main.rs │ ├── lib.rs │ ├── main.rs │ ├── queries.rs │ ├── route_handler.rs │ ├── services.rs │ └── state.rs ├── deny.toml ├── docs ├── book │ └── src │ │ ├── SUMMARY.md │ │ ├── advanced_debugging_state.md │ │ ├── advanced_event_replay.md │ │ ├── advanced_event_upcasters.md │ │ ├── advanced_topics.md │ │ ├── application_building.md │ │ ├── application_event_store.md │ │ ├── application_metadata.md │ │ ├── application_persisted_views.md │ │ ├── book.toml │ │ ├── demo_application.md │ │ ├── demo_application_framework.md │ │ ├── demo_event_store.md │ │ ├── demo_simple_query.md │ │ ├── event_upcasters.md │ │ ├── images │ │ ├── CQRS.png │ │ ├── CQRS_flow.png │ │ ├── bounded_context.png │ │ ├── compare_standard_application.png │ │ └── event-replay.png │ │ ├── intro.md │ │ ├── intro_add_aggregate.md │ │ ├── intro_add_commands.md │ │ ├── intro_add_error.md │ │ ├── intro_add_events.md │ │ ├── intro_getting_started.md │ │ ├── test_add_first.md │ │ ├── test_add_more.md │ │ ├── theory.md │ │ ├── theory_cqrs.md │ │ ├── theory_ddd.md │ │ ├── theory_event_sourcing.md │ │ ├── theory_queries.md │ │ └── theory_updates.md ├── ladr │ ├── 01-postgres-is-primary-datastore.md │ └── 02-use-async-rust.md ├── tenets.md └── versions │ ├── change_log.md │ ├── migration_0_2_5.md │ ├── migration_0_3_0.md │ └── 
migration_0_4_0.md ├── persistence ├── dynamo-es │ ├── .gitignore │ ├── Cargo.toml │ ├── README.md │ ├── buildspec_test.yml │ ├── db │ │ ├── create_tables.sh │ │ └── dynamo_db.yaml │ ├── docker-compose.yml │ ├── src │ │ ├── cqrs.rs │ │ ├── error.rs │ │ ├── event_repository.rs │ │ ├── helpers.rs │ │ ├── lib.rs │ │ ├── testing.rs │ │ ├── types.rs │ │ └── view_repository.rs │ └── tests │ │ └── lib.rs ├── mysql-es │ ├── .gitignore │ ├── CHANGELOG.md │ ├── Cargo.toml │ ├── README.md │ ├── buildspec_test.yml │ ├── db │ │ └── init.sql │ ├── docker-compose.yml │ ├── src │ │ ├── cqrs.rs │ │ ├── error.rs │ │ ├── event_repository.rs │ │ ├── lib.rs │ │ ├── sql_query.rs │ │ ├── testing.rs │ │ ├── types.rs │ │ └── view_repository.rs │ └── tests │ │ └── lib.rs └── postgres-es │ ├── .gitignore │ ├── CHANGELOG.md │ ├── Cargo.toml │ ├── README.md │ ├── buildspec_test.yml │ ├── db │ └── init.sql │ ├── docker-compose.yml │ ├── src │ ├── cqrs.rs │ ├── error.rs │ ├── event_repository.rs │ ├── lib.rs │ ├── sql_query.rs │ ├── testing.rs │ ├── types.rs │ └── view_repository.rs │ └── tests │ └── lib.rs ├── src ├── aggregate.rs ├── cqrs.rs ├── doc.rs ├── error.rs ├── event.rs ├── lib.rs ├── mem_store.rs ├── persist.rs ├── persist │ ├── context.rs │ ├── doc.rs │ ├── error.rs │ ├── event_repository.rs │ ├── event_store.rs │ ├── event_stream.rs │ ├── generic_query.rs │ ├── replay.rs │ ├── serialized_event.rs │ ├── upcaster.rs │ └── view_repository.rs ├── query.rs ├── store.rs ├── test.rs └── test │ ├── executor.rs │ ├── framework.rs │ └── validator.rs └── tests └── lib.rs /.gitattributes: -------------------------------------------------------------------------------- 1 | Cargo.lock binary 2 | -------------------------------------------------------------------------------- /.githooks/pre-commit: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Ensures there are no formatting errors. 
3 | # 4 | TMP_DIR="_GIT_COMMIT_CHECK_DIR" 5 | CURRENT_DIR=`pwd` 6 | trap _reset_dirs EXIT 7 | function _reset_dirs () { 8 | cd $CURRENT_DIR 9 | rm -rf $TMP_DIR 10 | } 11 | mkdir $TMP_DIR 12 | git checkout-index --prefix=$TMP_DIR/ -af 13 | cd $TMP_DIR 14 | cargo fmt --check 15 | if [ $? -ne 0 ]; then 16 | echo 17 | echo -e "\033[0;31mInvalid formatting, please run 'cargo fmt' \033[0m" 18 | exit 1 19 | fi 20 | 21 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | # bump major and minor updates as soon as available 4 | - package-ecosystem: cargo 5 | target-branch: main # see https://github.com/dependabot/dependabot-core/issues/1778#issuecomment-1988140219 6 | directory: / 7 | schedule: 8 | interval: daily 9 | commit-message: 10 | prefix: chore 11 | include: scope 12 | ignore: 13 | - dependency-name: "*" 14 | update-types: 15 | - "version-update:semver-patch" 16 | 17 | # bundle patch updates together on a monthly basis 18 | # (note that security updates will be bumped as soon as available) 19 | - package-ecosystem: cargo 20 | directory: / 21 | schedule: 22 | interval: monthly 23 | commit-message: 24 | prefix: chore 25 | include: scope 26 | groups: 27 | patch-updates: 28 | update-types: 29 | - patch 30 | ignore: 31 | - dependency-name: "*" 32 | update-types: 33 | - "version-update:semver-minor" 34 | - "version-update:semver-major" 35 | 36 | # bump actions as soon as available 37 | - package-ecosystem: github-actions 38 | directory: / 39 | schedule: 40 | interval: daily 41 | commit-message: 42 | prefix: chore 43 | include: scope 44 | ignore: 45 | - dependency-name: dtolnay/rust-toolchain 46 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | on: 2 | push: 3 | 
branches: [main] 4 | pull_request: 5 | 6 | name: Continuous integration 7 | 8 | jobs: 9 | 10 | check: 11 | runs-on: ubuntu-latest 12 | steps: 13 | - uses: actions/checkout@v4 14 | - uses: dtolnay/rust-toolchain@stable 15 | - run: cargo check --all --all-targets 16 | 17 | test: 18 | runs-on: ${{ matrix.os }} 19 | strategy: 20 | matrix: 21 | os: [ubuntu-latest, windows-latest, macos-latest] 22 | rust: [stable] 23 | include: 24 | - os: ubuntu-latest 25 | rust: nightly 26 | steps: 27 | - uses: actions/checkout@v4 28 | - uses: dtolnay/rust-toolchain@master 29 | with: 30 | toolchain: ${{ matrix.rust }} 31 | - run: cargo test --locked 32 | 33 | test-persistence: 34 | name: test persistence 35 | runs-on: ubuntu-latest 36 | strategy: 37 | matrix: 38 | db: [postgres, mysql] 39 | steps: 40 | - uses: actions/checkout@v4 41 | - uses: dtolnay/rust-toolchain@stable 42 | - run: | 43 | cd persistence/${{ matrix.db }}-es 44 | docker compose up -d 45 | cargo test 46 | docker compose down 47 | 48 | # test-dynamo: 49 | # name: test persistence (dynamo) 50 | # runs-on: ubuntu-latest 51 | # steps: 52 | # - uses: actions/checkout@v4 53 | # - uses: dtolnay/rust-toolchain@stable 54 | # - name: Set up DynamoDB test environment 55 | # run: | 56 | # cd persistence/dynamo-es 57 | # docker compose up -d 58 | # ./db/create_tables.sh 59 | # cargo test 60 | # docker compose down 61 | 62 | fmt: 63 | name: format 64 | runs-on: ubuntu-latest 65 | steps: 66 | - uses: actions/checkout@v4 67 | - uses: dtolnay/rust-toolchain@nightly 68 | with: 69 | components: rustfmt 70 | - run: cargo fmt -- --check 71 | 72 | cargo-deny: 73 | runs-on: ubuntu-latest 74 | steps: 75 | - uses: actions/checkout@v4 76 | - uses: EmbarkStudios/cargo-deny-action@v2 77 | 78 | msrv: 79 | runs-on: ubuntu-latest 80 | steps: 81 | - uses: actions/checkout@master 82 | - name: Get MSRV from Cargo.toml 83 | run: | 84 | MSRV=$(grep 'rust-version' Cargo.toml | sed 's/.*= *"\(.*\)".*/\1/') 85 | echo "MSRV=$MSRV" >> $GITHUB_ENV 86 | - uses: 
dtolnay/rust-toolchain@master 87 | with: 88 | toolchain: ${{ env.MSRV }} 89 | - uses: taiki-e/install-action@cargo-no-dev-deps 90 | - run: cargo no-dev-deps check 91 | 92 | # Automatically merge if it's a Dependabot PR that passes the build 93 | dependabot: 94 | needs: [check, test, test-persistence, fmt, cargo-deny, msrv] 95 | permissions: 96 | contents: write 97 | pull-requests: write 98 | runs-on: ubuntu-latest 99 | if: github.actor == 'dependabot[bot]' 100 | steps: 101 | - name: Enable auto-merge for Dependabot PRs 102 | run: gh pr merge --auto --squash "$PR_URL" 103 | env: 104 | PR_URL: ${{github.event.pull_request.html_url}} 105 | GH_TOKEN: ${{secrets.GITHUB_TOKEN}} 106 | -------------------------------------------------------------------------------- /.github/workflows/coverage.yml: -------------------------------------------------------------------------------- 1 | on: 2 | push: 3 | branches: [main] 4 | pull_request: 5 | 6 | name: Code Coverage 7 | 8 | jobs: 9 | coverage: 10 | name: coverage 11 | runs-on: ubuntu-latest 12 | strategy: 13 | matrix: 14 | crate: [cqrs, postgres, mysql] 15 | steps: 16 | - name: checkout source 17 | uses: actions/checkout@v4 18 | 19 | - name: Install nightly toolchain 20 | uses: dtolnay/rust-toolchain@nightly 21 | with: 22 | components: llvm-tools-preview 23 | 24 | - name: Install cargo-llvm-cov 25 | uses: taiki-e/install-action@cargo-llvm-cov 26 | 27 | - name: Run llvm-cov 28 | run: | 29 | if [ "${{ matrix.crate }}" == "cqrs" ]; then 30 | cargo llvm-cov --doctests --lcov --output-path lcov.info 31 | else 32 | cd persistence/${{ matrix.crate }}-es 33 | docker compose up -d 34 | cargo llvm-cov --doctests --lcov --output-path ../../lcov.info 35 | docker compose down 36 | fi 37 | 38 | - name: Upload coverage to Codecov 39 | uses: codecov/codecov-action@v5 40 | with: 41 | token: ${{ secrets.CODECOV_TOKEN }} 42 | files: lcov.info 43 | flags: ${{ matrix.crate }} 44 | 
-------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .idea 2 | 3 | /target 4 | **/*.rs.bk 5 | 6 | /docs/book/book 7 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | The changelog for all crates in the cqrs-es project are located 2 | [here](https://github.com/serverlesstechnology/cqrs/blob/main/docs/versions/change_log.md). -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [workspace] 2 | members = [ 3 | ".", 4 | "demo", 5 | "persistence/dynamo-es", 6 | "persistence/mysql-es", 7 | "persistence/postgres-es", 8 | ] 9 | 10 | [workspace.package] 11 | version = "0.4.12" 12 | authors = ["Dave Garred "] 13 | edition = "2021" 14 | license = "Apache-2.0" 15 | keywords = ["cqrs", "event-sourcing", "serverless"] 16 | repository = "https://github.com/serverlesstechnology/cqrs" 17 | 18 | [workspace.dependencies] 19 | cqrs-es = { version = "0.4.12", path = "." } 20 | serde = "^1.0.219" 21 | tokio = "^1.45.1" 22 | uuid = { version = "1.17", features = ["v4"] } 23 | 24 | [package] 25 | name = "cqrs-es" 26 | version.workspace = true 27 | authors.workspace = true 28 | edition.workspace = true 29 | license.workspace = true 30 | keywords.workspace = true 31 | description = "A lightweight, opinionated CQRS and event sourcing framework." 
32 | documentation = "https://docs.rs/cqrs-es" 33 | repository.workspace = true 34 | readme = "README.md" 35 | exclude = ["docs"] 36 | rust-version = "1.79.0" 37 | 38 | [dependencies] 39 | async-trait = "0.1" 40 | serde = { workspace = true, features = ["derive"] } 41 | serde_json = "1.0" 42 | thiserror = "^2.0.12" 43 | tokio = { workspace = true, features = ["macros", "sync", "rt"] } 44 | 45 | [dev-dependencies] 46 | uuid.workspace = true 47 | chrono = { version = "^0.4.41", default-features = false, features = ["clock"] } 48 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM rust:latest as builder 2 | 3 | WORKDIR /home/build 4 | RUN git clone https://github.com/serverlesstechnology/cqrs.git 5 | WORKDIR /home/build/cqrs 6 | 7 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright 2023 Serverless Technology Consulting, LLC 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # cqrs 2 | 3 | **A lightweight, opinionated CQRS and event sourcing framework targeting serverless architectures.** 4 | 5 | Command Query Responsibility Segregation (CQRS) is a pattern in 6 | [Domain Driven Design](https://martinfowler.com/tags/domain%20driven%20design.html) 7 | that uses separate write and read models for application objects and interconnects them with events. 8 | Event sourcing uses the generated events as the source of truth for the 9 | state of the application. 10 | 11 | Together these provide a number of benefits: 12 | - Removes coupling between tests and application logic allowing limitless refactoring. 13 | - Greater isolation of the [aggregate](https://martinfowler.com/bliki/DDD_Aggregate.html). 14 | - Ability to create views that more accurately model our business environment. 15 | - A horizontally scalable read path. 16 | 17 | Things that could be helpful: 18 | - [User guide](https://doc.rust-cqrs.org) along with an introduction to CQRS and event sourcing. 19 | - [Demo application](./demo/) using the axum http server. 
20 | - [Change log](https://github.com/serverlesstechnology/cqrs/blob/main/docs/versions/change_log.md) 21 | 22 | Three backing data stores are supported: 23 | - [PostgreSQL](https://www.postgresql.org/) - [postgres-es](./persistence/postgres-es/) 24 | - [MySQL](https://www.mysql.com/) - [mysql-es](./persistence/mysql-es/) 25 | - [DynamoDb](https://aws.amazon.com/dynamodb/) - [dynamo-es](./persistence/dynamo-es/) 26 | 27 | Other data stores supported supported elsewhere: 28 | - [SQLite](https://www.sqlite.org/) - [sqlite-es](https://crates.io/crates/sqlite-es) 29 | 30 | [![Crates.io](https://img.shields.io/crates/v/cqrs-es)](https://crates.io/crates/cqrs-es) 31 | [![docs](https://img.shields.io/badge/API-docs-blue.svg)](https://docs.rs/cqrs-es) 32 | ![CodeBuild](https://codebuild.us-west-2.amazonaws.com/badges?uuid=eyJlbmNyeXB0ZWREYXRhIjoia3ZYcXozMjVZaFhoTldlUmhHemlWVm9LUjVaTC9LN3dSTFZpMkVTTmRycElkcGhJT3g2TUdtajZyRWZMd01xNktvUkNwLzdZYW15bzJkZldQMjJWZ1dNPSIsIml2UGFyYW1ldGVyU3BlYyI6InFORDNyaFFEQUNFQkE1NlUiLCJtYXRlcmlhbFNldFNlcmlhbCI6MX0%3D&branch=main) 33 | 34 | -------------------------------------------------------------------------------- /bin/githooks: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Configures the .githooks directory locally 3 | # 4 | git config --local core.hooksPath .githooks/ 5 | -------------------------------------------------------------------------------- /buildspec_test.yml: -------------------------------------------------------------------------------- 1 | version: 0.2 2 | 3 | phases: 4 | install: 5 | commands: 6 | - echo "${DOCKERHUB_PASSWORD}" | docker login -u "${DOCKERHUB_USERNAME}" --password-stdin 7 | - nohup /usr/local/bin/dockerd --host=unix:///var/run/docker.sock --host=tcp://127.0.0.1:2375 --storage-driver=overlay2& 8 | - timeout 15 sh -c "until docker info; do echo .; sleep 1; done" 9 | pre_build: 10 | commands: 11 | - docker build -t cqrs . 
12 | build: 13 | commands: 14 | - docker images 15 | - docker image inspect cqrs 16 | - docker run cqrs cargo test 17 | -------------------------------------------------------------------------------- /demo/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "cqrs-demo" 3 | version.workspace = true 4 | authors.workspace = true 5 | edition.workspace = true 6 | license.workspace = true 7 | keywords.workspace = true 8 | description = "A demo application for cqrs-es crate." 9 | documentation = "https://docs.rs/cqrs-demo" 10 | repository.workspace = true 11 | readme = "README.md" 12 | publish = false 13 | default-run = "cqrs-demo" 14 | 15 | [dependencies] 16 | cqrs-es = {path = "../", version = "0.4.12" } 17 | postgres-es = { path = "../persistence/postgres-es" } 18 | 19 | async-trait = "0.1" 20 | axum = "0.8" 21 | serde = { workspace = true, features = ["derive"]} 22 | serde_json = "1.0" 23 | sqlx = { version = "0.8.6", features = ["postgres", "runtime-tokio-rustls", "json"] } 24 | chrono = { version = "^0.4.41", default-features = false, features = ["clock"] } 25 | tokio = { workspace = true, features = ["full"] } 26 | 27 | lambda_http = "0.15" 28 | thiserror = "2.0.12" 29 | 30 | [[bin]] 31 | name = "cqrs-demo" 32 | path = "src/main.rs" 33 | 34 | [[bin]] 35 | name = "bootstrap" 36 | path = "src/lambda_main.rs" 37 | -------------------------------------------------------------------------------- /demo/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM public.ecr.aws/lambda/provided:al2 2 | ENV RUST_BACKTRACE=1 3 | COPY target/x86_64-unknown-linux-musl/release/bootstrap ${LAMBDA_RUNTIME_DIR} 4 | CMD [ "cqrs.handler" ] -------------------------------------------------------------------------------- /demo/README.md: -------------------------------------------------------------------------------- 1 | # cqrs-demo 2 | 3 | > A demo application using the 
[cqrs-es](https://github.com/serverlesstechnology/cqrs) framework 4 | > with a backing postgres repository. 5 | 6 | ## Requirements 7 | - rust 1.53 or greater 8 | - docker & [docker-compose](https://docs.docker.com/compose/) for starting an instance of Postgres 9 | - [postman](https://www.postman.com/) or [curl](curl/test_api.sh) (or your favorite Restful client) 10 | 11 | Alternatively, if a standard Postgres instance is running locally it can be utilized instead of the docker instance, 12 | see [the init script](db/init.sql) for the expected table configuration. 13 | 14 | ## Installation 15 | 16 | Clone this repository 17 | 18 | git clone https://github.com/serverlesstechnology/cqrs 19 | 20 | Enter the project folder and start a docker instance of PostgreSql 21 | 22 | cd cqrs/demo 23 | docker-compose up -d 24 | 25 | Start the application 26 | 27 | cargo run 28 | 29 | Call the API, the easiest way to do this is to import 30 | [the provided postman collection](cqrs-demo.postman_collection.json) 31 | into your Postman client or the `test_api.sh` curl script found in the `curl` directory. 32 | Note that the command calls are configured to return a 204 status with no content, 33 | only the query call will return a `200 OK` response with a body. 34 | For feedback on state you should call a query. 35 | 36 | ### Docs you might want 37 | 38 | - Documentation of these crates as well as an introduction to CQRS [can be found here](https://doc.rust-cqrs.org/). 39 | - [Change log](https://github.com/serverlesstechnology/cqrs/blob/master/docs/versions/change_log.md) for the `cqrs-es` project at large. 40 | 41 | # Serverless cqrs-demo 42 | A serverless demo is also available in this package. 43 | The `bootstrap` binary that is built may be run on AWS Lambda but requires a number of services to do so (e.g., IAM roles, database, etc.). 44 | For simplicity this demo will only be deployed in docker and tested locally, and will use the same database as before. 
45 | 46 | ## Additional Requirements 47 | - The x86 MUSL library - get with `rustup target add x86_64-unknown-linux-musl` 48 | - musl-gcc compiler - may be obtained on Ubuntu via `sudo apt install musl-tools` 49 | 50 | ## Building 51 | Build a release version of the `bootstrap` binary targeting x86-MUSL and build this into a docker image using the provided Dockerfile. 52 | ```shell 53 | cargo build --release \ 54 | --target x86_64-unknown-linux-musl \ 55 | --bin bootstrap 56 | docker build -t cqrs-srvrls . 57 | ``` 58 | 59 | Ensure that the Postgres docker image is running, then start a new docker container using the created image. 60 | ```shell 61 | docker run --rm --network=host cqrs-srvrls 62 | ``` 63 | 64 | Use the `test_lambda.sh` script in the `curl` directory to test the lambda container. 65 | The application is designed to be deployed using an AWS API Gateway proxy integration or Lambda Function URL. 66 | These use the v2.0 of the AWS Lamba proxy integration, more information on 67 | [this format is available here](https://docs.aws.amazon.com/apigateway/latest/developerguide/http-api-develop-integrations-lambda.html#http-api-develop-integrations-lambda.proxy-format). 
68 | -------------------------------------------------------------------------------- /demo/cqrs-demo.postman_collection.json: -------------------------------------------------------------------------------- 1 | { 2 | "info": { 3 | "_postman_id": "8ee552bb-c88e-4e10-8e9a-a553373e2b6f", 4 | "name": "cqrs-demo", 5 | "schema": "https://schema.getpostman.com/json/collection/v2.1.0/collection.json" 6 | }, 7 | "item": [ 8 | { 9 | "name": "command - OpenAccount", 10 | "event": [ 11 | { 12 | "listen": "prerequest", 13 | "script": { 14 | "exec": [ 15 | "pm.globals.set(\"account_id\", \"ACCT-\" + (Math.random().toString(16)+\"000000000\").substr(2,8));" 16 | ], 17 | "type": "text/javascript" 18 | } 19 | } 20 | ], 21 | "request": { 22 | "method": "POST", 23 | "header": [], 24 | "body": { 25 | "mode": "raw", 26 | "raw": "{\n \"OpenAccount\": {\n \"account_id\": \"{{account_id}}\"\n }\n}", 27 | "options": { 28 | "raw": { 29 | "language": "json" 30 | } 31 | } 32 | }, 33 | "url": { 34 | "raw": "localhost:3030/account/{{account_id}}", 35 | "host": [ 36 | "localhost" 37 | ], 38 | "port": "3030", 39 | "path": [ 40 | "account", 41 | "{{account_id}}" 42 | ] 43 | } 44 | }, 45 | "response": [] 46 | }, 47 | { 48 | "name": "command - DepositMoney", 49 | "request": { 50 | "method": "POST", 51 | "header": [], 52 | "body": { 53 | "mode": "raw", 54 | "raw": "{\n \"DepositMoney\": {\n \"amount\": 1000.0\n }\n}", 55 | "options": { 56 | "raw": { 57 | "language": "json" 58 | } 59 | } 60 | }, 61 | "url": { 62 | "raw": "localhost:3030/account/{{account_id}}", 63 | "host": [ 64 | "localhost" 65 | ], 66 | "port": "3030", 67 | "path": [ 68 | "account", 69 | "{{account_id}}" 70 | ] 71 | } 72 | }, 73 | "response": [] 74 | }, 75 | { 76 | "name": "command - WithdrawMoney", 77 | "request": { 78 | "method": "POST", 79 | "header": [], 80 | "body": { 81 | "mode": "raw", 82 | "raw": "{\n \"WithdrawMoney\": {\n \"atm_id\": \"ATM-N468290\",\n \"amount\": 400.0\n }\n}", 83 | "options": { 84 | "raw": { 85 | 
"language": "json" 86 | } 87 | } 88 | }, 89 | "url": { 90 | "raw": "localhost:3030/account/{{account_id}}", 91 | "host": [ 92 | "localhost" 93 | ], 94 | "port": "3030", 95 | "path": [ 96 | "account", 97 | "{{account_id}}" 98 | ] 99 | } 100 | }, 101 | "response": [] 102 | }, 103 | { 104 | "name": "command - WriteCheck", 105 | "request": { 106 | "method": "POST", 107 | "header": [], 108 | "body": { 109 | "mode": "raw", 110 | "raw": "{\n \"WriteCheck\": {\n \"check_number\": \"1170\",\n \"amount\": 256.28\n }\n}", 111 | "options": { 112 | "raw": { 113 | "language": "json" 114 | } 115 | } 116 | }, 117 | "url": { 118 | "raw": "localhost:3030/account/{{account_id}}", 119 | "host": [ 120 | "localhost" 121 | ], 122 | "port": "3030", 123 | "path": [ 124 | "account", 125 | "{{account_id}}" 126 | ] 127 | } 128 | }, 129 | "response": [] 130 | }, 131 | { 132 | "name": "query - Account", 133 | "protocolProfileBehavior": { 134 | "disableBodyPruning": true 135 | }, 136 | "request": { 137 | "method": "GET", 138 | "header": [], 139 | "body": { 140 | "mode": "raw", 141 | "raw": "{\n\t\"amount\": 1000.0\n}", 142 | "options": { 143 | "raw": { 144 | "language": "json" 145 | } 146 | } 147 | }, 148 | "url": { 149 | "raw": "localhost:3030/account/{{account_id}}", 150 | "host": [ 151 | "localhost" 152 | ], 153 | "port": "3030", 154 | "path": [ 155 | "account", 156 | "{{account_id}}" 157 | ] 158 | } 159 | }, 160 | "response": [] 161 | } 162 | ] 163 | } -------------------------------------------------------------------------------- /demo/curl/DepositMoney.json: -------------------------------------------------------------------------------- 1 | { 2 | "DepositMoney": { 3 | "amount": 1000.0 4 | } 5 | } 6 | -------------------------------------------------------------------------------- /demo/curl/WithdrawMoney.json: -------------------------------------------------------------------------------- 1 | { 2 | "WithdrawMoney": { 3 | "atm_id": "ATM-N468290", 4 | "amount": 400.0 5 | } 6 | } 
-------------------------------------------------------------------------------- /demo/curl/WriteCheck.json: -------------------------------------------------------------------------------- 1 | { 2 | "WriteCheck": { 3 | "check_number": "1170", 4 | "amount": 256.28 5 | } 6 | } 7 | -------------------------------------------------------------------------------- /demo/curl/lambda_payload.json: -------------------------------------------------------------------------------- 1 | { 2 | "version": "2.0", 3 | "routeKey": "$default", 4 | "rawPath": "/account/$ACCOUNT", 5 | "headers": { 6 | "Content-Type": "application/json" 7 | }, 8 | "requestContext": { 9 | "http": { 10 | "method": "POST", 11 | "path": "/account/$ACCOUNT", 12 | "protocol": "HTTP/1.1", 13 | "sourceIp": "192.0.2.1", 14 | "userAgent": "agent" 15 | }, 16 | "time": "12/Mar/2020:19:03:58 +0000", 17 | "timeEpoch": 1583348638390 18 | }, 19 | "body": "$PAYLOAD", 20 | "isBase64Encoded": false 21 | } -------------------------------------------------------------------------------- /demo/curl/test_api.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | RANDOM=$$ 4 | TEST_ACCT="test-acct-$RANDOM" 5 | TEST_URL="localhost:3030/account/$TEST_ACCT" 6 | echo "Using test account: $TEST_ACCT" 7 | echo "Opening an account" 8 | curl -i --location --request POST $TEST_URL --header 'Content-Type: application/json' --data-raw "{\"OpenAccount\": {\"account_id\": \"$TEST_ACCT\"}}" 9 | echo "Depositing money" 10 | curl -i --location --request POST $TEST_URL --header 'Content-Type: application/json' --data "@DepositMoney.json" 11 | echo "Withdrawing money" 12 | curl -i --location --request POST $TEST_URL --header 'Content-Type: application/json' --data "@WithdrawMoney.json" 13 | echo "Writing a check" 14 | curl -i --location --request POST $TEST_URL --header 'Content-Type: application/json' --data "@WriteCheck.json" 15 | echo "Checking account status (calling a query)" 16 | curl 
-i --location $TEST_URL 17 | echo 18 | -------------------------------------------------------------------------------- /demo/curl/test_lambda.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | RANDOM=$$ 4 | TEST_ACCT="test-acct-$RANDOM" 5 | echo "Using test account: $TEST_ACCT" 6 | 7 | TMP_FILE="_TEMPORARY_PAYLOAD_FILE.json" 8 | TEST_URL="http://localhost:8080/2015-03-31/functions/function/invocations" 9 | 10 | trap _remove_tmpfile EXIT 11 | function _remove_tmpfile { 12 | rm $TMP_FILE 13 | } 14 | 15 | function call_lambda() { 16 | local PAYLOAD=$(echo $1 | sed -e "s/\"/\\\\\\\\\"/g") 17 | sed -e "s/\$ACCOUNT/$TEST_ACCT/" -e "s/\$PAYLOAD/$PAYLOAD/" lambda_payload.json > $TMP_FILE 18 | curl -i --location --request POST $TEST_URL --data @$TMP_FILE 19 | echo 20 | echo 21 | } 22 | 23 | echo "Opening an account" 24 | call_lambda "{\"OpenAccount\": {\"account_id\": \"$TEST_ACCT\"}}" 25 | 26 | echo "Depositing money" 27 | call_lambda "{\"DepositMoney\":{\"amount\":1000.0}}" 28 | 29 | echo "Withdrawing money" 30 | call_lambda "{\"WithdrawMoney\":{\"atm_id\":\"ATM-N468290\",\"amount\":400.0}}" 31 | 32 | echo "Writing a check" 33 | call_lambda "{\"WriteCheck\":{\"check_number\":\"1170\",\"amount\":256.28}}" 34 | 35 | echo "Checking account status (calling a query)" 36 | PAYLOAD="" 37 | sed -e "s/\$ACCOUNT/$TEST_ACCT/" -e "s/\$PAYLOAD/$PAYLOAD/" -e "s/\POST/GET/" lambda_payload.json > $TMP_FILE 38 | curl -i --request POST $TEST_URL --data @$TMP_FILE 39 | echo 40 | 41 | -------------------------------------------------------------------------------- /demo/db/init.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE events 2 | ( 3 | aggregate_type text NOT NULL, 4 | aggregate_id text NOT NULL, 5 | sequence bigint CHECK (sequence >= 0) NOT NULL, 6 | event_type text NOT NULL, 7 | event_version text NOT NULL, 8 | payload json NOT NULL, 9 | metadata json NOT NULL, 10 | 
PRIMARY KEY (aggregate_type, aggregate_id, sequence) 11 | ); 12 | 13 | CREATE TABLE account_query 14 | ( 15 | view_id text NOT NULL, 16 | version bigint CHECK (version >= 0) NOT NULL, 17 | payload json NOT NULL, 18 | PRIMARY KEY (view_id) 19 | ); 20 | 21 | CREATE USER demo_user WITH ENCRYPTED PASSWORD 'demo_pass'; 22 | GRANT ALL PRIVILEGES ON DATABASE postgres TO demo_user; -------------------------------------------------------------------------------- /demo/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3.1' 2 | 3 | services: 4 | cqrs-postgres-db: 5 | image: postgres 6 | restart: always 7 | ports: 8 | - 5432:5432 9 | environment: 10 | POSTGRES_DB: demo 11 | POSTGRES_USER: demo_user 12 | POSTGRES_PASSWORD: demo_pass 13 | volumes: 14 | - './db:/docker-entrypoint-initdb.d' -------------------------------------------------------------------------------- /demo/src/command_extractor.rs: -------------------------------------------------------------------------------- 1 | use crate::domain::commands::BankAccountCommand; 2 | use axum::body::{Body, Bytes}; 3 | use axum::extract::FromRequest; 4 | use axum::http::{Request, StatusCode}; 5 | use axum::response::{IntoResponse, Response}; 6 | use std::collections::HashMap; 7 | 8 | // This is a custom Axum extension that builds metadata from the inbound request 9 | // and parses and deserializes the body as the command payload. 10 | pub struct CommandExtractor(pub HashMap, pub BankAccountCommand); 11 | 12 | const USER_AGENT_HDR: &str = "User-Agent"; 13 | 14 | impl FromRequest for CommandExtractor 15 | where 16 | S: Send + Sync, 17 | { 18 | type Rejection = CommandExtractionError; 19 | 20 | async fn from_request(req: Request, state: &S) -> Result { 21 | // Here we are including the current date/time, the uri that was called and the user-agent 22 | // in a HashMap that we will submit as metadata with the command. 
23 | let mut metadata = HashMap::default(); 24 | metadata.insert("time".to_string(), chrono::Utc::now().to_rfc3339()); 25 | metadata.insert("uri".to_string(), req.uri().to_string()); 26 | if let Some(user_agent) = req.headers().get(USER_AGENT_HDR) { 27 | if let Ok(value) = user_agent.to_str() { 28 | metadata.insert(USER_AGENT_HDR.to_string(), value.to_string()); 29 | } 30 | } 31 | 32 | // Parse and deserialize the request body as the command payload. 33 | let body = Bytes::from_request(req, state).await?; 34 | let command: BankAccountCommand = serde_json::from_slice(&body)?; 35 | Ok(Self(metadata, command)) 36 | } 37 | } 38 | 39 | pub struct CommandExtractionError; 40 | 41 | impl IntoResponse for CommandExtractionError { 42 | fn into_response(self) -> Response { 43 | ( 44 | StatusCode::BAD_REQUEST, 45 | "command could not be read".to_string(), 46 | ) 47 | .into_response() 48 | } 49 | } 50 | 51 | impl From for CommandExtractionError { 52 | fn from(_: axum::extract::rejection::BytesRejection) -> Self { 53 | Self 54 | } 55 | } 56 | 57 | impl From for CommandExtractionError { 58 | fn from(_: serde_json::Error) -> Self { 59 | Self 60 | } 61 | } 62 | -------------------------------------------------------------------------------- /demo/src/config.rs: -------------------------------------------------------------------------------- 1 | use std::sync::Arc; 2 | 3 | use cqrs_es::Query; 4 | use postgres_es::{PostgresCqrs, PostgresViewRepository}; 5 | use sqlx::{Pool, Postgres}; 6 | 7 | use crate::domain::aggregate::BankAccount; 8 | use crate::queries::{AccountQuery, BankAccountView, SimpleLoggingQuery}; 9 | use crate::services::{BankAccountServices, HappyPathBankAccountServices}; 10 | 11 | pub fn cqrs_framework( 12 | pool: Pool, 13 | ) -> ( 14 | Arc>, 15 | Arc>, 16 | ) { 17 | // A very simple query that writes each event to stdout. 18 | let simple_query = SimpleLoggingQuery {}; 19 | 20 | // A query that stores the current state of an individual account. 
21 | let account_view_repo = Arc::new(PostgresViewRepository::new("account_query", pool.clone())); 22 | let mut account_query = AccountQuery::new(account_view_repo.clone()); 23 | 24 | // Without a query error handler there will be no indication if an 25 | // error occurs (e.g., database connection failure, missing columns or table). 26 | // Consider logging an error or panicking in your own application. 27 | account_query.use_error_handler(Box::new(|e| println!("{e}"))); 28 | 29 | // Create and return an event-sourced `CqrsFramework`. 30 | let queries: Vec>> = 31 | vec![Box::new(simple_query), Box::new(account_query)]; 32 | let services = BankAccountServices::new(Box::new(HappyPathBankAccountServices)); 33 | ( 34 | Arc::new(postgres_es::postgres_cqrs(pool, queries, services)), 35 | account_view_repo, 36 | ) 37 | } 38 | -------------------------------------------------------------------------------- /demo/src/domain/commands.rs: -------------------------------------------------------------------------------- 1 | use serde::{Deserialize, Serialize}; 2 | 3 | #[derive(Debug, Serialize, Deserialize)] 4 | pub enum BankAccountCommand { 5 | OpenAccount { account_id: String }, 6 | DepositMoney { amount: f64 }, 7 | WithdrawMoney { amount: f64, atm_id: String }, 8 | WriteCheck { check_number: String, amount: f64 }, 9 | } 10 | -------------------------------------------------------------------------------- /demo/src/domain/events.rs: -------------------------------------------------------------------------------- 1 | use cqrs_es::DomainEvent; 2 | use serde::{Deserialize, Serialize}; 3 | 4 | #[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] 5 | pub enum BankAccountEvent { 6 | AccountOpened { 7 | account_id: String, 8 | }, 9 | CustomerDepositedMoney { 10 | amount: f64, 11 | balance: f64, 12 | }, 13 | CustomerWithdrewCash { 14 | amount: f64, 15 | balance: f64, 16 | }, 17 | CustomerWroteCheck { 18 | check_number: String, 19 | amount: f64, 20 | balance: f64, 21 | }, 22 | } 
23 | 24 | impl DomainEvent for BankAccountEvent { 25 | fn event_type(&self) -> String { 26 | match self { 27 | Self::AccountOpened { .. } => "AccountOpened".to_string(), 28 | Self::CustomerDepositedMoney { .. } => "CustomerDepositedMoney".to_string(), 29 | Self::CustomerWithdrewCash { .. } => "CustomerWithdrewCash".to_string(), 30 | Self::CustomerWroteCheck { .. } => "CustomerWroteCheck".to_string(), 31 | } 32 | } 33 | 34 | fn event_version(&self) -> String { 35 | "1.0".to_string() 36 | } 37 | } 38 | 39 | #[derive(Debug, thiserror::Error)] 40 | #[error("{0}")] 41 | pub struct BankAccountError(String); 42 | 43 | impl From<&str> for BankAccountError { 44 | fn from(msg: &str) -> Self { 45 | Self(msg.to_string()) 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /demo/src/domain/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod aggregate; 2 | pub mod commands; 3 | pub mod events; 4 | -------------------------------------------------------------------------------- /demo/src/lambda_main.rs: -------------------------------------------------------------------------------- 1 | use axum::extract::{Path, State}; 2 | use axum::http::StatusCode; 3 | use axum::response::Response; 4 | use axum::routing::get; 5 | use axum::Router; 6 | use cqrs_demo::command_extractor::CommandExtractor; 7 | use cqrs_demo::route_handler::{command_handler, query_handler}; 8 | use cqrs_demo::state::{new_application_state, ApplicationState}; 9 | use lambda_http::{run, Error}; 10 | 11 | #[tokio::main] 12 | async fn main() -> Result<(), Error> { 13 | let state = new_application_state().await; 14 | let routes = Router::new().route( 15 | "/account/:account_id", 16 | get(lambda_query_handler).post(lambda_command_handler), 17 | ); 18 | let app = Router::new().merge(routes).with_state(state); 19 | run(app).await?; 20 | Ok(()) 21 | } 22 | pub async fn lambda_query_handler( 23 | Path(account_id): Path, 24 | 
State(state): State, 25 | ) -> Result { 26 | Ok(query_handler(Path(account_id), State(state)).await) 27 | } 28 | async fn lambda_command_handler( 29 | Path(account_id): Path, 30 | State(state): State, 31 | CommandExtractor(metadata, command): CommandExtractor, 32 | ) -> Result { 33 | Ok(command_handler( 34 | Path(account_id), 35 | State(state), 36 | CommandExtractor(metadata, command), 37 | ) 38 | .await) 39 | } 40 | -------------------------------------------------------------------------------- /demo/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![forbid(unsafe_code)] 2 | #![deny(clippy::all)] 3 | 4 | pub mod command_extractor; 5 | mod config; 6 | mod domain; 7 | mod queries; 8 | pub mod route_handler; 9 | mod services; 10 | pub mod state; 11 | -------------------------------------------------------------------------------- /demo/src/main.rs: -------------------------------------------------------------------------------- 1 | use axum::routing::get; 2 | use axum::Router; 3 | use cqrs_demo::route_handler::{command_handler, query_handler}; 4 | use cqrs_demo::state::new_application_state; 5 | use std::net::SocketAddr; 6 | 7 | #[tokio::main] 8 | async fn main() { 9 | let state = new_application_state().await; 10 | // Configure the Axum routes and services. 11 | // For this example a single logical endpoint is used and the HTTP method 12 | // distinguishes whether the call is a command or a query. 
13 | let router = Router::new() 14 | .route( 15 | "/account/:account_id", 16 | get(query_handler).post(command_handler), 17 | ) 18 | .with_state(state); 19 | 20 | let addr = SocketAddr::from(([0, 0, 0, 0], 3030)); 21 | 22 | axum::serve(tokio::net::TcpListener::bind(addr).await.unwrap(), router) 23 | .await 24 | .unwrap(); 25 | } 26 | -------------------------------------------------------------------------------- /demo/src/queries.rs: -------------------------------------------------------------------------------- 1 | use async_trait::async_trait; 2 | use cqrs_es::persist::GenericQuery; 3 | use cqrs_es::{EventEnvelope, Query, View}; 4 | use postgres_es::PostgresViewRepository; 5 | use serde::{Deserialize, Serialize}; 6 | 7 | use crate::domain::aggregate::BankAccount; 8 | use crate::domain::events::BankAccountEvent; 9 | 10 | pub struct SimpleLoggingQuery {} 11 | 12 | // Our simplest query, this is great for debugging but absolutely useless in production. 13 | // This query just pretty prints the events as they are processed. 14 | #[async_trait] 15 | impl Query for SimpleLoggingQuery { 16 | async fn dispatch(&self, aggregate_id: &str, events: &[EventEnvelope]) { 17 | for event in events { 18 | let payload = serde_json::to_string_pretty(&event.payload).unwrap(); 19 | println!("{}-{}\n{}", aggregate_id, event.sequence, payload); 20 | } 21 | } 22 | } 23 | 24 | // Our second query, this one will be handled with Postgres `GenericQuery` 25 | // which will serialize and persist our view after it is updated. It also 26 | // provides a `load` method to deserialize the view on request. 27 | pub type AccountQuery = GenericQuery< 28 | PostgresViewRepository, 29 | BankAccountView, 30 | BankAccount, 31 | >; 32 | 33 | // The view for a BankAccount query, for a standard http application this should 34 | // be designed to reflect the response dto that will be returned to a user. 
35 | #[derive(Debug, Default, Serialize, Deserialize)] 36 | pub struct BankAccountView { 37 | account_id: Option, 38 | balance: f64, 39 | written_checks: Vec, 40 | ledger: Vec, 41 | } 42 | 43 | #[derive(Debug, Serialize, Deserialize)] 44 | pub struct LedgerEntry { 45 | description: String, 46 | amount: f64, 47 | } 48 | impl LedgerEntry { 49 | fn new(description: String, amount: f64) -> Self { 50 | Self { 51 | description, 52 | amount, 53 | } 54 | } 55 | } 56 | 57 | // This updates the view with events as they are committed. 58 | // The logic should be minimal here, e.g., don't calculate the account balance, 59 | // design the events to carry the balance information instead. 60 | impl View for BankAccountView { 61 | fn update(&mut self, event: &EventEnvelope) { 62 | match &event.payload { 63 | BankAccountEvent::AccountOpened { account_id } => { 64 | self.account_id = Some(account_id.clone()); 65 | } 66 | 67 | BankAccountEvent::CustomerDepositedMoney { amount, balance } => { 68 | self.ledger 69 | .push(LedgerEntry::new("deposit".to_string(), *amount)); 70 | self.balance = *balance; 71 | } 72 | 73 | BankAccountEvent::CustomerWithdrewCash { amount, balance } => { 74 | self.ledger 75 | .push(LedgerEntry::new("atm withdrawal".to_string(), *amount)); 76 | self.balance = *balance; 77 | } 78 | 79 | BankAccountEvent::CustomerWroteCheck { 80 | check_number, 81 | amount, 82 | balance, 83 | } => { 84 | self.ledger 85 | .push(LedgerEntry::new(check_number.to_string(), *amount)); 86 | self.written_checks.push(check_number.clone()); 87 | self.balance = *balance; 88 | } 89 | } 90 | } 91 | } 92 | -------------------------------------------------------------------------------- /demo/src/route_handler.rs: -------------------------------------------------------------------------------- 1 | use crate::command_extractor::CommandExtractor; 2 | use crate::state::ApplicationState; 3 | use axum::extract::{Path, State}; 4 | use axum::http::StatusCode; 5 | use axum::response::{IntoResponse, 
Response}; 6 | use axum::Json; 7 | use cqrs_es::persist::ViewRepository; 8 | 9 | // Serves as our query endpoint to respond with the materialized `BankAccountView` 10 | // for the requested account. 11 | pub async fn query_handler( 12 | Path(account_id): Path, 13 | State(state): State, 14 | ) -> Response { 15 | match state.account_query.load(&account_id).await { 16 | Ok(Some(account_view)) => (StatusCode::OK, Json(account_view)).into_response(), 17 | Ok(None) => StatusCode::NOT_FOUND.into_response(), 18 | Err(err) => { 19 | println!("Error: {err:#?}\n"); 20 | (StatusCode::INTERNAL_SERVER_ERROR, err.to_string()).into_response() 21 | } 22 | } 23 | } 24 | 25 | // Serves as our command endpoint to make changes in a `BankAccount` aggregate. 26 | pub async fn command_handler( 27 | Path(account_id): Path, 28 | State(state): State, 29 | CommandExtractor(metadata, command): CommandExtractor, 30 | ) -> Response { 31 | match state 32 | .cqrs 33 | .execute_with_metadata(&account_id, command, metadata) 34 | .await 35 | { 36 | Ok(()) => StatusCode::NO_CONTENT.into_response(), 37 | Err(err) => { 38 | println!("Error: {err:#?}\n"); 39 | (StatusCode::BAD_REQUEST, err.to_string()).into_response() 40 | } 41 | } 42 | } 43 | -------------------------------------------------------------------------------- /demo/src/services.rs: -------------------------------------------------------------------------------- 1 | use async_trait::async_trait; 2 | 3 | pub struct BankAccountServices { 4 | pub services: Box, 5 | } 6 | 7 | impl BankAccountServices { 8 | pub fn new(services: Box) -> Self { 9 | Self { services } 10 | } 11 | } 12 | 13 | // External services must be called during the processing of the command. 
14 | #[async_trait] 15 | pub trait BankAccountApi: Sync + Send { 16 | async fn atm_withdrawal(&self, atm_id: &str, amount: f64) -> Result<(), AtmError>; 17 | async fn validate_check(&self, account_id: &str, check: &str) -> Result<(), CheckingError>; 18 | } 19 | pub struct AtmError; 20 | pub struct CheckingError; 21 | 22 | // A very simple "happy path" set of services that always succeed. 23 | pub struct HappyPathBankAccountServices; 24 | 25 | #[async_trait] 26 | impl BankAccountApi for HappyPathBankAccountServices { 27 | async fn atm_withdrawal(&self, _atm_id: &str, _amount: f64) -> Result<(), AtmError> { 28 | Ok(()) 29 | } 30 | 31 | async fn validate_check( 32 | &self, 33 | _account_id: &str, 34 | _check_number: &str, 35 | ) -> Result<(), CheckingError> { 36 | Ok(()) 37 | } 38 | } 39 | -------------------------------------------------------------------------------- /demo/src/state.rs: -------------------------------------------------------------------------------- 1 | use crate::config::cqrs_framework; 2 | use crate::domain::aggregate::BankAccount; 3 | use crate::queries::BankAccountView; 4 | use postgres_es::{default_postgress_pool, PostgresCqrs, PostgresViewRepository}; 5 | use std::sync::Arc; 6 | 7 | #[derive(Clone)] 8 | pub struct ApplicationState { 9 | pub cqrs: Arc>, 10 | pub account_query: Arc>, 11 | } 12 | 13 | pub async fn new_application_state() -> ApplicationState { 14 | // Configure the CQRS framework, backed by a Postgres database, along with two queries: 15 | // - a simply-query prints events to stdout as they are published 16 | // - `account_query` stores the current state of the account in a ViewRepository that we can access 17 | // 18 | // The needed database tables are automatically configured with `docker-compose up -d`, 19 | // see init file at `/db/init.sql` for more. 
20 | let pool = default_postgress_pool("postgresql://demo_user:demo_pass@localhost:5432/demo").await; 21 | let (cqrs, account_query) = cqrs_framework(pool); 22 | ApplicationState { 23 | cqrs, 24 | account_query, 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /deny.toml: -------------------------------------------------------------------------------- 1 | 2 | # This section is considered when running `cargo deny check advisories` 3 | # More documentation for the advisories section can be found here: 4 | # https://embarkstudios.github.io/cargo-deny/checks/advisories/cfg.html 5 | [advisories] 6 | # The path where the advisory database is cloned/fetched into 7 | db-path = "~/.cargo/advisory-db" 8 | # The url(s) of the advisory databases to use 9 | db-urls = ["https://github.com/rustsec/advisory-db"] 10 | # The lint level for crates that have been yanked from their source registry 11 | yanked = "warn" 12 | 13 | [licenses] 14 | allow = [ 15 | "MIT", 16 | "Unicode-3.0", 17 | "Unicode-DFS-2016", 18 | "Apache-2.0", 19 | ] 20 | confidence-threshold = 1.0 21 | 22 | [bans] 23 | # Lint level for when multiple versions of the same crate are detected 24 | multiple-versions = "warn" 25 | # Lint level for when a crate version requirement is `*` 26 | wildcards = "deny" 27 | # The graph highlighting used when creating dotgraphs for crates 28 | # with multiple versions 29 | # * lowest-version - The path to the lowest versioned duplicate is highlighted 30 | # * simplest-path - The path to the version with the fewest edges is highlighted 31 | # * all - Both lowest-version and simplest-path are used 32 | highlight = "all" 33 | # The default lint level for `default` features for crates that are members of 34 | # the workspace that is being checked. This can be overriden by allowing/denying 35 | # `default` on a crate-by-crate basis if desired. 
36 | workspace-default-features = "allow" 37 | # The default lint level for `default` features for external crates that are not 38 | # members of the workspace. This can be overriden by allowing/denying `default` 39 | # on a crate-by-crate basis if desired. 40 | external-default-features = "allow" 41 | 42 | [sources] 43 | # Lint level for what to happen when a crate from a crate registry that is not 44 | # in the allow list is encountered 45 | unknown-registry = "warn" 46 | # Lint level for what to happen when a crate from a git repository that is not 47 | # in the allow list is encountered 48 | unknown-git = "warn" 49 | # List of URLs for allowed crate registries. Defaults to the crates.io index 50 | # if not specified. If it is specified but empty, no registries are allowed. 51 | allow-registry = ["https://github.com/rust-lang/crates.io-index"] 52 | -------------------------------------------------------------------------------- /docs/book/src/SUMMARY.md: -------------------------------------------------------------------------------- 1 | # Summary 2 | 3 | [CQRS and Event Sourcing using Rust](intro.md) 4 | - [The patterns](theory.md) 5 | - [Domain driven design](theory_ddd.md) 6 | - [CQRS](theory_cqrs.md) 7 | - [Making changes to the application state](theory_updates.md) 8 | - [Queries](theory_queries.md) 9 | - [Event Sourcing](theory_event_sourcing.md) 10 | - [Getting started](intro_getting_started.md) 11 | - [Add commands](intro_add_commands.md) 12 | - [Add domain events](intro_add_events.md) 13 | - [Add an error and service](intro_add_error.md) 14 | - [Add an aggregate](intro_add_aggregate.md) 15 | - [Domain tests](test_add_first.md) 16 | - [Adding more complex logic](test_add_more.md) 17 | - [Configuring a (test) application](demo_application.md) 18 | - [An event store](demo_event_store.md) 19 | - [A simple query](demo_simple_query.md) 20 | - [Putting everything together](demo_application_framework.md) 21 | - [Building an application](application_building.md) 
22 | - [Persisted event store](application_event_store.md) 23 | - [Queries with persisted views](application_persisted_views.md) 24 | - [Including metadata](application_metadata.md) 25 | - [Event upcasters](advanced_event_upcasters.md) 26 | -------------------------------------------------------------------------------- /docs/book/src/advanced_debugging_state.md: -------------------------------------------------------------------------------- 1 | ## Debugging state errors 2 | 3 | #### Sorry, we're just getting started here, this isn't quite done. 4 | 5 | Have some patience, this section should be completed soon. -------------------------------------------------------------------------------- /docs/book/src/advanced_event_replay.md: -------------------------------------------------------------------------------- 1 | ## Event replay 2 | 3 | #### Sorry, we're just getting started here, this isn't quite done. 4 | 5 | Have some patience, this section should be completed soon. -------------------------------------------------------------------------------- /docs/book/src/advanced_event_upcasters.md: -------------------------------------------------------------------------------- 1 | ## Event upcasters 2 | 3 | Over time the domain model will need to be modified to adapt to new business rules, 4 | and with event sourcing the domain model directly relates to events. 5 | Event changes can be minimized by keeping events small and focused, but they will be needed. 6 | This can be a challenge because domain events are append-only and immutable. 7 | 8 | As an example, if our bank services only local customers there is no need to identify the state as part of their address, 9 | this is understood. 
The payload for an `UpdateAddress` event might look something like: 10 | ```json 11 | { 12 | "UpdateAddress": { 13 | "address": "912 Spring St", 14 | "city": "Seattle" 15 | } 16 | } 17 | ``` 18 | 19 | If however the bank begins servicing customers in other states we'll need additional information in our payload, e.g., 20 | ```json 21 | { 22 | "UpdateAddress": { 23 | "address": "912 Spring St", 24 | "city": "Seattle", 25 | "state": "WA" 26 | } 27 | } 28 | ``` 29 | 30 | We are event sourced, so we will need to load past events in order to build our aggregate to process new commands. 31 | However, the persisted form of the event no longer matches the new structure. 32 | 33 | The naive solution of versioning events is not preferred due to the duplication of both business logic and tests. 34 | This duplication requires additional maintenance, a risk of logic diverging for the same tasks, and leaves a burden 35 | on the developer of any new code to understand the legacy changes. 36 | 37 | The preferred solution is to use upcasters to convert a legacy event payload to the structure that is expected by the 38 | current aggregate logic. 39 | 40 | ### Event Upcaster 41 | 42 | The `EventUpcaster` trait provides the functionality to make this conversion. 43 | A persistence repository will use any configured upcasters to 'upcast' events as they are loaded. 44 | For each event, the stored `event_type` and `event_version` will be compared to each upcaster to determine if it 45 | should be upcast, and the to upcast it if needed. 46 | ```rust 47 | pub trait EventUpcaster: Send + Sync { 48 | fn can_upcast(&self, event_type: &str, event_version: &str) -> bool; 49 | fn upcast(&self, event: SerializedEvent) -> SerializedEvent; 50 | } 51 | ``` 52 | The `EventUpcaster` trait provides flexibility to modify a serialized event in any way needed including changing the 53 | name and modifying metadata. 
54 | In most cases this flexibility is not needed and a `SemanticVersionEventUpcaster` can be used, this implementation 55 | will use the provided function to modify the event payload of any event with a matching `event_name` and with an 56 | `event_version` that is previous to the configured value. 57 | 58 | For the above example we only need to add a field: 59 | ```rust,ignore 60 | let upcaster = SemanticVersionEventUpcaster::new("UpdateAddress", "0.3.0", Box::new(my_upcaster)); 61 | 62 | fn my_upcast_fn(mut payload: Value) -> Value { 63 | match payload.get_mut("UpdateAddress").unwrap() { 64 | Value::Object(object) => { 65 | object.insert("state".to_string(), Value::String("WA".to_string())); 66 | payload 67 | } 68 | _ => panic!("invalid payload encountered"), 69 | } 70 | } 71 | ``` 72 | -------------------------------------------------------------------------------- /docs/book/src/advanced_topics.md: -------------------------------------------------------------------------------- 1 | ## Advanced topics 2 | 3 | Running a CQRS application in production provides benefits and concerns different from that in a standard webapp. 4 | These advanced topics cover these additional considerations. 5 | 6 | One of the primary reasons for using CQRS with event sourcing is to allow your domain model to change over time. 7 | For changes to the structure or payload of events we use 8 | [event upcasters](advanced_event_upcasters.md) that translate a persisted older event 9 | structure into the newer form. 10 | 11 | The logic of queries and/or structure of the underlying views may also change over time. 12 | The approach here is to use an 13 | [event replay](advanced_event_replay.md) against updated logic to rebuild the persisted views. 14 | 15 | Tracking down the origin of state errors can be a notoriously tricky task, particularly if they arise in a 16 | production environment. 
Event sourcing allows us to greatly simplify the task of
21 | 22 | For using `postgres-es` for persistence, add to the dependencies section of your `Cargo.toml`: 23 | 24 | ```toml 25 | [dependencies] 26 | cqrs-es = "0.3.0" 27 | postgres-es = "0.3.0" 28 | ``` -------------------------------------------------------------------------------- /docs/book/src/application_event_store.md: -------------------------------------------------------------------------------- 1 | ## Persisted Event Store 2 | 3 | A `PersistedEventStore` is used to back the CqrsFramework and handle the storing and loading of aggregates 4 | (including domain events) in a database. 5 | The `PersistedEventStore` relies on a `PersistedEventRepository` for the actual database access of events and snapshots. 6 | For the `postgres-es` crate this is implemented by a `PostgresEventRepository` which in turn relies on a 7 | database connection pool. 8 | 9 | Creating a `PostgresEventRepository` 10 | ```rust 11 | fn configure_repo() -> PostgresEventRepository { 12 | let connection_string = "postgresql://test_user:test_pass@localhost:5432/test"; 13 | let pool: Pool = default_postgress_pool(connection_string).await; 14 | PostgresEventRepository::new(pool) 15 | } 16 | ``` 17 | 18 | The default repository will expect to find tables named `events` and `snapshots`, but the table names are configurable. 
19 | To create these tables in a PostgreSql database (see database initialization files for other repository crates): 20 | ```sql 21 | CREATE TABLE events 22 | ( 23 | aggregate_type text NOT NULL, 24 | aggregate_id text NOT NULL, 25 | sequence bigint CHECK (sequence >= 0) NOT NULL, 26 | event_type text NOT NULL, 27 | event_version text NOT NULL, 28 | payload json NOT NULL, 29 | metadata json NOT NULL, 30 | timestamp timestamp with time zone DEFAULT (CURRENT_TIMESTAMP), 31 | PRIMARY KEY (aggregate_type, aggregate_id, sequence) 32 | ); 33 | 34 | CREATE TABLE snapshots 35 | ( 36 | aggregate_type text NOT NULL, 37 | aggregate_id text NOT NULL, 38 | last_sequence bigint CHECK (last_sequence >= 0) NOT NULL, 39 | current_snapshot bigint CHECK (current_snapshot >= 0) NOT NULL, 40 | payload json NOT NULL, 41 | timestamp timestamp with time zone DEFAULT (CURRENT_TIMESTAMP), 42 | PRIMARY KEY (aggregate_type, aggregate_id, last_sequence) 43 | ); 44 | ``` 45 | Note that the `snapshots` table is not needed for pure event sourcing. 46 | -------------------------------------------------------------------------------- /docs/book/src/application_metadata.md: -------------------------------------------------------------------------------- 1 | ## Including metadata with our commands 2 | 3 | Any useful application will require much more information than what is solely needed to satisfy the domain (business) logic. 4 | This additional data could be needed for debugging, security, an audit trail or a variety of reasons. 5 | Some examples include: 6 | - server name, region or other operational information 7 | - username that authorized the request 8 | - IP address that made the call 9 | - date and time that the command was processed 10 | - a request id for distributed tracing 11 | 12 | A Domain Event is intended to only carry information that is pertinent to the domain logic, 13 | this additional information should be added as metadata when the command is processed. 
14 | All events that are produced will be persisted along with a copy of this metadata. 15 | Any configured Queries will also receive the metadata along with the event payload as part of an `EventEnvelope`. 16 | 17 | The `CqrsFramework` expects the metadata in the form of key-value pairs stored in a standard `HashMap`, 18 | this metadata should be passed along with the command at the time of execution. 19 | 20 | ```rust 21 | async fn process_command( 22 | cqrs: PostgresCqrs, 23 | command: BankAccountCommand, 24 | ) -> Result<(), AggregateError> { 25 | let mut metadata = HashMap::new(); 26 | metadata.insert("time".to_string(), chrono::Utc::now().to_rfc3339()); 27 | 28 | cqrs.execute_with_metadata("agg-id-F39A0C", command, metadata).await 29 | } 30 | ``` 31 | -------------------------------------------------------------------------------- /docs/book/src/application_persisted_views.md: -------------------------------------------------------------------------------- 1 | ## Queries with persisted views 2 | 3 | A `ViewRepository` provides a simple database backed repository for views that do not require multiple indexes. 4 | This is designed to back work with a `GenericQuery` to apply events to a view synchronously immediately after those 5 | events are committed. 6 | 7 | A `GenericQuery` will load the view, apply any events, and store the updated version back in the database. 8 | The logic for the update is placed in a `View` implementation. 9 | For our bank account example this might look like: 10 | ```rust 11 | impl View for BankAccountView { 12 | fn update(&mut self, event: &EventEnvelope) { 13 | match &event.payload { 14 | BankAccountEvent::CustomerDepositedMoney { amount, balance } => { 15 | self.ledger.push(LedgerEntry::new("deposit", *amount)); 16 | self.balance = *balance; 17 | } 18 | ... 
19 | } 20 | } 21 | } 22 | ``` 23 | 24 | The view repositories use the same database connection as the event repositories, for `postgres-es` this is a database 25 | connection pool. 26 | ```rust 27 | type MyViewRepository = PostgresViewRepository; 28 | 29 | fn configure_view_repository(db_pool: Pool) -> MyViewRepository { 30 | PostgresViewRepository::new("my_view_name", db_pool) 31 | } 32 | ``` 33 | 34 | The database must have a table prepared before use, where the table name should match the value passed while 35 | initiating the `PostgresViewRepository`. 36 | ```sql 37 | CREATE TABLE my_view_name 38 | ( 39 | view_id text NOT NULL, 40 | version bigint CHECK (version >= 0) NOT NULL, 41 | payload json NOT NULL, 42 | PRIMARY KEY (view_id) 43 | ); 44 | ``` 45 | 46 | To use this view repository with a `GenericQuery` it must be configured with the CqrsFramework. 47 | 48 | ```rust 49 | fn configure_cqrs(store: PostgresEventStore, my_view_repo: MyViewRepository) -> CqrsFramework { 50 | let my_query = GenericQuery::::new(my_view_repo); 51 | let my_query = Box::new(my_query); 52 | CqrsFramework::new(store, vec![my_query]); 53 | } 54 | ``` 55 | -------------------------------------------------------------------------------- /docs/book/src/book.toml: -------------------------------------------------------------------------------- 1 | [book] 2 | authors = ["Dave Garred"] 3 | language = "en" 4 | multilingual = false 5 | src = "src" 6 | edition = "2021" 7 | title = "Getting started with CQRS using Rust" 8 | -------------------------------------------------------------------------------- /docs/book/src/demo_application.md: -------------------------------------------------------------------------------- 1 | ## Putting it all together 2 | 3 | Now that we have built and tested the logic for our application we need to find a way to utilize it. 4 | We will start by 5 | building a test application with in-memory persistence in order to understand the fundamentals. 
6 | We will need three things for this: 7 | 8 | - an event store to insert and retrieve our events 9 | - a query to read our events once committed 10 | - a framework to wire everything together and process commands 11 | -------------------------------------------------------------------------------- /docs/book/src/demo_application_framework.md: -------------------------------------------------------------------------------- 1 | ## Putting everything together 2 | 3 | The final piece of our test application is a CQRS framework to load up the aggregate, process incoming commands, 4 | persist the events and apply them to our queries. This is provided by a `CqrsFramework` component which takes an 5 | `EventStore` and a vector of boxed `Query`s. 6 | 7 | Wiring this all up and firing two commands: 8 | 9 | ```rust 10 | #[tokio::test] 11 | async fn test_event_store() { 12 | let event_store = MemStore::::default(); 13 | let query = SimpleLoggingQuery {}; 14 | let cqrs = CqrsFramework::new(event_store, vec![Box::new(query)], BankAccountServices); 15 | 16 | let aggregate_id = "aggregate-instance-A"; 17 | 18 | // deposit $1000 19 | cqrs.execute( 20 | aggregate_id, 21 | BankAccountCommand::DepositMoney { amount: 1000_f64 }, 22 | ) 23 | .await 24 | .unwrap(); 25 | 26 | // write a check for $236.15 27 | cqrs.execute( 28 | aggregate_id, 29 | BankAccountCommand::WriteCheck { 30 | check_number: "1337".to_string(), 31 | amount: 236.15, 32 | }, 33 | ) 34 | .await 35 | .unwrap(); 36 | } 37 | ``` 38 | 39 | To run the test we should ensure that rust does not consume our output. 
40 | 41 | ``` 42 | cargo test -- --nocapture 43 | ``` 44 | 45 | Which should give us output something like this: 46 | 47 | ``` 48 | running 1 test 49 | loading: 0 events for aggregate ID 'aggregate-instance-A' 50 | storing: 1 new events for aggregate ID 'aggregate-instance-A' 51 | aggregate-instance-A-1 52 | { 53 | "CustomerDepositedMoney": { 54 | "amount": 1000.0, 55 | "balance": 1000.0 56 | } 57 | } 58 | loading: 1 events for aggregate ID 'aggregate-instance-A' 59 | storing: 1 new events for aggregate ID 'aggregate-instance-A' 60 | aggregate-instance-A-2 61 | { 62 | "CustomerWroteCheck": { 63 | "check_number": "1337", 64 | "amount": 236.15, 65 | "balance": 763.85 66 | } 67 | } 68 | ``` 69 | 70 | Here we see the output from our `SimpleLoggingQuery` along with some logging from the `MemStore` which is just what we hoped 71 | for. 72 | 73 | This shows our entire framework working including loading events, rebuilding the aggregate, processing commands and 74 | distributing events to a query. Next, we will move on to actually using this in an application. -------------------------------------------------------------------------------- /docs/book/src/demo_event_store.md: -------------------------------------------------------------------------------- 1 | ## Using an event store 2 | 3 | In an event sourced application the domain events are our source of truth, and to provide persistence we need an event 4 | store. Any persistence mechanism can be used but there are a few things that we need from a production event store: 5 | 6 | - append only 7 | - load all events, in order of commit, for a single aggregate instance 8 | - a guarantee that no events are missing 9 | - optimistic locking on the aggregate instance 10 | - provide additional metadata, outside of the event payload, for auditing or logging use 11 | 12 | To provide our needed guarantees we identify any domain event by the combination of the aggregate type, the 13 | aggregate instance ID and the sequence number.
14 | This allows us to correctly order all events, guarantee that we are not missing any events for an aggregate instance, 15 | and to provide optimistic locking on append. 16 | 17 | To keep all of the context surrounding an event together with the event payload, we use an `EventEnvelope` consisting of: 18 | 19 | - aggregate instance ID 20 | - sequence number 21 | - aggregate type 22 | - payload 23 | - metadata 24 | 25 | ### The `EventStore` trait 26 | 27 | In our application we need an implementation of `EventStore` for appending and loading events. 28 | For our test application we will use `MemStore`, the in-memory event store that ships with the `cqrs-es` crate. 29 | 30 | ```rust 31 | use cqrs_es::mem_store::MemStore; 32 | 33 | let event_store = MemStore::::default(); 34 | ``` 35 | 36 | This implementation will not give us any real persistence but it will allow us to get started with testing our 37 | application. Later we will use another crate to provide a production capable implementation. -------------------------------------------------------------------------------- /docs/book/src/demo_simple_query.md: -------------------------------------------------------------------------------- 1 | ## A simple query 2 | 3 | The command processing portion of a CQRS handles updates to the system but provides no insight into the current 4 | state. For this we will need one or more queries that read the events as they are committed. In the `cqrs-es` crate 5 | these queries should implement the `Query` trait. 6 | 7 | For our first query, we will just print the aggregate instance ID, sequence number and the event payload.
8 | 9 | ```rust 10 | struct SimpleLoggingQuery {} 11 | 12 | #[async_trait] 13 | impl Query for SimpleLoggingQuery { 14 | async fn dispatch(&self, aggregate_id: &str, events: &[EventEnvelope]) { 15 | for event in events { 16 | println!("{}-{}\n{:#?}", aggregate_id, event.sequence, &event.payload); 17 | } 18 | } 19 | } 20 | ``` 21 | 22 | Note that the trait's sole method takes a vector of 23 | [`EventEnvelope`](https://docs.rs/cqrs-es/0.3.0/cqrs_es/struct.EventEnvelope.html)s, 24 | a struct that contains the event along with supporting context and 25 | [metadata](application_metadata.md). 26 | This allows queries to have the full context surrounding the event, important since a query may be 27 | interested in a very different set of fields than those of interest within the aggregate. 28 | 29 | E.g., the user's IP address is likely unimportant for the business rules but could be of interest in a query 30 | used for security audits 31 | -------------------------------------------------------------------------------- /docs/book/src/event_upcasters.md: -------------------------------------------------------------------------------- 1 | # Event upcasters 2 | -------------------------------------------------------------------------------- /docs/book/src/images/CQRS.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serverlesstechnology/cqrs/8480cbebadb1ccea5901752d218955f9fd8f3e6d/docs/book/src/images/CQRS.png -------------------------------------------------------------------------------- /docs/book/src/images/CQRS_flow.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serverlesstechnology/cqrs/8480cbebadb1ccea5901752d218955f9fd8f3e6d/docs/book/src/images/CQRS_flow.png -------------------------------------------------------------------------------- /docs/book/src/images/bounded_context.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/serverlesstechnology/cqrs/8480cbebadb1ccea5901752d218955f9fd8f3e6d/docs/book/src/images/bounded_context.png -------------------------------------------------------------------------------- /docs/book/src/images/compare_standard_application.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serverlesstechnology/cqrs/8480cbebadb1ccea5901752d218955f9fd8f3e6d/docs/book/src/images/compare_standard_application.png -------------------------------------------------------------------------------- /docs/book/src/images/event-replay.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/serverlesstechnology/cqrs/8480cbebadb1ccea5901752d218955f9fd8f3e6d/docs/book/src/images/event-replay.png -------------------------------------------------------------------------------- /docs/book/src/intro.md: -------------------------------------------------------------------------------- 1 | # CQRS and Event Sourcing using Rust 2 | 3 | The [`cqrs-es` crate](https://crates.io/crates/cqrs-es) 4 | provides a lightweight framework for building applications utilizing CQRS and event sourcing. 5 | The project targets serverless architectures but can be used in any application seeking to utilize these patterns to 6 | build better software. 7 | 8 | [Chapter 1](theory.html) provides an introduction to CQRS and event sourcing as well as the underlying design strategy 9 | that they support, Domain Driven Design. This chapter is essential reading for anyone not familiar with these topics. 10 | 11 | [Chapter 2](getting_started.html) provides a tutorial for getting started with the framework. This involves building 12 | an aggregate, commands, events and the associated trait implementations for a basic CQRS system. 
Our demo application 13 | will simulate a simple bank account. 14 | 15 | [Chapter 3](add_first_test.html) covers the basics of implementing domain tests. The ability to simply build supple 16 | tests is one of the primary benefits of a CQRS system, and here we explain how to effectively build these. 17 | 18 | [Chapter 4](test_application.html) covers building a simple query processor as well as putting all of these components 19 | together in a proper CQRS framework. We will use a naive, in-memory event store to facilitate this mini-application that 20 | will allow us to explore CQRS with simple unit tests. 21 | 22 | [Chapter 5](building_an_application.html) discusses the additional features needed to work this framework into a real 23 | application. 24 | 25 | [Chapter 6](advanced_topics.html) branches out to some of the more advanced topics permitted by these patterns, 26 | including event replay and upcasters. -------------------------------------------------------------------------------- /docs/book/src/intro_add_aggregate.md: -------------------------------------------------------------------------------- 1 | ## Aggregate 2 | 3 | With the command and event in place we can now start adding our business logic. 4 | In Domain Driven Design all of this logic belongs within the aggregate which 5 | for our example we will name `BankAccount`. 6 | 7 | And for our simple set of business rules, we will use two fields. 8 | 9 | ```rust,ignore 10 | #[derive(Serialize, Default, Deserialize)] 11 | pub struct BankAccount { 12 | opened: bool, 13 | // this is a floating point for our example, don't do this IRL 14 | balance: f64, 15 | } 16 | ``` 17 | 18 | In order to operate within the `cqrs-es` framework, we will need the traits, `Default`, `Serialize` and `Deserialize` 19 | (all usually derived) and we will implement `cqrs_es::Aggregate`, minus any of the business logic.
20 | 21 | ```rust 22 | impl Aggregate for BankAccount { 23 | type Command = BankAccountCommand; 24 | type Event = BankAccountEvent; 25 | type Error = BankAccountError; 26 | type Services = BankAccountServices; 27 | 28 | // This identifier should be unique to the system. 29 | fn aggregate_type() -> String { 30 | "Account".to_string() 31 | } 32 | 33 | // The aggregate logic goes here. Note that this will be the _bulk_ of a CQRS system 34 | // so expect to use helper functions elsewhere to keep the code clean. 35 | async fn handle(&self, command: Self::Command, services: Self::Services) -> Result, Self::Error> { 36 | todo!() 37 | } 38 | 39 | fn apply(&mut self, event: Self::Event) { 40 | match event { 41 | BankAccountEvent::AccountOpened { .. } => { 42 | self.opened = true 43 | } 44 | 45 | BankAccountEvent::CustomerDepositedMoney { amount: _, balance } => { 46 | self.balance = balance; 47 | } 48 | 49 | BankAccountEvent::CustomerWithdrewCash { amount: _, balance } => { 50 | self.balance = balance; 51 | } 52 | 53 | BankAccountEvent::CustomerWroteCheck { 54 | check_number: _, 55 | amount: _, 56 | balance, 57 | } => { 58 | self.balance = balance; 59 | } 60 | } 61 | 62 | } 63 | } 64 | 65 | ``` 66 | 67 | ### Identifying the aggregate when persisted 68 | 69 | The `aggregate_type` method is used by the cqrs-es framework to uniquely identify the aggregate and event 70 | when serialized for persistence. Each aggregate should use a unique value within your application. 71 | ```rust 72 | fn aggregate_type() -> String { 73 | "Account".to_string() 74 | } 75 | ``` 76 | 77 | ### Handling commands 78 | 79 | The `handle` method of this trait is where _all_ of the business logic will go, for now we will leave that out and just return an empty vector. 
80 | 81 | ```rust 82 | // note that the aggregate is immutable and an error can be returned 83 | async fn handle(&self, command: Self::Command) -> Result, AggregateError> { 84 | todo!() 85 | } 86 | ``` 87 | The `handle` method does not allow any mutation of the aggregate, state should be changed _only_ by emitting events. 88 | 89 | ### Applying committed events 90 | 91 | Once events have been committed they will need to be applied to the aggregate in order for it to update its state. 92 | ```rust 93 | // note the aggregate is mutable and there is no return type 94 | fn apply(&mut self, event: Self::Event) { 95 | match event { 96 | BankAccountEvent::AccountOpened { .. } => { 97 | self.opened = true 98 | } 99 | 100 | BankAccountEvent::CustomerDepositedMoney { amount: _, balance } => { 101 | self.balance = balance; 102 | } 103 | 104 | BankAccountEvent::CustomerWithdrewCash { amount: _, balance } => { 105 | self.balance = balance; 106 | } 107 | 108 | BankAccountEvent::CustomerWroteCheck { 109 | check_number: _, 110 | amount: _, 111 | balance, 112 | } => { 113 | self.balance = balance; 114 | } 115 | } 116 | } 117 | ``` 118 | Note that the `apply` function has no return value. The act of applying an event is simply bookkeeping, the action has 119 | already taken place. 120 | 121 | > An event is a historical fact, it can be ignored, but it should never cause an error. -------------------------------------------------------------------------------- /docs/book/src/intro_add_commands.md: -------------------------------------------------------------------------------- 1 | ## Commands 2 | 3 | In order to make changes to our system we will need commands. 4 | These are the simplest components of any CQRS system and consist of little more than packaged data. 5 | 6 | When designing commands an easy mental model to use is that of an HTTP API. 7 | Each virtual endpoint would receive just the data that is needed to operate that function. 
8 | 9 | ```rust,ignore 10 | #[derive(Debug, Deserialize)] 11 | pub enum BankAccountCommand { 12 | OpenAccount { account_id: String }, 13 | DepositMoney { amount: f64 }, 14 | WithdrawMoney { amount: f64 }, 15 | WriteCheck { check_number: String, amount: f64 }, 16 | } 17 | ``` 18 | 19 | Note that the `Deserialize` trait is derived. 20 | This is not yet needed, but it will be useful when building out a full application. 21 | The most common way to receive commands from a user is via an HTTP body that can be directly deserialized. -------------------------------------------------------------------------------- /docs/book/src/intro_add_error.md: -------------------------------------------------------------------------------- 1 | ## Custom error and service 2 | 3 | Aside from our domain data objects, we'll need two additional components to complete an aggregate. 4 | An error to indicate a violation of the business rules, and a set of services that will be made available during command processing. 5 | 6 | ### User error 7 | 8 | The `Aggregate` trait can return an error from its `handle` method indicating that some rule of the business logic was violated, 9 | this information will usually be returned to the user as an error message. 10 | For example, an attempt to withdraw more money from a bank account than the current balance would return this error 11 | and the user would be informed that the balance was not sufficient for this transaction. 12 | 13 | ```rust 14 | #[derive(Debug)] 15 | pub struct BankAccountError(String); 16 | ``` 17 | 18 | This error should implement `Display` and `Error` as well. 19 | Additionally, implementing the `From<&str>` trait will simplify the business logic that we'll be writing in the 20 | next sections. 
21 | 22 | ```rust 23 | impl Display for BankAccountError { 24 | fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { 25 | write!(f,"{}",self.0) 26 | } 27 | } 28 | 29 | impl std::error::Error for BankAccountError {} 30 | 31 | impl From<&str> for BankAccountError { 32 | fn from(message: &str) -> Self { 33 | BankAccountError(message.to_string()) 34 | } 35 | } 36 | ``` 37 | 38 | ### External services 39 | 40 | Business logic doesn't exist in a vacuum and external services may be needed for a variety of reasons. 41 | We don't have much logic built yet, so this will initially just be a placeholder. 42 | Let's add a couple of calls that will, for now, always return successfully. 43 | 44 | ```rust 45 | pub struct BankAccountServices; 46 | 47 | impl BankAccountServices { 48 | async fn atm_withdrawal(&self, atm_id: &str, amount: f64) -> Result<(), AtmError> { 49 | Ok(()) 50 | } 51 | 52 | async fn validate_check(&self, account: &str, check: &str) -> Result<(), CheckingError> { 53 | Ok(()) 54 | } 55 | } 56 | pub struct AtmError; 57 | pub struct CheckingError; 58 | ``` 59 | -------------------------------------------------------------------------------- /docs/book/src/intro_add_events.md: -------------------------------------------------------------------------------- 1 | ## Domain Events 2 | 3 | Next we will need to create some domain events. Note that we qualify events with 'domain' to differentiate them from 4 | other events that might exist within our application. These are domain events because they make assertions about 5 | changes in the aggregate state. 6 | 7 | In the `cqrs-es` framework the domain events are expected to be an enum with payloads similar to the commands, 8 | this will give us a single root event for each aggregate. 9 | 10 | The enum as well as the payloads should derive several traits. 11 | 12 | - `Debug` - used for error handling and testing. 
13 | - `Clone` - the event may be passed to a number of downstream queries in an asynchronous manner and will need to be cloned. 14 | - `Serialize, Deserialize` - serialization is essential for both storage and publishing to distributed queries. 15 | - `PartialEq` - we will be adding a lot of tests to verify that our business logic is correct. 16 | 17 | ### Adding events and payloads 18 | 19 | Let's add four self-descriptive events as part of a single enum. 20 | 21 | ```rust 22 | #[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] 23 | pub enum BankAccountEvent { 24 | AccountOpened { 25 | account_id: String, 26 | }, 27 | CustomerDepositedMoney { 28 | amount: f64, 29 | balance: f64, 30 | }, 31 | CustomerWithdrewCash { 32 | amount: f64, 33 | balance: f64, 34 | }, 35 | CustomerWroteCheck { 36 | check_number: String, 37 | amount: f64, 38 | balance: f64, 39 | }, 40 | } 41 | ``` 42 | 43 | Again, all of our events are named in the past tense, 44 | [this is important](https://martinfowler.com/bliki/UbiquitousLanguage.html). 45 | 46 | Our events now need to implement `cqrs_es::DomainEvent` to provide an `event_type` and `event_version` 47 | for each event. 48 | This will be important later in any production system when events need to be changed 49 | (see [event upcasters](event_upcasters.md)). 50 | 51 | ```rust 52 | impl DomainEvent for BankAccountEvent { 53 | fn event_type(&self) -> String { 54 | let event_type: &str = match self { 55 | BankAccountEvent::AccountOpened { .. } => "AccountOpened", 56 | BankAccountEvent::CustomerDepositedMoney { .. } => "CustomerDepositedMoney", 57 | BankAccountEvent::CustomerWithdrewCash { .. } => "CustomerWithdrewCash", 58 | BankAccountEvent::CustomerWroteCheck { ..
} => "CustomerWroteCheck", 59 | }; 60 | event_type.to_string() 61 | } 62 | 63 | fn event_version(&self) -> String { 64 | "1.0".to_string() 65 | } 66 | } 67 | ``` -------------------------------------------------------------------------------- /docs/book/src/intro_getting_started.md: -------------------------------------------------------------------------------- 1 | ## Getting started 2 | 3 | For this tutorial we will build an application to manage the logic of a bank account. 4 | As a simple set of business rules, we want to: 5 | 6 | - accept deposits 7 | - provide withdrawals 8 | - allow our customers to write checks 9 | - disallow customers from overdrawing their account 10 | 11 | ### Project setup 12 | 13 | Okay let's get some code going. First, up we need a workspace. 14 | You know the drill, find your favorite playspace on your hard 15 | drive and start a new Rust bin project. 16 | 17 | cargo new --bin mybank 18 | 19 | There is a lot that happens behind the scenes in a CQRS/event sourcing application, so we'll be using the 20 | [`cqrs-es`](https://docs.rs/cqrs-es) framework to get us off the ground. 21 | Add these dependencies in your cargo.toml: 22 | 23 | ```toml 24 | [dependencies] 25 | cqrs-es = "0.4.8" 26 | async-trait = "0.1" 27 | serde = { version = "1.0", features = ["derive"]} 28 | tokio = { version = "1", features = ["full"] } 29 | ``` 30 | > All of the examples included here are simplified from the 31 | > [cqrs-demo](https://github.com/serverlesstechnology/cqrs-demo) project. 32 | > More detailed examples can be found by exploring that package. 33 | 34 | 35 | -------------------------------------------------------------------------------- /docs/book/src/test_add_first.md: -------------------------------------------------------------------------------- 1 | ## Adding aggregate tests 2 | 3 | Now that we have the basic components in place we can begin setting up our aggregate tests. 
These are the tests that 4 | we will use to verify the business logic for our application. Testing is one of the most valuable aspects of CQRS/event 5 | sourcing as it allows us to configure tests that have no coupling with our application logic. 6 | 7 | We can do this because we rely only on events for past state, so no amount of refactoring of our application logic will 8 | affect whether a test passes or fails (as long as the result of the command is the same). 9 | These tests follow a pattern that you are likely familiar with: 10 | 11 | - Given some past events 12 | - When a command is applied 13 | - Then some result is expected 14 | 15 | Let's first add a test module and define a new `AccountTestFramework` convenience type for our test framework. 16 | 17 | ```rust 18 | #[cfg(test)] 19 | mod aggregate_tests { 20 | use super::*; 21 | use cqrs_es::test::TestFramework; 22 | 23 | type AccountTestFramework = TestFramework; 24 | } 25 | ``` 26 | 27 | ### A first aggregate test 28 | 29 | Now within our `aggregate_tests` module we will add our first test. 30 | We do not require any previous events so we can initiate our test with the `given_no_previous_events` method. 31 | Let's fire a `DepositMoney` command and expect to see a `CustomerDepositedMoney` event. 32 | 33 | ```rust 34 | #[test] 35 | fn test_deposit_money() { 36 | let expected = BankAccountEvent::CustomerDepositedMoney { amount: 200.0, balance: 200.0 }; 37 | 38 | AccountTestFramework::default() 39 | .given_no_previous_events() 40 | .when(DepositMoney{ amount: 200.0 }) 41 | .then_expect_events(vec![expected]); 42 | } 43 | ``` 44 | 45 | Now if we run this test, we should see a test failure with output that looks something like this: 46 | 47 | ``` 48 | thread 'aggregate_tests::test' panicked at 'assertion failed: `(left == right)` 49 | left: `[]`, 50 | right: `[CustomerDepositedMoney{ amount: 200.0, balance: 200.0 }]`', <::std::macros::panic ...
51 | ``` 52 | We have not added any logic yet, so this is what we should see. 53 | We have told the test to expect a `CustomerDepositedMoney` event, but none has been produced. 54 | 55 | ### Adding business logic 56 | 57 | Let's go back to our `Command` implementation for `DepositMoney` and fix this. 58 | 59 | ```rust 60 | async fn handle(&self, command: Self::Command) -> Result, AggregateError> { 61 | match command { 62 | BankAccountCommand::DepositMoney { amount } => { 63 | let balance = self.balance + amount; 64 | Ok(vec![BankAccountEvent::CustomerDepositedMoney { 65 | amount, 66 | balance, 67 | }]) 68 | } 69 | _ => Ok(vec![]) 70 | } 71 | } 72 | 73 | ``` 74 | 75 | And running our first test again - success! 76 | 77 | ### Dealing with previous events 78 | 79 | Now we should verify that our logic is valid if there is a previous balance. For this, we will use the `given` method to 80 | initiate the test, along with a vector containing a sole previous event: 81 | 82 | ```rust 83 | #[test] 84 | fn test_deposit_money_with_balance() { 85 | let previous = BankAccountEvent::CustomerDepositedMoney { amount: 200.0, balance: 200.0 }; 86 | let expected = BankAccountEvent::CustomerDepositedMoney { amount: 200.0, balance: 400.0 }; 87 | 88 | AccountTestFramework::default() 89 | .given(vec![previous]) 90 | .when(DepositMoney{ amount: 200.0 }) 91 | .then_expect_events(vec![expected]); 92 | } 93 | ``` 94 | 95 | These exercises feel a little-brain dead, but they provide a good example of how these tests are structured. 96 | Next we will start adding some real logic. -------------------------------------------------------------------------------- /docs/book/src/test_add_more.md: -------------------------------------------------------------------------------- 1 | ## Adding more logic 2 | 3 | In our simple example a customer can always deposit money, but making a cash withdrawal is another thing. 
We should ensure that 4 | the customer has the requested funds available before releasing them, lest the account overdraw. 5 | 6 | When discussing events, we noted that the process of applying events cannot produce an error since it is a past 7 | event. Instead, errors should be produced before the event is generated, during the processing of the command. 8 | 9 | ### Account withdrawal - happy path 10 | 11 | First, let's add a test for a happy path withdrawal, again with a previous deposit using the `given` 12 | initial method: 13 | 14 | ```rust 15 | #[test] 16 | fn test_withdraw_money() { 17 | let previous = BankAccountEvent::CustomerDepositedMoney { amount: 200.0, balance: 200.0 }; 18 | let expected = BankAccountEvent::CustomerWithdrewCash { amount: 100.0, balance: 100.0 }; 19 | 20 | AccountTestFramework::default() 21 | .given(vec![previous]) 22 | .when(WithdrawMoney{ amount: 100.0 }) 23 | .then_expect_events(vec![expected]); 24 | } 25 | ``` 26 | 27 | Since we have not added any withdrawal logic yet this should fail. 28 | Let's correct this with some naive logic to produce the event: 29 | 30 | ```rust 31 | async fn handle(&self, command: Self::Command) -> Result, AggregateError> { 32 | match command { 33 | BankAccountCommand::WithdrawMoney { amount } => { 34 | let balance = self.balance - amount; 35 | Ok(vec![BankAccountEvent::CustomerWithdrewCash { 36 | amount, 37 | balance, 38 | }]) 39 | } 40 | ... 41 | } 42 | } 43 | ``` 44 | 45 | ### Verify funds are available 46 | 47 | Now we have success with our happy path test, but then there is nothing to stop a customer from withdrawing more than 48 | is deposited. 
49 | Let's add a test case using the `then_expect_error` expect case: 50 | 51 | ```rust 52 | #[test] 53 | fn test_withdraw_money_funds_not_available() { 54 | let error = AccountTestFramework::default() 55 | .given_no_previous_events() 56 | .when(BankAccountCommand::WithdrawMoney { amount: 200.0 }) 57 | .then_expect_error(); 58 | assert_eq!("funds not available", &error.0) 59 | } 60 | ``` 61 | 62 | We should see our new test fail since our naive logic cannot handle this yet. 63 | Now we update our command logic to return an error when this situation arises: 64 | 65 | ```rust 66 | async fn handle(&self, command: Self::Command) -> Result, AggregateError> { 67 | match command { 68 | BankAccountCommand::WithdrawMoney { amount } => { 69 | let balance = self.balance - amount; 70 | if balance < 0_f64 { 71 | return Err(BankAccountError::from("funds not available")); 72 | } 73 | Ok(vec![BankAccountEvent::CustomerWithdrewCash { 74 | amount, 75 | balance, 76 | }]) 77 | } 78 | ... 79 | } 80 | } 81 | ``` 82 | 83 | And we should now see our test pass. 84 | 85 | Note that handling a command is always an atomic process, either all produced events become a part of the factual 86 | history of this aggregate instance, or an error is returned. 87 | 88 | -------------------------------------------------------------------------------- /docs/book/src/theory.md: -------------------------------------------------------------------------------- 1 | ## The patterns 2 | 3 | Command-Query Responsibility Segregation (CQRS) and event sourcing are patterns that enable many of the concepts behind 4 | Domain Driven Design. 5 | All of these tools are designed to provide a great deal of flexibility for applications that have complex or rapidly 6 | changing business rules. 7 | 8 | By separating the business rules from the technical aspects of an application we remove many 9 | of the inherent barriers to software changes that exist in standard applications. 
10 | Any application with complex or rapidly changing rules might be a good candidate for using CQRS and event sourcing. 11 | 12 | ### A note on terminology 13 | 14 | Though CQRS and event sourcing can be used for a range of software problems they are primarily applied to build 15 | business applications since those so often require that quality. 16 | 17 | I'll use the term "business rules" whenever I'm specifically discussing these complex or changing rulesets. 18 | If your application is not a business application, just replace "business rules" with something more appropriate to your 19 | domain. -------------------------------------------------------------------------------- /docs/book/src/theory_cqrs.md: -------------------------------------------------------------------------------- 1 | ### CQRS 2 | 3 | Command Query Responsibility Segregation (CQRS) splits with standard software development by using separate 4 | write and read models of an application object. This allows us to create views, or queries, that more accurately model 5 | our business environment. 6 | 7 | > We define "standard application" here as a generic webapp commonly used to provide backend services in business applications. 8 | 9 | In standard applications, the domain modeling tends to reflect how the application objects are stored in an RDBMS. 10 | Updates generally involve loading the object(s) from a set of tables via a JOIN operation, making the requisite 11 | modifications, and persisting the updated object(s). 12 | 13 | Read operations are usually accomplished via the same load operation or from a view based on the same database tables. 14 | This requires us to make a compromise between our read and write models. 15 | 16 | > One of the primary drivers in using CQRS is to remove the compromises between read and write models. 17 | 18 | ### CQRS basics 19 | 20 | When using CQRS we divide our application into two parts: command processors and queries.
Users interact to modify the 21 | system by sending commands but receive no information back. When a user interacts with the query side of our 22 | application they receive information but there is a guarantee that no changes will be made. 23 | 24 | ![CQRS](images/CQRS.png) 25 | 26 | A number of strategies have been devised for decoupling the command and query side of applications. The most 27 | successful among them is communication across this divide via events. This approach leverages the benefits of a reactive 28 | architecture as well as providing an opportunity to use event sourcing. 29 | -------------------------------------------------------------------------------- /docs/book/src/theory_ddd.md: -------------------------------------------------------------------------------- 1 | ### Domain Driven Design 2 | 3 | The intellectual backing for both of these patterns is provided by Domain Driven Design (DDD), which is an approach to 4 | building software that focuses on building a better business model. The field of DDD is expansive, but one of its core 5 | concepts is the Aggregate. 6 | 7 | ### Some terminology 8 | 9 | But first, in DDD all objects are broken down into two groups: 10 | 11 | - entities - objects with a unique identity, and usually with state (e.g., a person). 12 | - value objects - immutable objects that are constructed when needed and discarded when no longer valuable (e.g., a 13 | person's age). 14 | 15 | We use a combination of these components to form an 'aggregate'. An aggregate is a collection of related entities and value objects. 16 | An aggregate always has a root entity that forms the basis of this grouping. 
17 | 18 | For example, a `CreditCard` aggregate might be composed of: 19 | 20 | - the credit card account entity itself - _entity_ 21 | - a list of charges - _value objects_ 22 | - a payment history - _value objects_ 23 | 24 | ### An example bounded context 25 | 26 | The above topics are encapsulated within a [bounded context](https://martinfowler.com/bliki/BoundedContext.html), 27 | an important topic, but outside the scope of this book. An example of a bounded context with multiple aggregates is 28 | below. 29 | 30 | ![Bounded context example](images/bounded_context.png) 31 | 32 | 33 | ### More restrictions 34 | 35 | Domain Driven Design requires that Aggregates are only modified as a whole. This means we cannot 36 | operate on any individual component, such as modifying the balance of a bank account, without loading the entire aggregate. 37 | Think of this as using strong encapsulation in an object-oriented pattern. 38 | 39 | This stipulation allows robust, simple testing of the business rules since they are all in one location. 40 | Instead of integration tests spanning multiple application layers we can use simple unit tests to guarantee we meet all 41 | of our business requirements. 42 | 43 | ![comparing ddd with standard applications](images/compare_standard_application.png) 44 | 45 | The above example compares a standard application to that of an application using domain driven design. 46 | The former uses business logic spread through a number of services that access and modify state data in unknown ways. 47 | In a DDD application all changes to the state data are only made through the aggregate resulting in a highly testable 48 | set of business rules. 49 | 50 | In the diagram a CQRS framework is implied, but any DDD framework should promote the same level of flexibility. 51 | 52 | ### Further reading 53 | Domain Driven Design is a topic far too large to discuss in any detail here. 
Some additional resources: 54 | - [Ubiquitous language](https://martinfowler.com/bliki/UbiquitousLanguage.html) 55 | - [The Blue Book](https://www.amazon.com/Domain-Driven-Design-Tackling-Complexity-Software-ebook/dp/B00794TAUG) - the 56 | original text on DDD by Eric Evans 57 | - [The Red Book](https://www.amazon.com/Implementing-Domain-Driven-Design-Vaughn-Vernon-ebook/dp/B00BCLEBN8) - a book 58 | refined from years of experience with DDD -------------------------------------------------------------------------------- /docs/book/src/theory_event_sourcing.md: -------------------------------------------------------------------------------- 1 | ### Event Sourcing 2 | 3 | Event sourcing adds to the flexibility of CQRS by relying upon the events as our source of truth. Events are now 4 | historical facts that we can use to calculate the current state in ways that we did not originally intend. 5 | In this way, event sourcing acts similar to a financial ledger. 6 | 7 | | **_Event Name_** | **_Event Value_** | **_Account Balance_** | 8 | |:---------------------|:------------------------------------------|----------------------:| 9 | | _account-opened_ | Account XYZ opened by user John Doe | $0.00 | 10 | | _money-deposited_ | John Doe deposited $500 | $500.00 | 11 | | _check-cleared_ | Check #1127 cleared for $27.15 | $472.85 | 12 | | _cash-withdrawn_ | $100 cash withdrawn from ATM #243 | $372.85 | 13 | 14 | In this example we have four events that have been stored. Consider that a new business requirement is added to track 15 | money outflow from accounts. 16 | There is no way to do this immediately if the current state is all that is persisted (i.e., the current account balance). 17 | 18 | However, because our source of truth is these events rather than a simple balance, it is easy to add the business 19 | logic after the fact and calculate what we need from the persisted events. 
20 | 21 | | **_Event Name_** | **_Event Value_** | **_Account Balance_** | **_Outflow_** | 22 | |:---------------------|:------------------------------------------|----------------------:|--------------:| 23 | | _account-opened_ | Account XYZ opened by user John Doe | $0.00 | $0.00 | 24 | | _money-deposited_ | John Doe deposited $500 | $500.00 | $0.00 | 25 | | _check-cleared_ | Check #1127 cleared for $27.15 | $472.85 | $27.15 | 26 | | _cash-withdrawn_ | $100 cash withdrawn from ATM #243 | $372.85 | $127.15 | 27 | 28 | In this way, we have used the persisted events to provide new business information that otherwise would not have been 29 | available. In essence we have turned back time and we can now use new logic to compute states that would 30 | otherwise be impossible to know. 31 | -------------------------------------------------------------------------------- /docs/book/src/theory_queries.md: -------------------------------------------------------------------------------- 1 | ### Queries 2 | 3 | Once events are published they can be consumed by our queries (a.k.a., views). As queries consume events they modify 4 | their own state and produce something similar to the views that we have in standard applications. Their real 5 | flexibility is derived from the fact that these queries are not tied in any way to our write model. 6 | 7 | In the previous example we produced events that we could imagine to be of interest to several queries. Certainly, we 8 | would have a `customer-information` query that would need to be updated, but then we might have additional queries 9 | such as an `all-customer-contacts` query that would also respond to the same event. 10 | 11 | Additionally, other downstream services may respond to these events similarly to how they would in any other 12 | messaging-based application. Using the same example we might have a service that sends a verification email to the 13 | new address after an `email-updated` event is fired. 
14 | 15 | ![CQRS](../../images/CQRS_flow.png) 16 | 17 | An example of how a single command might move through a CQRS system. 18 | -------------------------------------------------------------------------------- /docs/book/src/theory_updates.md: -------------------------------------------------------------------------------- 1 | ### Updates 2 | When creating updates under the pattern of CQRS, the focus is solely on what the changes are, and not on what these changes 3 | mean to any views or queries. This involves requesting the changes via a **_command_** and reflecting the actual 4 | changes in one or more **_events_**. 5 | 6 | To do this, we use a DDD concept called an **_aggregate_**. Roughly, an aggregate combines the state of an object that 7 | is being acted upon, along with all the business rules of our application. This approach strives to remove any 8 | technical complexity near the business rules where applications are most sensitive to the errors that degrade their 9 | agility. 10 | 11 | The aggregate’s job is to consider the command in the context of its current state and determine what business facts 12 | need to be recorded. In effect, a command is a request that is subject to security, validation, application state and 13 | business rules. The aggregate is the arbiter of these rules. 14 | 15 | For instance, if we send an `update-email` command, we would expect an `email-updated` event to be produced by the 16 | `customer` aggregate. 17 | 18 | > Note the difference in naming between a command and an event. A command is a request in the imperative whereas an event 19 | > is a statement of fact in the past tense. 20 | 21 | A single event per command is the most common situation, but this can change based on a number of factors. In the 22 | event an email address is configured as the customer’s primary contact, we could see a `primary-contact-updated` 23 | event as well. 
24 | 25 | Using the same example: If the provided email address is identical to the old email address, then we may not have any 26 | events at all since there is no change to be reflected. Other situations with no resultant events could be seen for a 27 | variety of other reasons, such as if the new email address is invalid or if the user requesting the update is not 28 | authorized to do so. 29 | -------------------------------------------------------------------------------- /docs/ladr/01-postgres-is-primary-datastore.md: -------------------------------------------------------------------------------- 1 | # Postgres is the primary datastore 2 | 3 | * Status: accepted 4 | * Date: 2019-11-07 5 | 6 | ## Context 7 | 8 | A backing datastore is needed to store events, snapshots and query views. 9 | Though additional datastores may be supported in the future, the primary datastore must support all initial development 10 | requirements. As the reference datastore its eccentricities may also drive implementation decisions for new features. 11 | 12 | ## Decision Drivers 13 | 14 | - Must be durable storage. 15 | - Needs support for optimistic locking that is transactional across multiple tables. 16 | - Will store serialized data of arbitrary sizes, generally in the low KB range but up to multiple MB for large views. 17 | - Suitable for serverless environments. 18 | - Ideally available both as a platform and as a service to meet varying ops requirements. 19 | 20 | ## Considered Options 21 | 22 | - PostgreSQL 23 | - MySql 24 | - DynamoDb 25 | 26 | ## Decision Outcome 27 | 28 | Postgres: 29 | - Robust, ACID database with a large user base. 30 | - Available as a serverless component via Amazon Aurora (along with MySql).
-------------------------------------------------------------------------------- /docs/ladr/02-use-async-rust.md: -------------------------------------------------------------------------------- 1 | # Use async Rust 2 | 3 | * Status: accepted 4 | * Date: 2021-06-02 5 | 6 | ## Context 7 | 8 | As part of 9 | [release 1.39.0](https://blog.rust-lang.org/2019/11/07/Rust-1.39.0.html) 10 | Rust began support for async-await. 11 | Since then a significant number of libraries have moved to async making their use difficult from within the `cqrs-es` 12 | library 13 | ([red/blue functions](https://journal.stuffwithstuff.com/2015/02/01/what-color-is-your-function/)). 14 | 15 | ## Decision Drivers 16 | 17 | - A significant number of libraries have moved to use async. 18 | - Most open source server frameworks now use async. 19 | 20 | ## Considered options 21 | 22 | - Move the library to async-await. 23 | - Greatly simplifies the use of libraries built on async, in particular those using http clients. 24 | - Better suited for use in web applications. 25 | - Continue using standard Rust. 26 | - Using async requires a runtime which could increase application start time. This both adds call latency and 27 | increases the risk of serverless cold start timeouts. 28 | - Core Rust features such as traits do not yet support async. 29 | 30 | ## Decision outcome 31 | 32 | Use async with the Tokio runtime: 33 | - The additional work to use asynchronous libraries without async is significant and more than can be justified. 34 | - Many database drivers (core dependencies of the persistence packages) now are async. 35 | - Absence of async severely limits the HTTP servers that can be used for standalone applications. 
36 | 37 | -------------------------------------------------------------------------------- /docs/tenets.md: -------------------------------------------------------------------------------- 1 | # Our Tenets 2 | 3 | These tenets are shared goals that we keep in mind when building this family of projects. 4 | Together they provide a basis for making design decisions when multiple solutions present themselves. 5 | 6 | ### Target serverless but ensure support for standalone applications. 7 | > Serverless solutions are the primary use case for this project, but in this pursuit we should sacrifice neither 8 | > features nor performance when used in standalone applications. 9 | 10 | ### Be lightweight - prefer distributed over monolithic solutions. 11 | > Our target architectures are serverless functions or microservices: lightweight, likely distributed and decoupled. 12 | > To fit these needs we strive for reduced startup and processing latency, a smaller memory footprint and minimal 13 | > complexity. We deprioritize the more full-featured but cumbersome approach that is characteristic of many 14 | > CQRS frameworks. 15 | 16 | ### Avoid unsafe code. 17 | > Rust provides an exceedingly fast and safe environment which we should not take for granted. 18 | > Unsafe code must be heavily tested and meticulously maintained to ensure the Rust contract of safety is not broken, 19 | > that being the case unsafe code should be avoided wherever possible. 20 | 21 | ### Enforce Domain Driven Design principles. 22 | > This package is focused on DDD solutions using CQRS and event sourcing rather than a general purpose event processing 23 | > library. We should therefore enforce the guidelines and best practices around those topics in an opinionated way.
24 | -------------------------------------------------------------------------------- /docs/versions/change_log.md: -------------------------------------------------------------------------------- 1 | # Change log 2 | 3 | #### `v0.4.11` 4 | - Commits with no produced events will not invoke downstream queries. 5 | - Updates per Clippy due to Rust compiler changes. 6 | - Rust minimum version changed to 1.71.0 7 | 8 | #### `v0.4.10` 9 | - Updated dependencies. 10 | - Rust minimum version changed to 1.63.0 11 | 12 | #### `v0.4.9` 13 | - Added the `when_async` and `and` methods to the test executor to support `async` test frameworks and Gherkin testing. 14 | 15 | #### `v0.4.8` 16 | - ***The primary branch is now set to `main`.*** 17 | - Makes the runtime and TLS library configurable for the SQL based persistence crates. 18 | - [Postgres](https://github.com/serverlesstechnology/postgres-es#runtime-and-tls-configuration) 19 | - [Mysql](https://github.com/serverlesstechnology/mysql-es#runtime-and-tls-configuration) 20 | 21 | #### `v0.4.6` and `v0.4.7` 22 | Not released. 23 | 24 | #### `v0.4.5` 25 | - Changes required for Rust 1.63 clippy additions. 26 | 27 | #### `v0.4.4` 28 | - Dependency updates including moving to sqlx v0.6 in postgres-es and mysql-es crates. 29 | 30 | #### `v0.4.3` 31 | - Added a `QueryReplay` component to assist in replaying events across a query. 32 | - Made the `QueryErrorHandler` convenience type public. 33 | - Set consistent reads in dynamo-es to reduce optimistic locking errors. 34 | 35 | #### `v0.4.2` 36 | - Add `append_query` method to the framework. 37 | 38 | #### `v0.4.0` 39 | - Modified the aggregate `handle` method to expect a reply of `Self::Error`. 40 | Previously `AggregateError` was returned which allowed overloading enum branches other than the `UserError`, this is no longer allowed. 41 | - Added `Services` type to the aggregate trait. 
42 | Any external services used should now be configured with the CqrsFramework rather than attached to a command. 43 | - The TestFramework now provides the `expect_error` validator that is available if the `Self::Error` implements `PartialEq`. 44 | - Removed deprecated `EventEnvelope` methods and `UserErrorPayload` struct from use in examples. 45 | 46 | #### `v0.3.2` 47 | - Removed deprecated methods and structs from use in examples. 48 | 49 | #### `v0.3.1` 50 | - Removed the `timestamp` field from the database tables. This field is not needed by the framework or repositories and its use in examples led to confusion. 51 | - Deprecated UserErrorPayload, this will be removed in v0.4.0. Users should create a custom error for their aggregate. 52 | - Deprecated helper methods on `EventEnvelope`. These methods will be removed in v0.4.0 however the fields on `EventEnvelope` will remain public. 53 | 54 | #### `v0.3.0` 55 | > See the [v0.2.5 ==> v0.3.0 migration guide](migration_0_3_0.md) for more details. 56 | 57 | - Published a new persistence repository, [dynamo-es](https://crates.io/crates/dynamo-es), providing an underlying persistence layer based on [AWS' DynamoDb](https://aws.amazon.com/dynamodb/). 58 | - The `handle` method within the `Aggregate` trait is now async. This will greatly simplify calling asynchronous clients and services from the aggregate logic. 59 | - Deprecation of common persistence crate [persist-es](https://crates.io/crates/persist-es), all logic has moved to the `persist` module of [cqrs-es](https://crates.io/crates/cqrs-es). 60 | - The event and snapshot table names are now configurable in the persistence packages. 61 | - Corrected a bug that caused [mysql-es](https://crates.io/crates/mysql-es) to return the wrong error when an optimistic lock violation was encountered. 62 | - In `AggregateTestExecutor` the method `then_expect_error_message` was added to replace the now deprecated `then_expect_error`.
63 | 64 | #### `v0.2.5` 65 | > See the [v0.2.4 ==> v0.2.5 migration guide](migration_0_2_5.md) for more details. 66 | 67 | - The payload for user errors in the aggregate is now configurable. 68 | - Additional enumerations for `AggregateError`. 69 | - Unexpected errors now return with the root cause rather than just the message. 70 | 71 | #### `v0.2.4` 72 | - Move to Rust 2021 edition. 73 | - Audit and update dependencies. 74 | 75 | #### `v0.2.3` 76 | - Added upcasters to event stores. 77 | 78 | #### `v0.2.2` 79 | - Consolidated repositories to a single trait encompassing all functionality. 80 | 81 | #### `v0.2.1` 82 | - Moved generic persistence logic in from postgres-es package. 83 | - Added event context information to event envelope. 84 | 85 | #### `v0.2.0` 86 | Moved to async/await for better tool support. 87 | 88 | #### `v0.1.3` 89 | Aggregates now consume events on `apply`. 90 | 91 | #### `v0.1.2` 92 | Require `Send + Sync` for queries. 93 | 94 | #### `v0.1.1` 95 | Require `Send + Sync` for support of multi-threaded applications. 96 | 97 | #### `v0.1.0` 98 | Corrected to move all command and event logic into the aggregate. 99 | -------------------------------------------------------------------------------- /docs/versions/migration_0_2_5.md: -------------------------------------------------------------------------------- 1 | ## Migrating guide to v0.2.5 2 | 3 | > v0.2.4 ==> v0.2.5 4 | 5 | ### Aggregate changes 6 | The error payload returned from business logic is now configurable. 7 | The `UserErrorPayload` struct is still available as a reference implementation. 8 | 9 | As part of the aggregate implementation two changes are needed. 10 | - You must specify the error type, using the `UserErrorPayload` that originally was required will simplify this. 11 | - The signature for `handle` now includes the custom error as part of the error result. 12 | ```rust 13 | impl Aggregate for TestAggregate { 14 | type Error = UserErrorPayload; 15 | 16 | ... 
17 | 18 | fn handle(&self, command: Self::Command) -> Result<Vec<Self::Event>, AggregateError<Self::Error>> { 19 | ... 20 | } 21 | } 22 | ``` 23 | 24 | ### Simple creation error 25 | 26 | Previously helper functions were available to create a simple aggregate error from a `&str`, 27 | this is more complex with configurable user error payloads. 28 | If you continue to use the `UserErrorPayload` we have implemented `From<&str>` in order to 29 | provide the same functionality. 30 | 31 | ```rust 32 | // Formerly this was 33 | // let error = AggregateError::new("the expected error message"); 34 | 35 | // Updated simple implementation 36 | let error: AggregateError<UserErrorPayload> = "the expected error message".into(); 37 | ``` 38 | 39 | ### Boxed causes and additional error enumerations 40 | 41 | We've received a number of requests for help debugging applications in the early stages while configuration is still being tweaked. 42 | To help with this we've added new error enumerations to better identify the root cause of unexpected errors. 43 | - AggregateError::DatabaseConnectionError 44 | - AggregateError::DeserializationError 45 | 46 | We're also passing the root cause errors back (rather than just the error messages) to provide additional debugging 47 | information.
48 | 49 | ```rust 50 | let error_message = match &aggregate_error { 51 | AggregateError::UserError(e) => serde_json::to_string(e).unwrap(), 52 | AggregateError::TechnicalError(e) => e.to_string(), 53 | AggregateError::AggregateConflict => "command collision encountered, please try again".to_string(), 54 | 55 | // New error enumerations 56 | AggregateError::DatabaseConnectionError(e) => e.to_string(), 57 | AggregateError::DeserializationError(e) => e.to_string(), 58 | }; 59 | ``` 60 | -------------------------------------------------------------------------------- /docs/versions/migration_0_3_0.md: -------------------------------------------------------------------------------- 1 | ## Migrating guide to v0.3.0 2 | 3 | > v0.2.5 ==> v0.3.0 4 | 5 | ### The `handle` method within the `Aggregate` trait is now async 6 | Logic within the command handler can now use asynchronous clients and services directly. 7 | 8 | The signature for `handle` now includes the `async` keyword: 9 | ```rust 10 | impl Aggregate for TestAggregate { 11 | ... 12 | 13 | async fn handle(&self, command: Self::Command) -> Result<Vec<Self::Event>, AggregateError<Self::Error>> { 14 | ... 15 | } 16 | } 17 | ``` 18 | 19 | ### Deprecation of common persistence crate 20 | The [persist-es crate](https://crates.io/crates/persist-es) used for housing logic that is common across the three 21 | persistence crates has been deprecated. All components have been moved to the `persist` module of 22 | [cqrs-es](https://crates.io/crates/cqrs-es). 23 | This should only require a change to the namespace of any imports. 24 | 25 | E.g., 26 | ```rust 27 | // Previous namespace 'persist_es' should now be 'cqrs_es::persist' 28 | // use persist_es::{GenericQuery,ViewRepository}; 29 | use cqrs_es::persist::{GenericQuery,ViewRepository}; 30 | ``` 31 | 32 | ### Changes to persisted tables 33 | Significant changes to event, snapshot and test view tables have been made. 34 | See individual persistence packages for details of each.
35 | 36 | ### Aggregate test fixtures 37 | A Tokio thread runner has been added to the test fixtures so these should not need any changes due to the change 38 | in the Aggregate command handler. 39 | 40 | The `then_expect_error` method on `AggregateTestExecutor` has been deprecated in order to be repurposed in v0.4.0, 41 | please use `then_expect_error_message` instead. 42 | 43 | ### String replaces `&'static str` in interfaces 44 | 45 | The `Aggregate` and `DomainEvent` interfaces had methods producing `&'static str`, in all cases these should now return 46 | a String. -------------------------------------------------------------------------------- /docs/versions/migration_0_4_0.md: -------------------------------------------------------------------------------- 1 | ## Migrating guide to v0.4.0 2 | 3 | > v0.3.x ==> v0.4.0 4 | 5 | ### The `Aggregate` trait now has a `Services` type that must be implemented 6 | Services that were previously injected with the command can now be configured with the aggregate and injected by the `CqrsFramework`. 7 | To use this feature your services must be thread-safe, i.e., they should implement `Send` & `Sync` (these are 8 | [auto traits](https://doc.rust-lang.org/reference/special-types-and-traits.html#auto-traits) and are implemented 9 | automatically by the compiler if the logic is safe for use across threads). 10 | 11 | ### The `handle` method within the `Aggregate` trait has changed 12 | Logic within the command handler can now use asynchronous clients and services directly. 13 | 14 | The signature for `handle` now includes a borrowed reference to the configured services. 15 | Additionally, the result now just returns the configured error, it is no longer necessary to wrap the error in an `AggregateError`. 16 | ```rust,ignore 17 | impl Aggregate for MyAggregate { 18 | type Services = MyServices; 19 | ... 20 | 21 | async fn handle(&self, command: Self::Command, services: &Self::Services) -> Result<Vec<Self::Event>, Self::Error> { 22 | ...
23 | } 24 | } 25 | ``` 26 | 27 | Note that the result of the `execute` method on `CqrsFramework` will still return an `AggregateError` with the `UserError` 28 | variant wrapping the returned error. 29 | 30 | ### Aggregate test fixtures 31 | 32 | - The `then_expect_error` method on `AggregateTestExecutor` is available to directly test a resulting aggregate error. 33 | *Note that the configured error must implement `PartialEq` to take advantage of this.* 34 | - The `inspect_result` method will return the command handler result in order to directly test. 35 | - Test fixtures must now be configured with a service stub when created. 36 | 37 | ```rust,ignore 38 | // Formerly create with the `default` method. 39 | // let test_framework: TestFramework<MyAggregate> = TestFramework::default(); 40 | 41 | // A service now must be injected to create the test harness. 42 | let test_framework: TestFramework<MyAggregate> = TestFramework::with(MyService::mock()); 43 | ``` -------------------------------------------------------------------------------- /persistence/dynamo-es/.gitignore: -------------------------------------------------------------------------------- 1 | .idea 2 | -------------------------------------------------------------------------------- /persistence/dynamo-es/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "dynamo-es" 3 | version.workspace = true 4 | authors.workspace = true 5 | edition.workspace = true 6 | license.workspace = true 7 | keywords.workspace = true 8 | description = "A DynamoDB implementation of an event repository for cqrs-es."
9 | repository.workspace = true 10 | documentation = "https://docs.rs/dynamo-es" 11 | readme = "README.md" 12 | 13 | [dependencies] 14 | cqrs-es.workspace = true 15 | aws-sdk-dynamodb = "1.79" 16 | serde = { workspace = true, features = ["derive"]} 17 | serde_json = "1.0" 18 | tokio = { workspace = true, features = ["rt"] } 19 | thiserror = "2.0.12" 20 | 21 | [dev-dependencies] 22 | uuid.workspace = true 23 | -------------------------------------------------------------------------------- /persistence/dynamo-es/README.md: -------------------------------------------------------------------------------- 1 | # dynamo-es 2 | 3 | > A DynamoDB implementation of the `PersistedEventRepository` trait in cqrs-es. 4 | 5 | ## Usage 6 | Add to your Cargo.toml file: 7 | 8 | ```toml 9 | [dependencies] 10 | cqrs-es = "0.4.11" 11 | dynamo-es = "0.4.11" 12 | ``` 13 | 14 | Requires access to a Dynamo DB with existing tables. See: 15 | - [Sample database configuration](db/dynamo_db.yaml) 16 | - [Sample database table layout](db/create_tables.sh) 17 | - Use `docker-compose` and the `./db/create_tables.sh` script to quickly setup [a local database](docker-compose.yml) 18 | 19 | ### DynamoDb caveats 20 | AWS' DynamoDb is fast, flexible and highly available, but it does 21 | [set some limitations](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ServiceQuotas.html) 22 | that must be considered in the design of your application. 23 | 24 | #### Maximum limit of 25 operations in any transaction 25 | 26 | Events are inserted in a single transaction, which limits the number of events that can be handled from a single command 27 | using this repository. 
To operate correctly a command must not produce more than 28 | - 25 events if using [an event store without snapshots](https://docs.rs/cqrs-es/latest/cqrs_es/persist/struct.PersistedEventStore.html#method.new_event_store) 29 | - 24 events if using [snapshots](https://docs.rs/cqrs-es/latest/cqrs_es/persist/struct.PersistedEventStore.html#method.new_snapshot_store) 30 | or [an aggregate store](https://docs.rs/cqrs-es/latest/cqrs_es/persist/struct.PersistedEventStore.html#method.new_aggregate_store) 31 | 32 | #### Item size limit of 400 KB 33 | A single event should never reach this size, but a large serialized aggregate might. 34 | If this is the case for your aggregate beware of using [snapshots](https://docs.rs/cqrs-es/latest/cqrs_es/persist/struct.PersistedEventStore.html#method.new_snapshot_store) 35 | or [an aggregate store](https://docs.rs/cqrs-es/latest/cqrs_es/persist/struct.PersistedEventStore.html#method.new_aggregate_store). 36 | 37 | #### Maximum request size of 1 MB 38 | This could have the same ramifications as the above for [snapshots](https://docs.rs/cqrs-es/latest/cqrs_es/persist/struct.PersistedEventStore.html#method.new_snapshot_store) 39 | or [an aggregate store](https://docs.rs/cqrs-es/latest/cqrs_es/persist/struct.PersistedEventStore.html#method.new_aggregate_store). 40 | Additionally, an aggregate instance with a large number of events may reach this threshold. 41 | To prevent an error while loading or replaying events, 42 | [set the streaming channel size](https://docs.rs/dynamo-es/latest/dynamo_es/struct.DynamoEventRepository.html#method.with_streaming_channel_size) 43 | to a value that ensures you won't exceed this threshold. 44 | 45 | 46 | ### Testing 47 | 48 | Requires access to DynamoDb with existing tables. This can be created locally using the included 49 | `docker-compose.yml` and database initialization script. 
50 | 51 | To prepare a local test environment (requires a local installation of 52 | [Docker](https://www.docker.com/products/docker-desktop) and 53 | [AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-welcome.html)): 54 | ``` 55 | docker-compose up -d 56 | ./db/create_tables.sh 57 | ``` 58 | 59 | It is recommended that tables are configured to allow only transactions. 60 | See: 61 | https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/transaction-apis-iam.html 62 | 63 | --- 64 | 65 | Things that could be helpful: 66 | - [User guide](https://doc.rust-cqrs.org) along with an introduction to CQRS and event sourcing. 67 | - [Demo application](https://github.com/serverlesstechnology/cqrs-demo) using the warp http server. 68 | - [Change log](https://github.com/serverlesstechnology/cqrs/blob/main/change_log.md) 69 | 70 | [![Crates.io](https://img.shields.io/crates/v/dynamo-es)](https://crates.io/crates/dynamo-es) 71 | [![docs](https://img.shields.io/badge/API-docs-blue.svg)](https://docs.rs/dynamo-es) 72 | ![build status](https://codebuild.us-west-2.amazonaws.com/badges?uuid=eyJlbmNyeXB0ZWREYXRhIjoiVVUyR0tRbTZmejFBYURoTHdpR3FnSUFqKzFVZE9JNW5haDZhcUFlY2xtREhtaVVJMWsxcWZOeC8zSUR0UWhpaWZMa0ZQSHlEYjg0N2FoU2lwV1FsTXFRPSIsIml2UGFyYW1ldGVyU3BlYyI6IldjUVMzVEpKN1V3aWxXWGUiLCJtYXRlcmlhbFNldFNlcmlhbCI6MX0%3D&branch=main) 73 | -------------------------------------------------------------------------------- /persistence/dynamo-es/buildspec_test.yml: -------------------------------------------------------------------------------- 1 | version: 0.2 2 | 3 | phases: 4 | install: 5 | commands: 6 | - echo "${DOCKERHUB_PASSWORD}" | docker login -u "${DOCKERHUB_USERNAME}" --password-stdin 7 | - nohup /usr/local/bin/dockerd --host=unix:///var/run/docker.sock --host=tcp://127.0.0.1:2375 --storage-driver=overlay2& 8 | - timeout 15 sh -c "until docker info; do echo .; sleep 1; done" 9 | pre_build: 10 | commands: 11 | - docker build -t dynamo-es . 
12 | build: 13 | commands: 14 | - docker-compose up -d 15 | - docker ps 16 | - sleep 5 17 | - ./db/create_tables.sh 18 | - docker image inspect dynamo-es 19 | - docker run --network="host" dynamo-es cargo test 20 | -------------------------------------------------------------------------------- /persistence/dynamo-es/db/create_tables.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | aws dynamodb create-table \ 3 | --table-name Events \ 4 | --key-schema \ 5 | AttributeName=AggregateTypeAndId,KeyType=HASH \ 6 | AttributeName=AggregateIdSequence,KeyType=RANGE \ 7 | --attribute-definitions \ 8 | AttributeName=AggregateTypeAndId,AttributeType=S \ 9 | AttributeName=AggregateIdSequence,AttributeType=N \ 10 | --billing-mode PAY_PER_REQUEST \ 11 | --endpoint-url http://localhost:8000 12 | 13 | aws dynamodb create-table \ 14 | --table-name Snapshots \ 15 | --key-schema \ 16 | AttributeName=AggregateTypeAndId,KeyType=HASH \ 17 | --attribute-definitions \ 18 | AttributeName=AggregateTypeAndId,AttributeType=S \ 19 | --billing-mode PAY_PER_REQUEST \ 20 | --endpoint-url http://localhost:8000 21 | 22 | aws dynamodb create-table \ 23 | --table-name TestViewTable \ 24 | --key-schema \ 25 | AttributeName=ViewId,KeyType=HASH \ 26 | --attribute-definitions \ 27 | AttributeName=ViewId,AttributeType=S \ 28 | --billing-mode PAY_PER_REQUEST \ 29 | --endpoint-url http://localhost:8000 30 | -------------------------------------------------------------------------------- /persistence/dynamo-es/db/dynamo_db.yaml: -------------------------------------------------------------------------------- 1 | Resources: 2 | Events: 3 | Type: AWS::DynamoDB::Table 4 | Properties: 5 | AttributeDefinitions: 6 | - 7 | AttributeName: "AggregateTypeAndId" 8 | AttributeType: "S" 9 | - 10 | AttributeName: "AggregateIdSequence" 11 | AttributeType: "N" 12 | KeySchema: 13 | - 14 | AttributeName: "AggregateTypeAndId" 15 | KeyType: "HASH" 16 | - 17 | AttributeName: 
"AggregateIdSequence" 18 | KeyType: "RANGE" 19 | BillingMode: PAY_PER_REQUEST 20 | 21 | Snapshots: 22 | Type: AWS::DynamoDB::Table 23 | Properties: 24 | AttributeDefinitions: 25 | - 26 | AttributeName: "AggregateTypeAndId" 27 | AttributeType: "S" 28 | KeySchema: 29 | - 30 | AttributeName: "AggregateTypeAndId" 31 | KeyType: "HASH" 32 | BillingMode: PAY_PER_REQUEST 33 | 34 | TestViewTable: 35 | Type: AWS::DynamoDB::Table 36 | Properties: 37 | AttributeDefinitions: 38 | - 39 | AttributeName: "ViewId" 40 | AttributeType: "S" 41 | KeySchema: 42 | - 43 | AttributeName: "ViewId" 44 | KeyType: "HASH" 45 | BillingMode: PAY_PER_REQUEST 46 | -------------------------------------------------------------------------------- /persistence/dynamo-es/docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | dynamodb-local: 3 | command: "-jar DynamoDBLocal.jar -sharedDb" 4 | image: amazon/dynamodb-local:latest 5 | container_name: dynamodb-local 6 | environment: 7 | AWS_ACCESS_KEY_ID: 'TESTAWSID' 8 | AWS_SECRET_ACCESS_KEY: 'TESTAWSKEY' 9 | AWS_DEFAULT_REGION: 'us-west-2' 10 | ports: 11 | - 8000:8000 -------------------------------------------------------------------------------- /persistence/dynamo-es/src/cqrs.rs: -------------------------------------------------------------------------------- 1 | use cqrs_es::persist::PersistedEventStore; 2 | use cqrs_es::{Aggregate, CqrsFramework, Query}; 3 | 4 | use crate::{DynamoCqrs, DynamoEventRepository}; 5 | 6 | /// A convenience function for creating a CqrsFramework from a DynamoDb client 7 | /// and queries. 
8 | pub fn dynamodb_cqrs( 9 | dynamo_client: aws_sdk_dynamodb::client::Client, 10 | query_processor: Vec>>, 11 | services: A::Services, 12 | ) -> DynamoCqrs 13 | where 14 | A: Aggregate, 15 | { 16 | let repo = DynamoEventRepository::new(dynamo_client); 17 | let store = PersistedEventStore::new_event_store(repo); 18 | CqrsFramework::new(store, query_processor, services) 19 | } 20 | 21 | /// A convenience function for creating a CqrsFramework using an aggregate store. 22 | pub fn dynamodb_aggregate_cqrs( 23 | dynamo_client: aws_sdk_dynamodb::client::Client, 24 | query_processor: Vec>>, 25 | services: A::Services, 26 | ) -> DynamoCqrs 27 | where 28 | A: Aggregate, 29 | { 30 | let repo = DynamoEventRepository::new(dynamo_client); 31 | let store = PersistedEventStore::new_aggregate_store(repo); 32 | CqrsFramework::new(store, query_processor, services) 33 | } 34 | 35 | /// A convenience function for creating a CqrsFramework using a snapshot store. 36 | pub fn dynamodb_snapshot_cqrs( 37 | dynamo_client: aws_sdk_dynamodb::client::Client, 38 | query_processor: Vec>>, 39 | snapshot_size: usize, 40 | services: A::Services, 41 | ) -> DynamoCqrs 42 | where 43 | A: Aggregate, 44 | { 45 | let repo = DynamoEventRepository::new(dynamo_client); 46 | let store = PersistedEventStore::new_snapshot_store(repo, snapshot_size); 47 | CqrsFramework::new(store, query_processor, services) 48 | } 49 | 50 | #[cfg(test)] 51 | mod test { 52 | use std::sync::Arc; 53 | 54 | use crate::cqrs::dynamodb_cqrs; 55 | use crate::testing::tests::{test_dynamodb_client, TestQueryRepository, TestServices}; 56 | use crate::DynamoViewRepository; 57 | 58 | #[tokio::test] 59 | async fn test_valid_cqrs_framework() { 60 | let client = test_dynamodb_client().await; 61 | let view_repo = DynamoViewRepository::new("test_query", client.clone()); 62 | let query = TestQueryRepository::new(Arc::new(view_repo)); 63 | let _ps = dynamodb_cqrs(client, vec![Box::new(query)], TestServices); 64 | } 65 | } 66 | 
-------------------------------------------------------------------------------- /persistence/dynamo-es/src/error.rs: -------------------------------------------------------------------------------- 1 | use aws_sdk_dynamodb::error::{BuildError, SdkError}; 2 | use aws_sdk_dynamodb::operation::query::QueryError; 3 | use aws_sdk_dynamodb::operation::scan::ScanError; 4 | use aws_sdk_dynamodb::operation::transact_write_items::TransactWriteItemsError; 5 | use cqrs_es::persist::PersistenceError; 6 | use cqrs_es::AggregateError; 7 | use serde::de::StdError; 8 | 9 | #[derive(Debug, thiserror::Error)] 10 | pub enum DynamoAggregateError { 11 | #[error("optimistic lock error")] 12 | OptimisticLock, 13 | #[error(transparent)] 14 | ConnectionError(Box), 15 | #[error(transparent)] 16 | DeserializationError(Box), 17 | #[error( 18 | "Too many operations: {0}, DynamoDb supports only up to 25 operations per transactions" 19 | )] 20 | TransactionListTooLong(usize), 21 | #[error("missing attribute: {0}")] 22 | MissingAttribute(String), 23 | #[error(transparent)] 24 | UnknownError(Box), 25 | } 26 | 27 | impl From for AggregateError { 28 | fn from(error: DynamoAggregateError) -> Self { 29 | match error { 30 | DynamoAggregateError::OptimisticLock => Self::AggregateConflict, 31 | DynamoAggregateError::ConnectionError(err) => Self::DatabaseConnectionError(err), 32 | DynamoAggregateError::DeserializationError(err) => Self::DeserializationError(err), 33 | DynamoAggregateError::TransactionListTooLong(_) => { 34 | Self::UnexpectedError(Box::new(error)) 35 | } 36 | DynamoAggregateError::MissingAttribute(err) => { 37 | Self::UnexpectedError(Box::new(DynamoAggregateError::MissingAttribute(err))) 38 | } 39 | DynamoAggregateError::UnknownError(err) => Self::UnexpectedError(err), 40 | } 41 | } 42 | } 43 | 44 | impl From for DynamoAggregateError { 45 | fn from(err: serde_json::Error) -> Self { 46 | Self::UnknownError(Box::new(err)) 47 | } 48 | } 49 | 50 | impl From> for DynamoAggregateError { 51 | fn 
from(error: SdkError) -> Self { 52 | if let SdkError::ServiceError(err) = &error { 53 | if let TransactWriteItemsError::TransactionCanceledException(cancellation) = err.err() { 54 | for reason in cancellation.cancellation_reasons() { 55 | if reason.code() == Some("ConditionalCheckFailed") { 56 | return Self::OptimisticLock; 57 | } 58 | } 59 | } 60 | } 61 | Self::UnknownError(Box::new(error)) 62 | } 63 | } 64 | 65 | impl From> for DynamoAggregateError { 66 | fn from(error: SdkError) -> Self { 67 | unknown_error(error) 68 | } 69 | } 70 | 71 | impl From for DynamoAggregateError { 72 | fn from(error: BuildError) -> Self { 73 | Self::UnknownError(Box::new(error)) 74 | } 75 | } 76 | 77 | impl From> for DynamoAggregateError { 78 | fn from(error: SdkError) -> Self { 79 | unknown_error(error) 80 | } 81 | } 82 | 83 | fn unknown_error(error: SdkError) -> DynamoAggregateError { 84 | DynamoAggregateError::UnknownError(Box::new(error)) 85 | } 86 | 87 | impl From for PersistenceError { 88 | fn from(error: DynamoAggregateError) -> Self { 89 | match error { 90 | DynamoAggregateError::OptimisticLock => Self::OptimisticLockError, 91 | DynamoAggregateError::ConnectionError(err) => Self::ConnectionError(err), 92 | DynamoAggregateError::DeserializationError(err) => Self::DeserializationError(err), 93 | DynamoAggregateError::TransactionListTooLong(_) => Self::UnknownError(Box::new(error)), 94 | DynamoAggregateError::MissingAttribute(err) => { 95 | Self::UnknownError(Box::new(DynamoAggregateError::MissingAttribute(err))) 96 | } 97 | DynamoAggregateError::UnknownError(err) => Self::UnknownError(err), 98 | } 99 | } 100 | } 101 | -------------------------------------------------------------------------------- /persistence/dynamo-es/src/helpers.rs: -------------------------------------------------------------------------------- 1 | use aws_sdk_dynamodb::client::Client; 2 | use aws_sdk_dynamodb::operation::query::QueryOutput; 3 | use aws_sdk_dynamodb::types::{AttributeValue, 
TransactWriteItem}; 4 | use serde_json::Value; 5 | use std::collections::HashMap; 6 | 7 | use crate::error::DynamoAggregateError; 8 | 9 | pub(crate) async fn load_dynamo_view( 10 | client: &Client, 11 | table_name: &str, 12 | view_id: &str, 13 | ) -> Result { 14 | Ok(client 15 | .query() 16 | .table_name(table_name) 17 | .key_condition_expression("#view_type_id = :view_type_id") 18 | .expression_attribute_names("#view_type_id", "ViewId") 19 | .expression_attribute_values(":view_type_id", AttributeValue::S(String::from(view_id))) 20 | .send() 21 | .await?) 22 | } 23 | 24 | pub(crate) async fn commit_transactions( 25 | client: &Client, 26 | transactions: Vec, 27 | ) -> Result<(), DynamoAggregateError> { 28 | let transaction_len = transactions.len(); 29 | if transaction_len > 25 { 30 | return Err(DynamoAggregateError::TransactionListTooLong( 31 | transaction_len, 32 | )); 33 | } 34 | client 35 | .transact_write_items() 36 | .set_transact_items(Some(transactions)) 37 | .send() 38 | .await?; 39 | Ok(()) 40 | } 41 | 42 | pub(crate) fn att_as_value( 43 | values: &HashMap, 44 | attribute_name: &str, 45 | ) -> Result { 46 | let attribute = require_attribute(values, attribute_name)?; 47 | match attribute.as_b() { 48 | Ok(payload_blob) => Ok(serde_json::from_slice(payload_blob.as_ref())?), 49 | Err(_) => Err(DynamoAggregateError::MissingAttribute( 50 | attribute_name.to_string(), 51 | )), 52 | } 53 | } 54 | 55 | pub(crate) fn att_as_number( 56 | values: &HashMap, 57 | attribute_name: &str, 58 | ) -> Result { 59 | let attribute = require_attribute(values, attribute_name)?; 60 | match attribute.as_n() { 61 | Ok(attribute_as_n) => attribute_as_n 62 | .parse::() 63 | .map_err(|_| DynamoAggregateError::MissingAttribute(attribute_name.to_string())), 64 | Err(_) => Err(DynamoAggregateError::MissingAttribute( 65 | attribute_name.to_string(), 66 | )), 67 | } 68 | } 69 | 70 | pub(crate) fn att_as_string( 71 | values: &HashMap, 72 | attribute_name: &str, 73 | ) -> Result { 74 | let 
attribute = require_attribute(values, attribute_name)?; 75 | match attribute.as_s() { 76 | Ok(attribute_as_s) => Ok(attribute_as_s.to_string()), 77 | Err(_) => Err(DynamoAggregateError::MissingAttribute( 78 | attribute_name.to_string(), 79 | )), 80 | } 81 | } 82 | 83 | pub(crate) fn require_attribute<'a>( 84 | values: &'a HashMap, 85 | attribute_name: &str, 86 | ) -> Result<&'a AttributeValue, DynamoAggregateError> { 87 | values 88 | .get(attribute_name) 89 | .ok_or(DynamoAggregateError::MissingAttribute( 90 | attribute_name.to_string(), 91 | )) 92 | } 93 | -------------------------------------------------------------------------------- /persistence/dynamo-es/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![forbid(unsafe_code)] 2 | #![deny(missing_docs)] 3 | #![deny(clippy::all)] 4 | #![warn(rust_2018_idioms)] 5 | //! # dynamo-es 6 | //! 7 | //! > A DynamoDb implementation of the `EventStore` trait in [cqrs-es](https://crates.io/crates/cqrs-es). 8 | //! 9 | pub use crate::cqrs::*; 10 | pub use crate::event_repository::*; 11 | pub use crate::types::*; 12 | pub use crate::view_repository::*; 13 | 14 | mod cqrs; 15 | mod error; 16 | mod event_repository; 17 | mod helpers; 18 | mod testing; 19 | mod types; 20 | mod view_repository; 21 | -------------------------------------------------------------------------------- /persistence/dynamo-es/src/types.rs: -------------------------------------------------------------------------------- 1 | use cqrs_es::persist::PersistedEventStore; 2 | use cqrs_es::CqrsFramework; 3 | 4 | use crate::DynamoEventRepository; 5 | 6 | /// A convenience type for a CqrsFramework backed by 7 | /// [DynamoStore](struct.DynamoStore.html). 
8 | pub type DynamoCqrs = CqrsFramework>; 9 | -------------------------------------------------------------------------------- /persistence/mysql-es/.gitignore: -------------------------------------------------------------------------------- 1 | .idea 2 | -------------------------------------------------------------------------------- /persistence/mysql-es/CHANGELOG.md: -------------------------------------------------------------------------------- 1 | The changelog for all crates in the cqrs-es project are located 2 | [here](https://github.com/serverlesstechnology/cqrs/blob/main/docs/versions/change_log.md). 3 | -------------------------------------------------------------------------------- /persistence/mysql-es/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "mysql-es" 3 | version.workspace = true 4 | authors.workspace = true 5 | edition.workspace = true 6 | license.workspace = true 7 | keywords.workspace = true 8 | description = "A MySql implementation of an event repository for cqrs-es." 
9 | repository.workspace = true 10 | documentation = "https://docs.rs/mysql-es" 11 | readme = "README.md" 12 | 13 | [dependencies] 14 | cqrs-es.workspace = true 15 | futures = "0.3" 16 | serde = { workspace = true, features = ["derive"]} 17 | serde_json = "1.0" 18 | sqlx = { version = "0.8", features = [ "mysql", "json"] } 19 | tokio = { workspace = true, features = ["rt"] } 20 | thiserror = "2.0.12" 21 | 22 | [dev-dependencies] 23 | uuid.workspace = true 24 | 25 | [features] 26 | default = ["runtime-tokio-rustls"] 27 | runtime-async-std-native-tls = ["sqlx/runtime-async-std-native-tls"] 28 | runtime-tokio-native-tls = ["sqlx/runtime-tokio-native-tls"] 29 | runtime-async-std-rustls = ["sqlx/runtime-async-std-rustls"] 30 | runtime-tokio-rustls = ["sqlx/runtime-tokio-rustls"] 31 | -------------------------------------------------------------------------------- /persistence/mysql-es/README.md: -------------------------------------------------------------------------------- 1 | # mysql-es 2 | 3 | > A MySql implementation of the `PersistedEventRepository` trait in cqrs-es. 4 | 5 | --- 6 | 7 | ## Usage 8 | Add to your Cargo.toml file: 9 | 10 | ```toml 11 | [dependencies] 12 | cqrs-es = "0.4.12" 13 | mysql-es = "0.4.12" 14 | ``` 15 | 16 | Requires access to a MySql DB with existing tables. See: 17 | - [Sample database configuration](db/init.sql) 18 | - Use `docker-compose` to quickly setup [a local database](docker-compose.yml) 19 | 20 | A simple configuration example: 21 | ``` 22 | let store = default_mysql_pool("mysql://my_user:my_pass@localhost:3306/my_db"); 23 | let cqrs = mysql_es::mysql_cqrs(pool, vec![]) 24 | ``` 25 | 26 | Things that could be helpful: 27 | - [User guide](https://doc.rust-cqrs.org) along with an introduction to CQRS and event sourcing. 28 | - [Demo application](https://github.com/serverlesstechnology/cqrs-demo) using the warp http server. 
29 | - [Change log](https://github.com/serverlesstechnology/cqrs/blob/main/docs/versions/change_log.md) 30 | 31 | 32 | ## Runtime and TLS configuration 33 | This package defaults to expect the [Tokio runtime](https://crates.io/crates/tokio) and the 34 | [Rustls library](https://crates.io/crates/rustls) for TLS. 35 | If a different combination is desired the appropriate feature flag should be used: 36 | - `runtime-tokio-native-tls` 37 | - `runtime-tokio-rustls` (default) 38 | - `runtime-async-std-native-tls` 39 | - `runtime-async-std-rustls` 40 | - `runtime-actix-native-tls` 41 | - `runtime-actix-rustls` 42 | 43 | [![Crates.io](https://img.shields.io/crates/v/mysql-es)](https://crates.io/crates/mysql-es) 44 | [![docs](https://img.shields.io/badge/API-docs-blue.svg)](https://docs.rs/mysql-es) 45 | ![docs](https://codebuild.us-west-2.amazonaws.com/badges?uuid=eyJlbmNyeXB0ZWREYXRhIjoiRTZsVnY1emVCV1JXblVOMHpZTHdoS3JuVVVOUmRRb054Z2dYZmhKMk9PVU1zYklUaUhOTkM1d3l1czRWQUhBa28yWHM0RmRacmE3SWRmT1pJVU83akFVPSIsIml2UGFyYW1ldGVyU3BlYyI6InNuZ3U4MVBGYUFNbmhmLzIiLCJtYXRlcmlhbFNldFNlcmlhbCI6MX0%3D&branch=main) 46 | -------------------------------------------------------------------------------- /persistence/mysql-es/buildspec_test.yml: -------------------------------------------------------------------------------- 1 | version: 0.2 2 | 3 | phases: 4 | install: 5 | commands: 6 | - echo "${DOCKERHUB_PASSWORD}" | docker login -u "${DOCKERHUB_USERNAME}" --password-stdin 7 | - nohup /usr/local/bin/dockerd --host=unix:///var/run/docker.sock --host=tcp://127.0.0.1:2375 --storage-driver=overlay2& 8 | - timeout 15 sh -c "until docker info; do echo .; sleep 1; done" 9 | pre_build: 10 | commands: 11 | - docker build -t mysql-es . 
12 | build: 13 | commands: 14 | - docker-compose up -d 15 | - docker ps 16 | - docker image inspect mysql-es 17 | - docker run --network="host" mysql-es cargo test 18 | -------------------------------------------------------------------------------- /persistence/mysql-es/db/init.sql: -------------------------------------------------------------------------------- 1 | -- a single table is used for all events in the cqrs system 2 | CREATE TABLE events 3 | ( 4 | aggregate_type varchar(255) NOT NULL, 5 | aggregate_id varchar(255) NOT NULL, 6 | sequence bigint CHECK (sequence >= 0) NOT NULL, 7 | event_type text NOT NULL, 8 | event_version text NOT NULL, 9 | payload json NOT NULL, 10 | metadata json NOT NULL, 11 | CONSTRAINT events_pk PRIMARY KEY (aggregate_type, aggregate_id, sequence) 12 | ); 13 | 14 | -- this table is only needed if snapshotting is employed 15 | CREATE TABLE snapshots 16 | ( 17 | aggregate_type varchar(255) NOT NULL, 18 | aggregate_id varchar(255) NOT NULL, 19 | last_sequence bigint CHECK (last_sequence >= 0) NOT NULL, 20 | current_snapshot bigint CHECK (current_snapshot >= 0) NOT NULL, 21 | payload json NOT NULL, 22 | CONSTRAINT snapshots_pk PRIMARY KEY (aggregate_type, aggregate_id) 23 | ); 24 | 25 | -- one view table should be created for every `MysqlViewRepository` used 26 | -- replace name with the value used in `MysqlViewRepository::new(view_name: String)` 27 | CREATE TABLE test_view 28 | ( 29 | view_id varchar(255) NOT NULL, 30 | version bigint CHECK (version >= 0) NOT NULL, 31 | payload json NOT NULL, 32 | CONSTRAINT test_view_pk PRIMARY KEY (view_id) 33 | ); 34 | 35 | INSERT INTO events (aggregate_type, aggregate_id, sequence, event_type, event_version, payload, metadata) 36 | VALUES ('Customer', 'previous_event_in_need_of_upcast', 1, 'NameAdded', '1.0', '{ 37 | "NameAdded": {} 38 | }', '{}'); 39 | -------------------------------------------------------------------------------- /persistence/mysql-es/docker-compose.yml: 
-------------------------------------------------------------------------------- 1 | services: 2 | db: 3 | image: mysql 4 | restart: always 5 | ports: 6 | - 3306:3306 7 | environment: 8 | MYSQL_ROOT_PASSWORD: rootpw 9 | MYSQL_DATABASE: test 10 | MYSQL_USER: test_user 11 | MYSQL_PASSWORD: test_pass 12 | volumes: 13 | - './db:/docker-entrypoint-initdb.d' 14 | -------------------------------------------------------------------------------- /persistence/mysql-es/src/cqrs.rs: -------------------------------------------------------------------------------- 1 | use cqrs_es::persist::PersistedEventStore; 2 | use cqrs_es::{Aggregate, CqrsFramework, Query}; 3 | 4 | use crate::{MysqlCqrs, MysqlEventRepository}; 5 | use sqlx::mysql::MySqlPoolOptions; 6 | use sqlx::{MySql, Pool}; 7 | 8 | /// A convenience building a simple connection pool for MySql database. 9 | pub async fn default_mysql_pool(connection_string: &str) -> Pool { 10 | MySqlPoolOptions::new() 11 | .max_connections(10) 12 | .connect(connection_string) 13 | .await 14 | .expect("unable to connect to database") 15 | } 16 | 17 | /// A convenience method for building a simple connection pool for MySql. 18 | /// A connection pool is needed for both the event and view repositories. 19 | /// 20 | /// ``` 21 | /// use sqlx::{MySql, Pool}; 22 | /// use mysql_es::default_mysql_pool; 23 | /// 24 | /// # async fn configure_pool() { 25 | /// let connection_string = "mysql://test_user:test_pass@localhost:3306/test"; 26 | /// let pool: Pool = default_mysql_pool(connection_string).await; 27 | /// # } 28 | /// ``` 29 | pub fn mysql_cqrs( 30 | pool: Pool, 31 | query_processor: Vec>>, 32 | services: A::Services, 33 | ) -> MysqlCqrs 34 | where 35 | A: Aggregate, 36 | { 37 | let repo = MysqlEventRepository::new(pool); 38 | let store = PersistedEventStore::new_event_store(repo); 39 | CqrsFramework::new(store, query_processor, services) 40 | } 41 | 42 | /// A convenience function for creating a CqrsFramework using a snapshot store. 
43 | pub fn mysql_snapshot_cqrs( 44 | pool: Pool, 45 | query_processor: Vec>>, 46 | snapshot_size: usize, 47 | services: A::Services, 48 | ) -> MysqlCqrs 49 | where 50 | A: Aggregate, 51 | { 52 | let repo = MysqlEventRepository::new(pool); 53 | let store = PersistedEventStore::new_snapshot_store(repo, snapshot_size); 54 | CqrsFramework::new(store, query_processor, services) 55 | } 56 | 57 | /// A convenience function for creating a CqrsFramework using an aggregate store. 58 | pub fn mysql_aggregate_cqrs( 59 | pool: Pool, 60 | query_processor: Vec>>, 61 | services: A::Services, 62 | ) -> MysqlCqrs 63 | where 64 | A: Aggregate, 65 | { 66 | let repo = MysqlEventRepository::new(pool); 67 | let store = PersistedEventStore::new_aggregate_store(repo); 68 | CqrsFramework::new(store, query_processor, services) 69 | } 70 | 71 | #[cfg(test)] 72 | mod test { 73 | use crate::testing::tests::{ 74 | TestAggregate, TestQueryRepository, TestServices, TestView, TEST_CONNECTION_STRING, 75 | }; 76 | use crate::{default_mysql_pool, mysql_cqrs, MysqlViewRepository}; 77 | use std::sync::Arc; 78 | 79 | #[tokio::test] 80 | async fn test_valid_cqrs_framework() { 81 | let pool = default_mysql_pool(TEST_CONNECTION_STRING).await; 82 | let repo = MysqlViewRepository::::new("test_view", pool.clone()); 83 | let query = TestQueryRepository::new(Arc::new(repo)); 84 | let _ps = mysql_cqrs(pool, vec![Box::new(query)], TestServices); 85 | } 86 | } 87 | -------------------------------------------------------------------------------- /persistence/mysql-es/src/error.rs: -------------------------------------------------------------------------------- 1 | use cqrs_es::persist::PersistenceError; 2 | use cqrs_es::AggregateError; 3 | use sqlx::Error; 4 | 5 | #[derive(Debug, thiserror::Error)] 6 | pub enum MysqlAggregateError { 7 | #[error("optimistic lock error")] 8 | OptimisticLock, 9 | #[error(transparent)] 10 | ConnectionError(Box), 11 | #[error(transparent)] 12 | DeserializationError(Box), 13 | 
#[error(transparent)] 14 | UnknownError(Box), 15 | } 16 | 17 | impl From for MysqlAggregateError { 18 | fn from(err: sqlx::Error) -> Self { 19 | // TODO: improve error handling 20 | match &err { 21 | Error::Database(database_error) => { 22 | if let Some(code) = database_error.code() { 23 | if code.as_ref() == "23000" { 24 | return Self::OptimisticLock; 25 | } 26 | } 27 | Self::UnknownError(Box::new(err)) 28 | } 29 | Error::Io(_) | Error::Tls(_) => Self::ConnectionError(Box::new(err)), 30 | _ => Self::UnknownError(Box::new(err)), 31 | } 32 | } 33 | } 34 | 35 | impl From for AggregateError { 36 | fn from(err: MysqlAggregateError) -> Self { 37 | match err { 38 | MysqlAggregateError::OptimisticLock => Self::AggregateConflict, 39 | MysqlAggregateError::DeserializationError(error) => Self::DeserializationError(error), 40 | MysqlAggregateError::ConnectionError(error) => Self::DatabaseConnectionError(error), 41 | MysqlAggregateError::UnknownError(error) => Self::UnexpectedError(error), 42 | } 43 | } 44 | } 45 | 46 | impl From for MysqlAggregateError { 47 | fn from(err: serde_json::Error) -> Self { 48 | match err.classify() { 49 | serde_json::error::Category::Data | serde_json::error::Category::Syntax => { 50 | Self::DeserializationError(Box::new(err)) 51 | } 52 | serde_json::error::Category::Io | serde_json::error::Category::Eof => { 53 | Self::UnknownError(Box::new(err)) 54 | } 55 | } 56 | } 57 | } 58 | 59 | impl From for PersistenceError { 60 | fn from(err: MysqlAggregateError) -> Self { 61 | match err { 62 | MysqlAggregateError::OptimisticLock => Self::OptimisticLockError, 63 | MysqlAggregateError::ConnectionError(error) => Self::ConnectionError(error), 64 | MysqlAggregateError::DeserializationError(error) => Self::DeserializationError(error), 65 | MysqlAggregateError::UnknownError(error) => Self::UnknownError(error), 66 | } 67 | } 68 | } 69 | -------------------------------------------------------------------------------- /persistence/mysql-es/src/lib.rs: 
-------------------------------------------------------------------------------- 1 | #![forbid(unsafe_code)] 2 | #![deny(missing_docs)] 3 | #![deny(clippy::all)] 4 | #![warn(rust_2018_idioms)] 5 | //! # mysql-es 6 | //! 7 | //! > A MySql implementation of the `EventStore` trait in [cqrs-es](https://crates.io/crates/cqrs-es). 8 | //! 9 | pub use crate::cqrs::*; 10 | pub use crate::event_repository::*; 11 | pub use crate::types::*; 12 | pub use crate::view_repository::*; 13 | 14 | mod cqrs; 15 | mod error; 16 | mod event_repository; 17 | pub(crate) mod sql_query; 18 | mod testing; 19 | mod types; 20 | mod view_repository; 21 | -------------------------------------------------------------------------------- /persistence/mysql-es/src/sql_query.rs: -------------------------------------------------------------------------------- 1 | pub(crate) struct SqlQueryFactory { 2 | event_table: String, 3 | select_events: String, 4 | insert_event: String, 5 | all_events: String, 6 | insert_snapshot: String, 7 | update_snapshot: String, 8 | select_snapshot: String, 9 | } 10 | 11 | impl SqlQueryFactory { 12 | pub fn new(event_table: &str, snapshot_table: &str) -> Self { 13 | Self { 14 | event_table: event_table.to_string(), 15 | select_events: format!(" 16 | SELECT aggregate_type, aggregate_id, sequence, event_type, event_version, payload, metadata 17 | FROM {event_table} 18 | WHERE aggregate_type = ? AND aggregate_id = ? 19 | ORDER BY sequence"), 20 | insert_event: format!(" 21 | INSERT INTO {event_table} (aggregate_type, aggregate_id, sequence, event_type, event_version, payload, metadata) 22 | VALUES (?, ?, ?, ?, ?, ?, ?)"), 23 | all_events: format!(" 24 | SELECT aggregate_type, aggregate_id, sequence, event_type, event_version, payload, metadata 25 | FROM {event_table} 26 | WHERE aggregate_type = ? 
27 | ORDER BY sequence"), 28 | insert_snapshot: format!(" 29 | INSERT INTO {snapshot_table} (aggregate_type, aggregate_id, last_sequence, current_snapshot, payload) 30 | VALUES (?, ?, ?, ?, ?)"), 31 | update_snapshot: format!(" 32 | UPDATE {snapshot_table} 33 | SET last_sequence= ? , payload= ?, current_snapshot= ? 34 | WHERE aggregate_type= ? AND aggregate_id= ? AND current_snapshot= ?"), 35 | select_snapshot: format!(" 36 | SELECT aggregate_type, aggregate_id, last_sequence, current_snapshot, payload 37 | FROM {snapshot_table} 38 | WHERE aggregate_type = ? AND aggregate_id = ?") 39 | } 40 | } 41 | pub fn select_events(&self) -> &str { 42 | &self.select_events 43 | } 44 | pub fn insert_event(&self) -> &str { 45 | &self.insert_event 46 | } 47 | pub fn insert_snapshot(&self) -> &str { 48 | &self.insert_snapshot 49 | } 50 | pub fn update_snapshot(&self) -> &str { 51 | &self.update_snapshot 52 | } 53 | pub fn select_snapshot(&self) -> &str { 54 | &self.select_snapshot 55 | } 56 | pub fn all_events(&self) -> &str { 57 | &self.all_events 58 | } 59 | pub fn get_last_events(&self, last_sequence: usize) -> String { 60 | format!( 61 | " 62 | SELECT aggregate_type, aggregate_id, sequence, event_type, event_version, payload, metadata 63 | FROM {} 64 | WHERE aggregate_type = ? AND aggregate_id = ? AND sequence > {} 65 | ORDER BY sequence", 66 | &self.event_table, last_sequence 67 | ) 68 | } 69 | } 70 | 71 | #[test] 72 | fn test_queries() { 73 | let query_factory = SqlQueryFactory::new("my_events", "my_snapshots"); 74 | assert_eq!( 75 | query_factory.select_events(), 76 | " 77 | SELECT aggregate_type, aggregate_id, sequence, event_type, event_version, payload, metadata 78 | FROM my_events 79 | WHERE aggregate_type = ? AND aggregate_id = ? 
80 | ORDER BY sequence" 81 | ); 82 | assert_eq!(query_factory.insert_event(), " 83 | INSERT INTO my_events (aggregate_type, aggregate_id, sequence, event_type, event_version, payload, metadata) 84 | VALUES (?, ?, ?, ?, ?, ?, ?)"); 85 | assert_eq!( 86 | query_factory.all_events(), 87 | " 88 | SELECT aggregate_type, aggregate_id, sequence, event_type, event_version, payload, metadata 89 | FROM my_events 90 | WHERE aggregate_type = ? 91 | ORDER BY sequence" 92 | ); 93 | assert_eq!( 94 | query_factory.insert_snapshot(), 95 | " 96 | INSERT INTO my_snapshots (aggregate_type, aggregate_id, last_sequence, current_snapshot, payload) 97 | VALUES (?, ?, ?, ?, ?)" 98 | ); 99 | assert_eq!( 100 | query_factory.update_snapshot(), 101 | " 102 | UPDATE my_snapshots 103 | SET last_sequence= ? , payload= ?, current_snapshot= ? 104 | WHERE aggregate_type= ? AND aggregate_id= ? AND current_snapshot= ?" 105 | ); 106 | assert_eq!( 107 | query_factory.select_snapshot(), 108 | " 109 | SELECT aggregate_type, aggregate_id, last_sequence, current_snapshot, payload 110 | FROM my_snapshots 111 | WHERE aggregate_type = ? AND aggregate_id = ?" 112 | ); 113 | assert_eq!( 114 | query_factory.get_last_events(20), 115 | " 116 | SELECT aggregate_type, aggregate_id, sequence, event_type, event_version, payload, metadata 117 | FROM my_events 118 | WHERE aggregate_type = ? AND aggregate_id = ? 
AND sequence > 20 119 | ORDER BY sequence" 120 | ); 121 | } 122 | -------------------------------------------------------------------------------- /persistence/mysql-es/src/testing.rs: -------------------------------------------------------------------------------- 1 | #[cfg(test)] 2 | pub(crate) mod tests { 3 | use cqrs_es::persist::{GenericQuery, SerializedEvent, SerializedSnapshot}; 4 | use cqrs_es::{Aggregate, DomainEvent, EventEnvelope, View}; 5 | use serde::{Deserialize, Serialize}; 6 | use serde_json::Value; 7 | use std::fmt::{Display, Formatter}; 8 | 9 | use crate::view_repository::MysqlViewRepository; 10 | 11 | #[derive(Debug, Serialize, Deserialize, PartialEq, Default)] 12 | pub struct TestAggregate { 13 | pub(crate) id: String, 14 | pub(crate) description: String, 15 | pub(crate) tests: Vec, 16 | } 17 | 18 | impl Aggregate for TestAggregate { 19 | const TYPE: &'static str = "TestAggregate"; 20 | type Command = TestCommand; 21 | type Event = TestEvent; 22 | type Error = TestError; 23 | type Services = TestServices; 24 | 25 | async fn handle( 26 | &self, 27 | _command: Self::Command, 28 | _services: &Self::Services, 29 | ) -> Result, Self::Error> { 30 | Ok(vec![]) 31 | } 32 | 33 | fn apply(&mut self, _e: Self::Event) {} 34 | } 35 | 36 | #[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] 37 | pub enum TestEvent { 38 | Created(Created), 39 | Tested(Tested), 40 | SomethingElse(SomethingElse), 41 | } 42 | 43 | #[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] 44 | pub struct Created { 45 | pub id: String, 46 | } 47 | 48 | #[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] 49 | pub struct Tested { 50 | pub test_name: String, 51 | } 52 | 53 | #[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] 54 | pub struct SomethingElse { 55 | pub description: String, 56 | } 57 | 58 | impl DomainEvent for TestEvent { 59 | fn event_type(&self) -> String { 60 | match self { 61 | Self::Created(_) => "Created".to_string(), 62 | Self::Tested(_) => 
"Tested".to_string(), 63 | Self::SomethingElse(_) => "SomethingElse".to_string(), 64 | } 65 | } 66 | 67 | fn event_version(&self) -> String { 68 | "1.0".to_string() 69 | } 70 | } 71 | 72 | #[derive(Debug, PartialEq)] 73 | pub struct TestError(String); 74 | 75 | pub struct TestServices; 76 | 77 | impl Display for TestError { 78 | fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { 79 | write!(f, "{}", self.0) 80 | } 81 | } 82 | 83 | impl std::error::Error for TestError {} 84 | 85 | pub enum TestCommand {} 86 | 87 | pub(crate) type TestQueryRepository = 88 | GenericQuery, TestView, TestAggregate>; 89 | 90 | #[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq)] 91 | pub(crate) struct TestView { 92 | pub(crate) events: Vec, 93 | } 94 | 95 | impl View for TestView { 96 | fn update(&mut self, event: &EventEnvelope) { 97 | self.events.push(event.payload.clone()); 98 | } 99 | } 100 | 101 | pub(crate) const TEST_CONNECTION_STRING: &str = 102 | "mysql://test_user:test_pass@127.0.0.1:3306/test"; 103 | 104 | pub(crate) fn test_event_envelope( 105 | id: &str, 106 | sequence: usize, 107 | event: TestEvent, 108 | ) -> SerializedEvent { 109 | let payload: Value = serde_json::to_value(&event).unwrap(); 110 | SerializedEvent { 111 | aggregate_id: id.to_string(), 112 | sequence, 113 | aggregate_type: TestAggregate::TYPE.to_string(), 114 | event_type: event.event_type(), 115 | event_version: event.event_version(), 116 | payload, 117 | metadata: Value::default(), 118 | } 119 | } 120 | 121 | pub(crate) fn snapshot_context( 122 | aggregate_id: String, 123 | current_sequence: usize, 124 | current_snapshot: usize, 125 | aggregate: Value, 126 | ) -> SerializedSnapshot { 127 | SerializedSnapshot { 128 | aggregate_id, 129 | aggregate, 130 | current_sequence, 131 | current_snapshot, 132 | } 133 | } 134 | } 135 | -------------------------------------------------------------------------------- /persistence/mysql-es/src/types.rs: 
-------------------------------------------------------------------------------- 1 | use crate::MysqlEventRepository; 2 | use cqrs_es::persist::PersistedEventStore; 3 | use cqrs_es::CqrsFramework; 4 | 5 | /// A convenience type for a CqrsFramework backed by 6 | /// [MysqlStore](struct.MysqlStore.html). 7 | pub type MysqlCqrs = CqrsFramework>; 8 | -------------------------------------------------------------------------------- /persistence/mysql-es/tests/lib.rs: -------------------------------------------------------------------------------- 1 | use std::collections::HashMap; 2 | 3 | use cqrs_es::doc::{Customer, CustomerEvent}; 4 | use cqrs_es::persist::{PersistedEventStore, SemanticVersionEventUpcaster}; 5 | use cqrs_es::EventStore; 6 | use mysql_es::{default_mysql_pool, MysqlEventRepository}; 7 | use serde_json::Value; 8 | use sqlx::{MySql, Pool}; 9 | 10 | const TEST_CONNECTION_STRING: &str = "mysql://test_user:test_pass@127.0.0.1:3306/test"; 11 | 12 | fn new_test_event_store(pool: Pool) -> PersistedEventStore { 13 | let repo = MysqlEventRepository::new(pool); 14 | PersistedEventStore::::new_event_store(repo) 15 | } 16 | 17 | #[tokio::test] 18 | async fn commit_and_load_events() { 19 | let pool = default_mysql_pool(TEST_CONNECTION_STRING).await; 20 | let repo = MysqlEventRepository::new(pool); 21 | let event_store = PersistedEventStore::::new_event_store(repo); 22 | 23 | simple_es_commit_and_load_test(event_store).await; 24 | } 25 | 26 | #[tokio::test] 27 | async fn commit_and_load_events_snapshot_store() { 28 | let pool = default_mysql_pool(TEST_CONNECTION_STRING).await; 29 | let repo = MysqlEventRepository::new(pool); 30 | let event_store = 31 | PersistedEventStore::::new_aggregate_store(repo); 32 | 33 | simple_es_commit_and_load_test(event_store).await; 34 | } 35 | 36 | async fn simple_es_commit_and_load_test( 37 | event_store: PersistedEventStore, 38 | ) { 39 | let id = uuid::Uuid::new_v4().to_string(); 40 | assert_eq!(0, 
event_store.load_events(id.as_str()).await.unwrap().len()); 41 | let context = event_store.load_aggregate(id.as_str()).await.unwrap(); 42 | 43 | event_store 44 | .commit( 45 | vec![ 46 | CustomerEvent::NameAdded { 47 | name: "test_event_A".to_string(), 48 | }, 49 | CustomerEvent::EmailUpdated { 50 | new_email: "email A".to_string(), 51 | }, 52 | ], 53 | context, 54 | HashMap::default(), 55 | ) 56 | .await 57 | .unwrap(); 58 | 59 | assert_eq!(2, event_store.load_events(id.as_str()).await.unwrap().len()); 60 | let context = event_store.load_aggregate(id.as_str()).await.unwrap(); 61 | 62 | event_store 63 | .commit( 64 | vec![CustomerEvent::EmailUpdated { 65 | new_email: "email B".to_string(), 66 | }], 67 | context, 68 | HashMap::default(), 69 | ) 70 | .await 71 | .unwrap(); 72 | assert_eq!(3, event_store.load_events(id.as_str()).await.unwrap().len()); 73 | } 74 | 75 | #[tokio::test] 76 | async fn upcasted_event() { 77 | let pool = default_mysql_pool(TEST_CONNECTION_STRING).await; 78 | let upcaster = SemanticVersionEventUpcaster::new( 79 | "NameAdded", 80 | "1.0.1", 81 | Box::new(|mut event| match event.get_mut("NameAdded").unwrap() { 82 | Value::Object(object) => { 83 | object.insert("name".to_string(), Value::String("UNKNOWN".to_string())); 84 | event 85 | } 86 | _ => panic!("not the expected object"), 87 | }), 88 | ); 89 | let event_store = new_test_event_store(pool).with_upcasters(vec![Box::new(upcaster)]); 90 | 91 | let id = "previous_event_in_need_of_upcast".to_string(); 92 | let result = event_store.load_aggregate(id.as_str()).await.unwrap(); 93 | assert_eq!(1, result.current_sequence); 94 | assert_eq!(None, result.current_snapshot); 95 | } 96 | -------------------------------------------------------------------------------- /persistence/postgres-es/.gitignore: -------------------------------------------------------------------------------- 1 | .idea 2 | -------------------------------------------------------------------------------- 
/persistence/postgres-es/CHANGELOG.md: -------------------------------------------------------------------------------- 1 | The changelog for all crates in the cqrs-es project are located 2 | [here](https://github.com/serverlesstechnology/cqrs/blob/main/docs/versions/change_log.md). 3 | -------------------------------------------------------------------------------- /persistence/postgres-es/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "postgres-es" 3 | version.workspace = true 4 | authors.workspace = true 5 | edition.workspace = true 6 | license.workspace = true 7 | keywords.workspace = true 8 | description = "A Postgres implementation of an event repository for cqrs-es." 9 | repository.workspace = true 10 | documentation = "https://docs.rs/postgres-es" 11 | readme = "README.md" 12 | 13 | [dependencies] 14 | cqrs-es.workspace = true 15 | futures = "0.3" 16 | serde = { workspace = true, features = ["derive"] } 17 | serde_json = "1.0" 18 | sqlx = { version = "0.8", features = ["postgres", "json"] } 19 | tokio = { workspace = true, features = ["rt"] } 20 | thiserror = "2.0.12" 21 | 22 | [dev-dependencies] 23 | uuid.workspace = true 24 | 25 | [features] 26 | default = ["runtime-tokio-rustls"] 27 | runtime-async-std-native-tls = ["sqlx/runtime-async-std-native-tls"] 28 | runtime-tokio-native-tls = ["sqlx/runtime-tokio-native-tls"] 29 | runtime-async-std-rustls = ["sqlx/runtime-async-std-rustls"] 30 | runtime-tokio-rustls = ["sqlx/runtime-tokio-rustls"] 31 | -------------------------------------------------------------------------------- /persistence/postgres-es/README.md: -------------------------------------------------------------------------------- 1 | # postgres-es 2 | 3 | > A Postgres implementation of the `PersistedEventRepository` trait in cqrs-es. 
4 | 5 | --- 6 | 7 | ## Usage 8 | Add to your Cargo.toml file: 9 | 10 | ```toml 11 | [dependencies] 12 | cqrs-es = "0.4.11" 13 | postgres-es = "0.4.11" 14 | ``` 15 | 16 | Requires access to a Postgres DB with existing tables. See: 17 | - [Sample database configuration](db/init.sql) 18 | - Use `docker-compose` to quickly setup [a local database](docker-compose.yml) 19 | 20 | A simple configuration example: 21 | ``` 22 | let pool = default_postgress_pool("postgresql://my_user:my_pass@localhost:5432/my_db").await; 23 | let cqrs = postgres_es::postgres_cqrs(pool, vec![], services); 24 | ``` 25 | 26 | Things that could be helpful: 27 | - [User guide](https://doc.rust-cqrs.org) along with an introduction to CQRS and event sourcing. 28 | - [Demo application](https://github.com/serverlesstechnology/cqrs-demo) using the warp http server. 29 | - [Change log](https://github.com/serverlesstechnology/cqrs/blob/main/docs/versions/change_log.md) 30 | 31 | ## Runtime and TLS configuration 32 | This package defaults to the [Tokio runtime](https://crates.io/crates/tokio) and the 33 | [Rustls library](https://crates.io/crates/rustls) for TLS.
34 | If a different combination is desired the appropriate feature flag should be used: 35 | - `runtime-tokio-native-tls` 36 | - `runtime-tokio-rustls` (default) 37 | - `runtime-async-std-native-tls` 38 | - `runtime-async-std-rustls` 39 | 40 | Actix applications should use the Tokio runtime features above. 41 | 42 | [![Crates.io](https://img.shields.io/crates/v/postgres-es)](https://crates.io/crates/postgres-es) 43 | [![docs](https://img.shields.io/badge/API-docs-blue.svg)](https://docs.rs/postgres-es) 44 | ![docs](https://codebuild.us-west-2.amazonaws.com/badges?uuid=eyJlbmNyeXB0ZWREYXRhIjoiVVUyR0tRbTZmejFBYURoTHdpR3FnIUFqKzFVZE9JNW5haDZhcUFlY2xtREhtaVVJMWsxcWZOeC8zSUR0UWhpaWZMa0ZQSHlEYjg0N2FoU2lwV1FsTXFRPSIsIml2UGFyYW1ldGVyU3BlYyI6IldjUVMzVEpKN1V3aWxXWGUiLCJtYXRlcmlhbFNldFNlcmlhbCI6MX0%3D&branch=main) 45 | -------------------------------------------------------------------------------- /persistence/postgres-es/buildspec_test.yml: -------------------------------------------------------------------------------- 1 | version: 0.2 2 | 3 | phases: 4 | install: 5 | commands: 6 | - echo "${DOCKERHUB_PASSWORD}" | docker login -u "${DOCKERHUB_USERNAME}" --password-stdin 7 | - nohup /usr/local/bin/dockerd --host=unix:///var/run/docker.sock --host=tcp://127.0.0.1:2375 --storage-driver=overlay2& 8 | - timeout 15 sh -c "until docker info; do echo .; sleep 1; done" 9 | pre_build: 10 | commands: 11 | - docker build -t postgres-es .
12 | build: 13 | commands: 14 | - docker-compose up -d 15 | - docker ps 16 | - docker image inspect postgres-es 17 | - docker run --network="host" postgres-es cargo test 18 | -------------------------------------------------------------------------------- /persistence/postgres-es/db/init.sql: -------------------------------------------------------------------------------- 1 | -- a single table is used for all events in the cqrs system 2 | CREATE TABLE events 3 | ( 4 | aggregate_type text NOT NULL, 5 | aggregate_id text NOT NULL, 6 | sequence bigint CHECK (sequence >= 0) NOT NULL, 7 | event_type text NOT NULL, 8 | event_version text NOT NULL, 9 | payload json NOT NULL, 10 | metadata json NOT NULL, 11 | PRIMARY KEY (aggregate_type, aggregate_id, sequence) 12 | ); 13 | 14 | -- this table is only needed if snapshotting is employed 15 | CREATE TABLE snapshots 16 | ( 17 | aggregate_type text NOT NULL, 18 | aggregate_id text NOT NULL, 19 | last_sequence bigint CHECK (last_sequence >= 0) NOT NULL, 20 | current_snapshot bigint CHECK (current_snapshot >= 0) NOT NULL, 21 | payload json NOT NULL, 22 | PRIMARY KEY (aggregate_type, aggregate_id, last_sequence) 23 | ); 24 | 25 | -- one view table should be created for every `PostgresViewRepository` used 26 | -- replace name with the value used in `PostgresViewRepository::new(view_name: String)` 27 | CREATE TABLE test_view 28 | ( 29 | view_id text NOT NULL, 30 | version bigint CHECK (version >= 0) NOT NULL, 31 | payload json NOT NULL, 32 | PRIMARY KEY (view_id) 33 | ); 34 | 35 | INSERT INTO public.events (aggregate_type, aggregate_id, sequence, event_type, event_version, payload, metadata) 36 | VALUES ('Customer', 'previous_event_in_need_of_upcast', 1, 'NameAdded', '1.0', '{"NameAdded": {}}', '{}'); 37 | 38 | CREATE USER test_user WITH ENCRYPTED PASSWORD 'test_pass'; 39 | GRANT ALL PRIVILEGES ON DATABASE postgres TO test_user; -------------------------------------------------------------------------------- 
/persistence/postgres-es/docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | db: 3 | image: postgres 4 | restart: always 5 | ports: 6 | - 5432:5432 7 | environment: 8 | POSTGRES_DB: test 9 | POSTGRES_USER: test_user 10 | POSTGRES_PASSWORD: test_pass 11 | volumes: 12 | - './db:/docker-entrypoint-initdb.d' -------------------------------------------------------------------------------- /persistence/postgres-es/src/cqrs.rs: -------------------------------------------------------------------------------- 1 | use cqrs_es::persist::PersistedEventStore; 2 | use cqrs_es::{Aggregate, CqrsFramework, Query}; 3 | 4 | use crate::{PostgresCqrs, PostgresEventRepository}; 5 | use sqlx::postgres::PgPoolOptions; 6 | use sqlx::{Pool, Postgres}; 7 | 8 | /// A convenience method for building a simple connection pool for PostgresDb. 9 | /// A connection pool is needed for both the event and view repositories. 10 | /// 11 | /// ``` 12 | /// use sqlx::{Pool, Postgres}; 13 | /// use postgres_es::default_postgress_pool; 14 | /// 15 | /// # async fn configure_pool() { 16 | /// let connection_string = "postgresql://test_user:test_pass@localhost:5432/test"; 17 | /// let pool: Pool = default_postgress_pool(connection_string).await; 18 | /// # } 19 | /// ``` 20 | pub async fn default_postgress_pool(connection_string: &str) -> Pool { 21 | PgPoolOptions::new() 22 | .max_connections(10) 23 | .connect(connection_string) 24 | .await 25 | .expect("unable to connect to database") 26 | } 27 | 28 | /// A convenience function for creating a CqrsFramework from a database connection pool 29 | /// and queries. 
30 | pub fn postgres_cqrs( 31 | pool: Pool, 32 | query_processor: Vec>>, 33 | services: A::Services, 34 | ) -> PostgresCqrs 35 | where 36 | A: Aggregate, 37 | { 38 | let repo = PostgresEventRepository::new(pool); 39 | let store = PersistedEventStore::new_event_store(repo); 40 | CqrsFramework::new(store, query_processor, services) 41 | } 42 | 43 | /// A convenience function for creating a CqrsFramework using a snapshot store. 44 | pub fn postgres_snapshot_cqrs( 45 | pool: Pool, 46 | query_processor: Vec>>, 47 | snapshot_size: usize, 48 | services: A::Services, 49 | ) -> PostgresCqrs 50 | where 51 | A: Aggregate, 52 | { 53 | let repo = PostgresEventRepository::new(pool); 54 | let store = PersistedEventStore::new_snapshot_store(repo, snapshot_size); 55 | CqrsFramework::new(store, query_processor, services) 56 | } 57 | 58 | /// A convenience function for creating a CqrsFramework using an aggregate store. 59 | pub fn postgres_aggregate_cqrs( 60 | pool: Pool, 61 | query_processor: Vec>>, 62 | services: A::Services, 63 | ) -> PostgresCqrs 64 | where 65 | A: Aggregate, 66 | { 67 | let repo = PostgresEventRepository::new(pool); 68 | let store = PersistedEventStore::new_aggregate_store(repo); 69 | CqrsFramework::new(store, query_processor, services) 70 | } 71 | 72 | #[cfg(test)] 73 | mod test { 74 | use crate::testing::tests::{ 75 | TestAggregate, TestQueryRepository, TestServices, TestView, TEST_CONNECTION_STRING, 76 | }; 77 | use crate::{default_postgress_pool, postgres_cqrs, PostgresViewRepository}; 78 | use std::sync::Arc; 79 | 80 | #[tokio::test] 81 | async fn test_valid_cqrs_framework() { 82 | let pool = default_postgress_pool(TEST_CONNECTION_STRING).await; 83 | let repo = 84 | PostgresViewRepository::::new("test_view", pool.clone()); 85 | let query = TestQueryRepository::new(Arc::new(repo)); 86 | let _ps = postgres_cqrs(pool, vec![Box::new(query)], TestServices); 87 | } 88 | } 89 | -------------------------------------------------------------------------------- 
/persistence/postgres-es/src/error.rs: -------------------------------------------------------------------------------- 1 | use cqrs_es::persist::PersistenceError; 2 | use cqrs_es::AggregateError; 3 | use sqlx::Error; 4 | 5 | #[derive(Debug, thiserror::Error)] 6 | pub enum PostgresAggregateError { 7 | #[error("optimistic lock error")] 8 | OptimisticLock, 9 | #[error(transparent)] 10 | ConnectionError(Box), 11 | #[error(transparent)] 12 | DeserializationError(Box), 13 | #[error(transparent)] 14 | UnknownError(Box), 15 | } 16 | 17 | impl From for PostgresAggregateError { 18 | fn from(err: sqlx::Error) -> Self { 19 | // TODO: improve error handling 20 | match &err { 21 | Error::Database(database_error) => { 22 | if let Some(code) = database_error.code() { 23 | if code.as_ref() == "23505" { 24 | return Self::OptimisticLock; 25 | } 26 | } 27 | Self::UnknownError(Box::new(err)) 28 | } 29 | Error::Io(_) | Error::Tls(_) => Self::ConnectionError(Box::new(err)), 30 | _ => Self::UnknownError(Box::new(err)), 31 | } 32 | } 33 | } 34 | 35 | impl From for AggregateError { 36 | fn from(err: PostgresAggregateError) -> Self { 37 | match err { 38 | PostgresAggregateError::OptimisticLock => Self::AggregateConflict, 39 | PostgresAggregateError::ConnectionError(error) => Self::DatabaseConnectionError(error), 40 | PostgresAggregateError::DeserializationError(error) => { 41 | Self::DeserializationError(error) 42 | } 43 | PostgresAggregateError::UnknownError(error) => Self::UnexpectedError(error), 44 | } 45 | } 46 | } 47 | 48 | impl From for PostgresAggregateError { 49 | fn from(err: serde_json::Error) -> Self { 50 | match err.classify() { 51 | serde_json::error::Category::Data | serde_json::error::Category::Syntax => { 52 | Self::DeserializationError(Box::new(err)) 53 | } 54 | serde_json::error::Category::Io | serde_json::error::Category::Eof => { 55 | Self::UnknownError(Box::new(err)) 56 | } 57 | } 58 | } 59 | } 60 | 61 | impl From for PersistenceError { 62 | fn from(err: 
PostgresAggregateError) -> Self { 63 | match err { 64 | PostgresAggregateError::OptimisticLock => Self::OptimisticLockError, 65 | PostgresAggregateError::ConnectionError(error) => Self::ConnectionError(error), 66 | PostgresAggregateError::DeserializationError(error) => Self::UnknownError(error), 67 | PostgresAggregateError::UnknownError(error) => Self::UnknownError(error), 68 | } 69 | } 70 | } 71 | -------------------------------------------------------------------------------- /persistence/postgres-es/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![forbid(unsafe_code)] 2 | #![deny(missing_docs)] 3 | #![deny(clippy::all)] 4 | #![warn(rust_2018_idioms)] 5 | //! # postgres-es 6 | //! 7 | //! > A Postgres implementation of the `EventStore` trait in [cqrs-es](https://crates.io/crates/cqrs-es). 8 | //! 9 | pub use crate::cqrs::*; 10 | pub use crate::event_repository::*; 11 | pub use crate::types::*; 12 | pub use crate::view_repository::*; 13 | 14 | mod cqrs; 15 | mod error; 16 | mod event_repository; 17 | pub(crate) mod sql_query; 18 | mod testing; 19 | mod types; 20 | mod view_repository; 21 | -------------------------------------------------------------------------------- /persistence/postgres-es/src/sql_query.rs: -------------------------------------------------------------------------------- 1 | pub(crate) struct SqlQueryFactory { 2 | event_table: String, 3 | select_events: String, 4 | insert_event: String, 5 | all_events: String, 6 | insert_snapshot: String, 7 | update_snapshot: String, 8 | select_snapshot: String, 9 | } 10 | 11 | impl SqlQueryFactory { 12 | pub fn new(event_table: &str, snapshot_table: &str) -> Self { 13 | Self { 14 | event_table: event_table.to_string(), 15 | select_events: format!(" 16 | SELECT aggregate_type, aggregate_id, sequence, event_type, event_version, payload, metadata 17 | FROM {event_table} 18 | WHERE aggregate_type = $1 AND aggregate_id = $2 19 | ORDER BY sequence"), 20 | 
insert_event: format!(" 21 | INSERT INTO {event_table} (aggregate_type, aggregate_id, sequence, event_type, event_version, payload, metadata) 22 | VALUES ($1, $2, $3, $4, $5, $6, $7)"), 23 | all_events: format!(" 24 | SELECT aggregate_type, aggregate_id, sequence, event_type, event_version, payload, metadata 25 | FROM {event_table} 26 | WHERE aggregate_type = $1 27 | ORDER BY sequence"), 28 | insert_snapshot: format!(" 29 | INSERT INTO {snapshot_table} (aggregate_type, aggregate_id, last_sequence, current_snapshot, payload) 30 | VALUES ($1, $2, $3, $4, $5)"), 31 | update_snapshot: format!(" 32 | UPDATE {snapshot_table} 33 | SET last_sequence= $3 , payload= $6, current_snapshot= $4 34 | WHERE aggregate_type= $1 AND aggregate_id= $2 AND current_snapshot= $5"), 35 | select_snapshot: format!(" 36 | SELECT aggregate_type, aggregate_id, last_sequence, current_snapshot, payload 37 | FROM {snapshot_table} 38 | WHERE aggregate_type = $1 AND aggregate_id = $2") 39 | } 40 | } 41 | pub fn select_events(&self) -> &str { 42 | &self.select_events 43 | } 44 | pub fn insert_event(&self) -> &str { 45 | &self.insert_event 46 | } 47 | pub fn insert_snapshot(&self) -> &str { 48 | &self.insert_snapshot 49 | } 50 | pub fn update_snapshot(&self) -> &str { 51 | &self.update_snapshot 52 | } 53 | pub fn select_snapshot(&self) -> &str { 54 | &self.select_snapshot 55 | } 56 | pub fn all_events(&self) -> &str { 57 | &self.all_events 58 | } 59 | pub fn get_last_events(&self, last_sequence: usize) -> String { 60 | format!( 61 | " 62 | SELECT aggregate_type, aggregate_id, sequence, event_type, event_version, payload, metadata 63 | FROM {} 64 | WHERE aggregate_type = $1 AND aggregate_id = $2 AND sequence > {} 65 | ORDER BY sequence", 66 | &self.event_table, last_sequence 67 | ) 68 | } 69 | } 70 | 71 | #[test] 72 | fn test_queries() { 73 | let query_factory = SqlQueryFactory::new("my_events", "my_snapshots"); 74 | assert_eq!( 75 | query_factory.select_events(), 76 | " 77 | SELECT aggregate_type, 
aggregate_id, sequence, event_type, event_version, payload, metadata 78 | FROM my_events 79 | WHERE aggregate_type = $1 AND aggregate_id = $2 80 | ORDER BY sequence" 81 | ); 82 | assert_eq!(query_factory.insert_event(), " 83 | INSERT INTO my_events (aggregate_type, aggregate_id, sequence, event_type, event_version, payload, metadata) 84 | VALUES ($1, $2, $3, $4, $5, $6, $7)"); 85 | assert_eq!( 86 | query_factory.all_events(), 87 | " 88 | SELECT aggregate_type, aggregate_id, sequence, event_type, event_version, payload, metadata 89 | FROM my_events 90 | WHERE aggregate_type = $1 91 | ORDER BY sequence" 92 | ); 93 | assert_eq!( 94 | query_factory.insert_snapshot(), 95 | " 96 | INSERT INTO my_snapshots (aggregate_type, aggregate_id, last_sequence, current_snapshot, payload) 97 | VALUES ($1, $2, $3, $4, $5)" 98 | ); 99 | assert_eq!( 100 | query_factory.update_snapshot(), 101 | " 102 | UPDATE my_snapshots 103 | SET last_sequence= $3 , payload= $6, current_snapshot= $4 104 | WHERE aggregate_type= $1 AND aggregate_id= $2 AND current_snapshot= $5" 105 | ); 106 | assert_eq!( 107 | query_factory.select_snapshot(), 108 | " 109 | SELECT aggregate_type, aggregate_id, last_sequence, current_snapshot, payload 110 | FROM my_snapshots 111 | WHERE aggregate_type = $1 AND aggregate_id = $2" 112 | ); 113 | assert_eq!( 114 | query_factory.get_last_events(20), 115 | " 116 | SELECT aggregate_type, aggregate_id, sequence, event_type, event_version, payload, metadata 117 | FROM my_events 118 | WHERE aggregate_type = $1 AND aggregate_id = $2 AND sequence > 20 119 | ORDER BY sequence" 120 | ); 121 | } 122 | -------------------------------------------------------------------------------- /persistence/postgres-es/src/testing.rs: -------------------------------------------------------------------------------- 1 | #[cfg(test)] 2 | pub(crate) mod tests { 3 | use crate::PostgresViewRepository; 4 | use cqrs_es::persist::{GenericQuery, SerializedEvent, SerializedSnapshot}; 5 | use 
cqrs_es::{Aggregate, DomainEvent, EventEnvelope, View}; 6 | use serde::{Deserialize, Serialize}; 7 | use serde_json::Value; 8 | use std::fmt::{Display, Formatter}; 9 | 10 | #[derive(Debug, Serialize, Deserialize, PartialEq, Default)] 11 | pub(crate) struct TestAggregate { 12 | pub(crate) id: String, 13 | pub(crate) description: String, 14 | pub(crate) tests: Vec, 15 | } 16 | 17 | impl Aggregate for TestAggregate { 18 | const TYPE: &'static str = "TestAggregate"; 19 | type Command = TestCommand; 20 | type Event = TestEvent; 21 | type Error = TestError; 22 | type Services = TestServices; 23 | 24 | async fn handle( 25 | &self, 26 | _command: Self::Command, 27 | _services: &Self::Services, 28 | ) -> Result, Self::Error> { 29 | Ok(vec![]) 30 | } 31 | 32 | fn apply(&mut self, _e: Self::Event) {} 33 | } 34 | 35 | #[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] 36 | pub(crate) enum TestEvent { 37 | Created(Created), 38 | Tested(Tested), 39 | SomethingElse(SomethingElse), 40 | } 41 | 42 | #[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] 43 | pub(crate) struct Created { 44 | pub id: String, 45 | } 46 | 47 | #[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] 48 | pub(crate) struct Tested { 49 | pub test_name: String, 50 | } 51 | 52 | #[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] 53 | pub struct SomethingElse { 54 | pub description: String, 55 | } 56 | 57 | impl DomainEvent for TestEvent { 58 | fn event_type(&self) -> String { 59 | match self { 60 | Self::Created(_) => "Created".to_string(), 61 | Self::Tested(_) => "Tested".to_string(), 62 | Self::SomethingElse(_) => "SomethingElse".to_string(), 63 | } 64 | } 65 | 66 | fn event_version(&self) -> String { 67 | "1.0".to_string() 68 | } 69 | } 70 | 71 | #[derive(Debug, PartialEq)] 72 | pub(crate) struct TestError(String); 73 | 74 | #[derive(Debug)] 75 | pub(crate) struct TestServices; 76 | 77 | impl Display for TestError { 78 | fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { 79 
| write!(f, "{}", self.0) 80 | } 81 | } 82 | 83 | impl std::error::Error for TestError {} 84 | 85 | pub(crate) enum TestCommand {} 86 | 87 | pub(crate) type TestQueryRepository = 88 | GenericQuery, TestView, TestAggregate>; 89 | 90 | #[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq)] 91 | pub(crate) struct TestView { 92 | pub(crate) events: Vec, 93 | } 94 | 95 | impl View for TestView { 96 | fn update(&mut self, event: &EventEnvelope) { 97 | self.events.push(event.payload.clone()); 98 | } 99 | } 100 | 101 | pub(crate) const TEST_CONNECTION_STRING: &str = 102 | "postgresql://test_user:test_pass@127.0.0.1:5432/test"; 103 | 104 | pub(crate) fn test_event_envelope( 105 | id: &str, 106 | sequence: usize, 107 | event: TestEvent, 108 | ) -> SerializedEvent { 109 | let payload: Value = serde_json::to_value(&event).unwrap(); 110 | SerializedEvent { 111 | aggregate_id: id.to_string(), 112 | sequence, 113 | aggregate_type: TestAggregate::TYPE.to_string(), 114 | event_type: event.event_type(), 115 | event_version: event.event_version(), 116 | payload, 117 | metadata: Value::default(), 118 | } 119 | } 120 | 121 | pub(crate) fn snapshot_context( 122 | aggregate_id: String, 123 | current_sequence: usize, 124 | current_snapshot: usize, 125 | aggregate: Value, 126 | ) -> SerializedSnapshot { 127 | SerializedSnapshot { 128 | aggregate_id, 129 | aggregate, 130 | current_sequence, 131 | current_snapshot, 132 | } 133 | } 134 | } 135 | -------------------------------------------------------------------------------- /persistence/postgres-es/src/types.rs: -------------------------------------------------------------------------------- 1 | use crate::PostgresEventRepository; 2 | use cqrs_es::persist::PersistedEventStore; 3 | use cqrs_es::CqrsFramework; 4 | 5 | /// A convenience type for a CqrsFramework backed by 6 | /// [PostgresStore](struct.PostgresStore.html). 
7 | pub type PostgresCqrs = CqrsFramework>; 8 | -------------------------------------------------------------------------------- /persistence/postgres-es/tests/lib.rs: -------------------------------------------------------------------------------- 1 | use std::collections::HashMap; 2 | 3 | use cqrs_es::doc::{Customer, CustomerEvent}; 4 | use cqrs_es::persist::{PersistedEventStore, SemanticVersionEventUpcaster}; 5 | use cqrs_es::EventStore; 6 | use postgres_es::{default_postgress_pool, PostgresEventRepository}; 7 | use serde_json::Value; 8 | use sqlx::{Pool, Postgres}; 9 | 10 | const TEST_CONNECTION_STRING: &str = "postgresql://test_user:test_pass@127.0.0.1:5432/test"; 11 | 12 | fn new_test_event_store( 13 | pool: Pool, 14 | ) -> PersistedEventStore { 15 | let repo = PostgresEventRepository::new(pool); 16 | PersistedEventStore::::new_event_store(repo) 17 | } 18 | 19 | #[tokio::test] 20 | async fn commit_and_load_events() { 21 | let pool = default_postgress_pool(TEST_CONNECTION_STRING).await; 22 | let repo = PostgresEventRepository::new(pool); 23 | let event_store = 24 | PersistedEventStore::::new_event_store(repo); 25 | 26 | simple_es_commit_and_load_test(event_store).await; 27 | } 28 | 29 | #[tokio::test] 30 | async fn commit_and_load_events_snapshot_store() { 31 | let pool = default_postgress_pool(TEST_CONNECTION_STRING).await; 32 | let repo = PostgresEventRepository::new(pool); 33 | let event_store = 34 | PersistedEventStore::::new_aggregate_store(repo); 35 | 36 | simple_es_commit_and_load_test(event_store).await; 37 | } 38 | 39 | async fn simple_es_commit_and_load_test( 40 | event_store: PersistedEventStore, 41 | ) { 42 | let id = uuid::Uuid::new_v4().to_string(); 43 | assert_eq!(0, event_store.load_events(id.as_str()).await.unwrap().len()); 44 | let context = event_store.load_aggregate(id.as_str()).await.unwrap(); 45 | 46 | event_store 47 | .commit( 48 | vec![ 49 | CustomerEvent::NameAdded { 50 | name: "test_event_A".to_string(), 51 | }, 52 | 
CustomerEvent::EmailUpdated { 53 | new_email: "email A".to_string(), 54 | }, 55 | ], 56 | context, 57 | HashMap::default(), 58 | ) 59 | .await 60 | .unwrap(); 61 | 62 | assert_eq!(2, event_store.load_events(id.as_str()).await.unwrap().len()); 63 | let context = event_store.load_aggregate(id.as_str()).await.unwrap(); 64 | 65 | event_store 66 | .commit( 67 | vec![CustomerEvent::EmailUpdated { 68 | new_email: "email B".to_string(), 69 | }], 70 | context, 71 | HashMap::default(), 72 | ) 73 | .await 74 | .unwrap(); 75 | assert_eq!(3, event_store.load_events(id.as_str()).await.unwrap().len()); 76 | } 77 | 78 | #[tokio::test] 79 | async fn upcasted_event() { 80 | let pool = default_postgress_pool(TEST_CONNECTION_STRING).await; 81 | let upcaster = SemanticVersionEventUpcaster::new( 82 | "NameAdded", 83 | "1.0.1", 84 | Box::new(|mut event| match event.get_mut("NameAdded").unwrap() { 85 | Value::Object(object) => { 86 | object.insert("name".to_string(), Value::String("UNKNOWN".to_string())); 87 | event 88 | } 89 | _ => panic!("not the expected object"), 90 | }), 91 | ); 92 | let event_store = new_test_event_store(pool).with_upcasters(vec![Box::new(upcaster)]); 93 | 94 | let id = "previous_event_in_need_of_upcast".to_string(); 95 | let result = event_store.load_aggregate(id.as_str()).await.unwrap(); 96 | assert_eq!(1, result.current_sequence); 97 | assert_eq!(None, result.current_snapshot); 98 | } 99 | -------------------------------------------------------------------------------- /src/error.rs: -------------------------------------------------------------------------------- 1 | use std::error; 2 | 3 | /// The base error for the framework. 4 | #[derive(Debug, thiserror::Error)] 5 | pub enum AggregateError { 6 | /// This is the error returned when a user violates a business rule. The payload within 7 | /// `AggregateError::UserError` should be used to pass information to inform the user of 8 | /// the nature of problem. 
9 | /// 10 | /// The `UserErrorPayload` struct has been provided as a reference implementation for this 11 | /// purpose. 12 | /// 13 | /// ### Handling 14 | /// In a Restful application this should translate to a 400 response status. 15 | #[error("{0}")] 16 | UserError(T), 17 | /// A command has been rejected due to a conflict with another command on the same aggregate 18 | /// instance. This is handled by optimistic locking in systems backed by an RDBMS. 19 | /// 20 | /// ### Handling 21 | /// In a Restful application this usually translates to a 503 or 429 response status, often with 22 | /// a [Retry-After response header](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Retry-After) 23 | /// indicating that the user should try again. 24 | #[error("aggregate conflict")] 25 | AggregateConflict, 26 | /// A error occurred while attempting to read or write from a database. 27 | #[error("{0}")] 28 | DatabaseConnectionError(Box), 29 | /// A deserialization error occurred due to invalid JSON. 30 | #[error("{0}")] 31 | DeserializationError(Box), 32 | /// A technical error was encountered that prevented the command from being applied to the 33 | /// aggregate. In general the accompanying message should be logged for investigation rather 34 | /// than returned to the user. 35 | #[error("{0}")] 36 | UnexpectedError(Box), 37 | } 38 | -------------------------------------------------------------------------------- /src/event.rs: -------------------------------------------------------------------------------- 1 | use std::collections::HashMap; 2 | use std::fmt; 3 | 4 | use serde::de::DeserializeOwned; 5 | use serde::Serialize; 6 | 7 | use crate::aggregate::Aggregate; 8 | 9 | /// A `DomainEvent` represents any business change in the state of an `Aggregate`. 10 | /// 11 | /// `DomainEvent`s are immutable, and when 12 | /// [event sourcing](https://martinfowler.com/eaaDev/EventSourcing.html) 13 | /// is used they are the single source of truth. 
14 | /// 15 | /// The name of a `DomainEvent` should always be in the past tense, e.g., 16 | /// - AdminPrivilegesGranted 17 | /// - EmailAddressChanged 18 | /// - DependencyAdded 19 | /// 20 | /// To simplify serialization, an event should be an enum, and each variant should carry any 21 | /// important information. 22 | /// 23 | /// Though the `DomainEvent` trait only has a single function, the events must also derive a 24 | /// number of standard traits. 25 | /// - `Clone` - events may be cloned throughout the framework, particularly when applied to queries 26 | /// - `Serialize` and `Deserialize` - required for persistence 27 | /// - `PartialEq` and `Debug` - needed for effective testing 28 | /// 29 | /// # Examples 30 | /// ``` 31 | /// # use cqrs_es::doc::Customer; 32 | /// # use cqrs_es::{Aggregate,DomainEvent}; 33 | /// # use serde::{Serialize,Deserialize}; 34 | /// #[derive(Clone,Debug,Serialize,Deserialize,PartialEq)] 35 | /// pub enum CustomerEvent { 36 | /// NameChanged{ changed_name: String }, 37 | /// EmailUpdated{ new_email: String }, 38 | /// } 39 | /// ``` 40 | pub trait DomainEvent: 41 | Serialize + DeserializeOwned + Clone + PartialEq + fmt::Debug + Sync + Send 42 | { 43 | /// A name specifying the event, used for event upcasting. 44 | fn event_type(&self) -> String; 45 | /// A version of the `event_type`, used for event upcasting. 46 | fn event_version(&self) -> String; 47 | } 48 | 49 | /// `EventEnvelope` is a data structure that encapsulates an event with its pertinent 50 | /// information. 51 | /// All of the associated data will be transported and persisted together and will be available 52 | /// for queries. 
53 | /// 54 | /// Within any system an event must be unique based on the compound key composed of its: 55 | /// - [`aggregate_type`](https://docs.rs/cqrs-es/latest/cqrs_es/trait.Aggregate.html#tymethod.aggregate_type) 56 | /// - `aggregate_id` 57 | /// - `sequence` 58 | /// 59 | /// Thus an `EventEnvelope` provides a uniqueness value along with an event `payload` and 60 | /// `metadata`. 61 | #[derive(Debug)] 62 | pub struct EventEnvelope 63 | where 64 | A: Aggregate, 65 | { 66 | /// The id of the aggregate instance. 67 | pub aggregate_id: String, 68 | /// The sequence number for an aggregate instance. 69 | pub sequence: usize, 70 | /// The event payload with all business information. 71 | pub payload: A::Event, 72 | /// Additional metadata for use in auditing, logging or debugging purposes. 73 | pub metadata: HashMap, 74 | } 75 | 76 | impl Clone for EventEnvelope { 77 | fn clone(&self) -> Self { 78 | Self { 79 | aggregate_id: self.aggregate_id.clone(), 80 | sequence: self.sequence, 81 | payload: self.payload.clone(), 82 | metadata: self.metadata.clone(), 83 | } 84 | } 85 | } 86 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | #![forbid(unsafe_code)] 2 | #![deny(missing_docs)] 3 | #![deny(clippy::all)] 4 | #![warn(rust_2018_idioms)] 5 | #![doc = include_str!("../README.md")] 6 | //! 7 | pub use crate::aggregate::*; 8 | pub use crate::cqrs::*; 9 | pub use crate::error::*; 10 | pub use crate::event::*; 11 | pub use crate::query::*; 12 | pub use crate::store::*; 13 | 14 | mod aggregate; 15 | mod cqrs; 16 | mod error; 17 | mod event; 18 | mod query; 19 | mod store; 20 | 21 | #[doc(hidden)] 22 | pub mod doc; 23 | 24 | /// An in-memory event store suitable for local testing. 25 | /// 26 | /// A backing store is necessary for any application to store and retrieve the generated events. 
27 | /// This in-memory store is useful for application development and integration tests that do not 28 | /// require persistence after running. 29 | /// 30 | /// ``` 31 | /// # use cqrs_es::doc::{MyAggregate, MyService}; 32 | /// use cqrs_es::CqrsFramework; 33 | /// use cqrs_es::mem_store::MemStore; 34 | /// 35 | /// let store = MemStore::::default(); 36 | /// let service = MyService::default(); 37 | /// let cqrs = CqrsFramework::new(store, vec![], service); 38 | /// ``` 39 | pub mod mem_store; 40 | 41 | pub mod persist; 42 | 43 | pub mod test; 44 | -------------------------------------------------------------------------------- /src/persist.rs: -------------------------------------------------------------------------------- 1 | //! Common persistence mechanisms. 2 | //! 3 | //! This module is used alongside one of the available repository crates: 4 | //! - [postgres-es](https://crates.io/crates/postgres-es) 5 | //! - [mysql-es](https://crates.io/crates/mysql-es) 6 | //! - [dynamo-es](https://crates.io/crates/dynamo-es) 7 | //! 8 | //! 9 | //! 
10 | pub use context::EventStoreAggregateContext; 11 | pub use error::PersistenceError; 12 | pub use event_repository::PersistedEventRepository; 13 | pub use event_store::PersistedEventStore; 14 | pub use event_stream::{ReplayFeed, ReplayStream}; 15 | pub use generic_query::{GenericQuery, QueryErrorHandler}; 16 | pub use replay::QueryReplay; 17 | pub use serialized_event::{SerializedEvent, SerializedSnapshot}; 18 | pub use upcaster::{ 19 | EventUpcaster, SemanticVersion, SemanticVersionError, SemanticVersionEventUpcaster, 20 | SemanticVersionEventUpcasterFunc, 21 | }; 22 | pub use view_repository::{ViewContext, ViewRepository}; 23 | 24 | mod context; 25 | mod error; 26 | mod event_repository; 27 | mod event_store; 28 | mod event_stream; 29 | mod generic_query; 30 | mod replay; 31 | mod serialized_event; 32 | mod upcaster; 33 | mod view_repository; 34 | 35 | // Documentation items 36 | #[doc(hidden)] 37 | pub mod doc; 38 | -------------------------------------------------------------------------------- /src/persist/context.rs: -------------------------------------------------------------------------------- 1 | use crate::{Aggregate, AggregateContext}; 2 | 3 | /// Holds context for the pure event store implementation PostgresStore. 4 | /// This is only used internally within the `EventStore`. 5 | pub struct EventStoreAggregateContext { 6 | /// The aggregate ID of the aggregate instance that has been loaded. 7 | pub aggregate_id: String, 8 | /// The current state of the aggregate instance. 9 | pub aggregate: A, 10 | /// The last committed event sequence number for this aggregate instance. 11 | pub current_sequence: usize, 12 | /// The last committed snapshot version for this aggregate instance. 
13 | pub current_snapshot: Option, 14 | } 15 | 16 | impl EventStoreAggregateContext { 17 | pub(crate) fn context_for(aggregate_id: &str, _is_event_source: bool) -> Self { 18 | Self { 19 | aggregate_id: aggregate_id.to_string(), 20 | aggregate: A::default(), 21 | current_sequence: 0, 22 | current_snapshot: None, 23 | } 24 | } 25 | } 26 | 27 | impl AggregateContext for EventStoreAggregateContext { 28 | fn aggregate(&self) -> &A { 29 | &self.aggregate 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /src/persist/doc.rs: -------------------------------------------------------------------------------- 1 | use crate::doc::MyAggregate; 2 | use crate::persist::event_stream::ReplayStream; 3 | use crate::persist::{ 4 | PersistedEventRepository, PersistenceError, SerializedEvent, SerializedSnapshot, ViewContext, 5 | ViewRepository, 6 | }; 7 | use crate::{Aggregate, EventEnvelope, View}; 8 | use serde::{Deserialize, Serialize}; 9 | use serde_json::Value; 10 | 11 | #[derive(Debug, Default, Serialize, Deserialize)] 12 | pub struct MyView; 13 | 14 | impl View for MyView { 15 | fn update(&mut self, _event: &EventEnvelope) { 16 | todo!() 17 | } 18 | } 19 | 20 | pub struct MyDatabaseConnection; 21 | pub struct MyViewRepository; 22 | 23 | impl MyViewRepository { 24 | pub fn new(_db: MyDatabaseConnection) -> Self { 25 | Self 26 | } 27 | } 28 | 29 | impl ViewRepository for MyViewRepository { 30 | async fn load(&self, _view_id: &str) -> Result, PersistenceError> { 31 | todo!() 32 | } 33 | 34 | async fn load_with_context( 35 | &self, 36 | _view_id: &str, 37 | ) -> Result, PersistenceError> { 38 | todo!() 39 | } 40 | 41 | async fn update_view( 42 | &self, 43 | _view: MyView, 44 | _context: ViewContext, 45 | ) -> Result<(), PersistenceError> { 46 | todo!() 47 | } 48 | } 49 | 50 | pub struct MyEventIterator; 51 | impl Iterator for MyEventIterator { 52 | type Item = Result; 53 | 54 | fn next(&mut self) -> Option { 55 | todo!() 56 | } 57 | } 
58 | 59 | pub struct MyEventRepository; 60 | 61 | impl MyEventRepository { 62 | pub fn new(_db: MyDatabaseConnection) -> Self { 63 | Self 64 | } 65 | } 66 | 67 | impl PersistedEventRepository for MyEventRepository { 68 | async fn get_events( 69 | &self, 70 | _aggregate_id: &str, 71 | ) -> Result, PersistenceError> { 72 | todo!() 73 | } 74 | 75 | async fn get_last_events( 76 | &self, 77 | _aggregate_id: &str, 78 | _number_events: usize, 79 | ) -> Result, PersistenceError> { 80 | todo!() 81 | } 82 | 83 | async fn get_snapshot( 84 | &self, 85 | _aggregate_id: &str, 86 | ) -> Result, PersistenceError> { 87 | todo!() 88 | } 89 | 90 | async fn persist( 91 | &self, 92 | _events: &[SerializedEvent], 93 | _snapshot_update: Option<(String, Value, usize)>, 94 | ) -> Result<(), PersistenceError> { 95 | todo!() 96 | } 97 | 98 | async fn stream_events( 99 | &self, 100 | _aggregate_id: &str, 101 | ) -> Result { 102 | todo!() 103 | } 104 | 105 | async fn stream_all_events(&self) -> Result { 106 | todo!() 107 | } 108 | } 109 | -------------------------------------------------------------------------------- /src/persist/error.rs: -------------------------------------------------------------------------------- 1 | use crate::persist::SerializedEvent; 2 | use crate::AggregateError; 3 | use std::error; 4 | 5 | /// Errors for implementations of a persistent event store. 6 | #[derive(Debug, thiserror::Error)] 7 | pub enum PersistenceError { 8 | /// Optimistic locking conflict occurred while committing and aggregate. 9 | #[error("optimistic lock error")] 10 | OptimisticLockError, 11 | /// An error occurred connecting to the database. 12 | #[error("{0}")] 13 | ConnectionError(Box), 14 | /// Error occurred while attempting to deserialize data. 15 | #[error("{0}")] 16 | DeserializationError(Box), 17 | /// An unexpected error occurred while accessing the database. 
18 | #[error("{0}")] 19 | UnknownError(Box), 20 | } 21 | 22 | impl From for AggregateError { 23 | fn from(err: PersistenceError) -> Self { 24 | match err { 25 | PersistenceError::OptimisticLockError => Self::AggregateConflict, 26 | PersistenceError::ConnectionError(error) => Self::DatabaseConnectionError(error), 27 | PersistenceError::DeserializationError(error) => Self::DeserializationError(error), 28 | PersistenceError::UnknownError(error) => Self::UnexpectedError(error), 29 | } 30 | } 31 | } 32 | 33 | impl From for PersistenceError { 34 | fn from(err: serde_json::Error) -> Self { 35 | match err.classify() { 36 | serde_json::error::Category::Data | serde_json::error::Category::Syntax => { 37 | Self::DeserializationError(Box::new(err)) 38 | } 39 | serde_json::error::Category::Io | serde_json::error::Category::Eof => { 40 | Self::UnknownError(Box::new(err)) 41 | } 42 | } 43 | } 44 | } 45 | 46 | impl From>> for PersistenceError { 47 | fn from(err: tokio::sync::mpsc::error::SendError>) -> Self { 48 | Self::UnknownError(Box::new(err)) 49 | } 50 | } 51 | 52 | impl From for AggregateError { 53 | fn from(err: serde_json::error::Error) -> Self { 54 | match err.classify() { 55 | serde_json::error::Category::Data | serde_json::error::Category::Syntax => { 56 | Self::DeserializationError(Box::new(err)) 57 | } 58 | serde_json::error::Category::Io | serde_json::error::Category::Eof => { 59 | Self::UnexpectedError(Box::new(err)) 60 | } 61 | } 62 | } 63 | } 64 | -------------------------------------------------------------------------------- /src/persist/event_repository.rs: -------------------------------------------------------------------------------- 1 | use std::future::Future; 2 | 3 | use crate::persist::event_stream::ReplayStream; 4 | use crate::persist::{PersistenceError, SerializedEvent, SerializedSnapshot}; 5 | use crate::Aggregate; 6 | use serde_json::Value; 7 | 8 | /// Handles the database access needed for operation of a PersistedSnapshotStore. 
pub trait PersistedEventRepository: Send + Sync {
    /// Returns all events for a single aggregate instance.
    fn get_events<A: Aggregate>(
        &self,
        aggregate_id: &str,
    ) -> impl Future<Output = Result<Vec<SerializedEvent>, PersistenceError>> + Send;

    /// Returns the last events for a single aggregate instance.
    fn get_last_events<A: Aggregate>(
        &self,
        aggregate_id: &str,
        last_sequence: usize,
    ) -> impl Future<Output = Result<Vec<SerializedEvent>, PersistenceError>> + Send;

    /// Returns the current snapshot for an aggregate instance.
    fn get_snapshot<A: Aggregate>(
        &self,
        aggregate_id: &str,
    ) -> impl Future<Output = Result<Option<SerializedSnapshot>, PersistenceError>> + Send;

    /// Commits the updated aggregate and accompanying events.
    fn persist<A: Aggregate>(
        &self,
        events: &[SerializedEvent],
        snapshot_update: Option<(String, Value, usize)>,
    ) -> impl Future<Output = Result<(), PersistenceError>> + Send;

    /// Streams all events for an aggregate instance.
    fn stream_events<A: Aggregate>(
        &self,
        aggregate_id: &str,
    ) -> impl Future<Output = Result<ReplayStream, PersistenceError>> + Send;

    /// Streams all events for an aggregate type.
    fn stream_all_events<A: Aggregate>(
        &self,
    ) -> impl Future<Output = Result<ReplayStream, PersistenceError>> + Send;
}
-------------------------------------------------------------------------------- /src/persist/event_stream.rs: --------------------------------------------------------------------------------
use crate::persist::{EventUpcaster, PersistenceError, SerializedEvent};
use crate::{Aggregate, EventEnvelope};
use tokio::sync::mpsc::{Receiver, Sender};

/// Accesses a domain event stream for a particular aggregate.
///
/// _Note: design expected to change after [implementation of RFC 2996](https://github.com/rust-lang/rust/issues/79024)._
pub struct ReplayStream {
    queue: Receiver<Result<SerializedEvent, PersistenceError>>,
}

impl ReplayStream {
    /// Creates a new `ReplayStream` that will buffer events up to the `queue_size`.
14 | pub fn new(queue_size: usize) -> (ReplayFeed, Self) { 15 | let (sender, queue) = tokio::sync::mpsc::channel(queue_size); 16 | (ReplayFeed { sender }, Self { queue }) 17 | } 18 | 19 | /// Receive the next upcasted event or error in the stream, if no event is available this will block. 20 | pub async fn next( 21 | &mut self, 22 | upcasters: &[Box], 23 | ) -> Option, PersistenceError>> { 24 | self.queue 25 | .recv() 26 | .await 27 | .map(|result| result.and_then(|event| event.upcast(upcasters).try_into())) 28 | } 29 | } 30 | 31 | /// Used to send events to a `ReplayStream` for replaying events. 32 | pub struct ReplayFeed { 33 | sender: Sender>, 34 | } 35 | 36 | impl ReplayFeed { 37 | /// Push the next event onto the stream. 38 | pub async fn push( 39 | &mut self, 40 | result: Result, 41 | ) -> Result<(), PersistenceError> { 42 | self.sender.send(result).await?; 43 | Ok(()) 44 | } 45 | } 46 | #[cfg(test)] 47 | mod test { 48 | use crate::doc::MyAggregate; 49 | use crate::persist::{PersistenceError, ReplayStream}; 50 | 51 | #[tokio::test] 52 | async fn test_replay_stream() { 53 | let (mut feed, mut stream) = ReplayStream::new(5); 54 | feed.push(Err(PersistenceError::OptimisticLockError)) 55 | .await 56 | .unwrap(); 57 | drop(feed); 58 | let found = stream.next::(&[]).await; 59 | assert!( 60 | matches!( 61 | found.unwrap().unwrap_err(), 62 | PersistenceError::OptimisticLockError 63 | ), 64 | "expected optimistic lock error" 65 | ); 66 | } 67 | } 68 | -------------------------------------------------------------------------------- /src/persist/view_repository.rs: -------------------------------------------------------------------------------- 1 | use std::future::Future; 2 | 3 | use crate::persist::PersistenceError; 4 | use crate::{Aggregate, View}; 5 | 6 | /// Handles the database access needed for a GenericQuery. 7 | pub trait ViewRepository: Send + Sync 8 | where 9 | V: View, 10 | A: Aggregate, 11 | { 12 | /// Returns the current view instance. 
13 | fn load( 14 | &self, 15 | view_id: &str, 16 | ) -> impl Future, PersistenceError>> + Send; 17 | 18 | /// Returns the current view instance and context, used by the `GenericQuery` to update 19 | /// views with committed events. 20 | fn load_with_context( 21 | &self, 22 | view_id: &str, 23 | ) -> impl Future, PersistenceError>> + Send; 24 | 25 | /// Updates the view instance and context, used by the `GenericQuery` to update 26 | /// views with committed events. 27 | fn update_view( 28 | &self, 29 | view: V, 30 | context: ViewContext, 31 | ) -> impl Future> + Send; 32 | } 33 | 34 | /// A data structure maintaining context when updating views. 35 | pub struct ViewContext { 36 | /// Unique identifier of the view instance that is being modified. 37 | pub view_instance_id: String, 38 | /// The current version of the view instance, used for optimistic locking. 39 | pub version: i64, 40 | } 41 | 42 | impl ViewContext { 43 | /// Convenience function to create a new [`QueryContext`]. 44 | pub fn new(view_instance_id: String, version: i64) -> Self { 45 | Self { 46 | view_instance_id, 47 | version, 48 | } 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /src/query.rs: -------------------------------------------------------------------------------- 1 | use async_trait::async_trait; 2 | use std::fmt::Debug; 3 | 4 | use serde::de::DeserializeOwned; 5 | use serde::Serialize; 6 | 7 | use crate::aggregate::Aggregate; 8 | use crate::event::EventEnvelope; 9 | 10 | /// Each CQRS platform should have one or more queries where it will distribute committed 11 | /// events. 12 | /// 13 | /// Some example of tasks that queries commonly provide: 14 | /// - update materialized views 15 | /// - publish events to messaging service 16 | /// - trigger a command on another aggregate 17 | #[async_trait] 18 | pub trait Query: Send + Sync { 19 | /// Events will be dispatched here immediately after being committed. 
20 | async fn dispatch(&self, aggregate_id: &str, events: &[EventEnvelope]); 21 | } 22 | 23 | /// A `View` represents a materialized view, generally serialized for persistence, that is updated by a query. 24 | /// This a read element in a CQRS system. 25 | /// 26 | pub trait View: Debug + Default + Serialize + DeserializeOwned + Send + Sync { 27 | /// Each implemented view is responsible for updating its state based on events passed via 28 | /// this method. 29 | fn update(&mut self, event: &EventEnvelope); 30 | } 31 | -------------------------------------------------------------------------------- /src/store.rs: -------------------------------------------------------------------------------- 1 | use std::collections::HashMap; 2 | use std::future::Future; 3 | 4 | use crate::aggregate::Aggregate; 5 | use crate::event::EventEnvelope; 6 | use crate::AggregateError; 7 | 8 | /// The abstract central source for loading past events and committing new events. 9 | pub trait EventStore: Send + Sync 10 | where 11 | A: Aggregate, 12 | { 13 | /// Provides the current state of an aggregate along with surrounding context. 14 | /// This is used by the [CqrsFramework](struct.CqrsFramework.html) when loading 15 | /// an aggregate in order to handle incoming commands. 16 | type AC: AggregateContext; 17 | 18 | /// Load all events for a particular `aggregate_id` 19 | fn load_events( 20 | &self, 21 | aggregate_id: &str, 22 | ) -> impl Future>, AggregateError>> + Send; 23 | /// Load aggregate at current state 24 | fn load_aggregate( 25 | &self, 26 | aggregate_id: &str, 27 | ) -> impl Future>> + Send; 28 | /// Commit new events 29 | fn commit( 30 | &self, 31 | events: Vec, 32 | context: Self::AC, 33 | metadata: HashMap, 34 | ) -> impl Future>, AggregateError>> + Send; 35 | } 36 | 37 | /// Returns the aggregate as well as the context around it. 
38 | /// 39 | /// This is used internally within an `EventStore` to persist an aggregate instance and events 40 | /// with the correct context after it has been loaded and modified. 41 | pub trait AggregateContext 42 | where 43 | A: Aggregate, 44 | { 45 | /// The aggregate instance with all state loaded. 46 | fn aggregate(&self) -> &A; 47 | } 48 | -------------------------------------------------------------------------------- /src/test.rs: -------------------------------------------------------------------------------- 1 | //! This module provides a test framework for building a resilient test base around aggregates. 2 | //! 3 | //! A `TestFramework` should be used to build a comprehensive set of aggregate tests to verify 4 | //! your application logic. 5 | //! 6 | //! ```rust 7 | //! # use cqrs_es::test::TestFramework; 8 | //! # use cqrs_es::doc::{Customer, CustomerEvent, CustomerCommand, CustomerService}; 9 | //! # fn test() { 10 | //! type CustomerTestFramework = TestFramework; 11 | //! 12 | //! CustomerTestFramework::with(CustomerService::default()) 13 | //! .given_no_previous_events() 14 | //! .when(CustomerCommand::AddCustomerName{ 15 | //! name: "John Doe".to_string() 16 | //! }) 17 | //! .then_expect_events(vec![ 18 | //! CustomerEvent::NameAdded{ 19 | //! name: "John Doe".to_string() 20 | //! }]); 21 | //! # } 22 | //! ``` 23 | mod executor; 24 | mod framework; 25 | mod validator; 26 | 27 | pub use crate::test::executor::*; 28 | pub use crate::test::framework::*; 29 | pub use crate::test::validator::*; 30 | -------------------------------------------------------------------------------- /src/test/executor.rs: -------------------------------------------------------------------------------- 1 | use crate::aggregate::Aggregate; 2 | use crate::test::AggregateResultValidator; 3 | 4 | /// Holds the initial event state of an aggregate and accepts a command. 
pub struct AggregateTestExecutor<A>
where
    A: Aggregate,
{
    events: Vec<A::Event>,
    service: A::Services,
}

impl<A> AggregateTestExecutor<A>
where
    A: Aggregate,
{
    /// Consumes a command and provides a validator object to test against.
    ///
    /// ```
    /// # use cqrs_es::doc::{MyAggregate, MyCommands, MyService};
    /// use cqrs_es::test::TestFramework;
    ///
    /// let executor = TestFramework::<MyAggregate>::with(MyService)
    ///     .given_no_previous_events();
    ///
    /// let validator = executor.when(MyCommands::DoSomething);
    /// ```
    ///
    /// For `async` tests use `when_async` instead.
    pub fn when(self, command: A::Command) -> AggregateResultValidator<A> {
        let result = when::<A>(self.events, command, self.service);
        AggregateResultValidator::new(result)
    }

    /// Consumes a command in an `async` test and provides a validator object
    /// to test against.
    ///
    /// ```
    /// # use cqrs_es::doc::{MyAggregate, MyCommands, MyService};
    /// use cqrs_es::test::TestFramework;
    ///
    /// #[tokio::test]
    /// async fn test() {
    ///     let executor = TestFramework::<MyAggregate>::with(MyService)
    ///         .given_no_previous_events();
    ///
    ///     let validator = executor.when_async(MyCommands::DoSomething).await;
    /// }
    /// ```
    pub async fn when_async(self, command: A::Command) -> AggregateResultValidator<A> {
        // Rebuild aggregate state from the given events before handling the command.
        let mut aggregate = A::default();
        for event in self.events {
            aggregate.apply(event);
        }
        let result = aggregate.handle(command, &self.service).await;
        AggregateResultValidator::new(result)
    }

    /// Adds additional events to an aggregate test.
60 | /// 61 | /// ``` 62 | /// # use cqrs_es::doc::{MyAggregate, MyEvents, MyService}; 63 | /// use cqrs_es::test::TestFramework; 64 | /// 65 | /// let executor = TestFramework::::with(MyService) 66 | /// .given(vec![MyEvents::SomethingWasDone]) 67 | /// .and(vec![MyEvents::SomethingElseWasDone]); 68 | /// ``` 69 | #[must_use] 70 | pub fn and(self, new_events: Vec) -> Self { 71 | let mut events = self.events; 72 | events.extend(new_events); 73 | let service = self.service; 74 | Self { events, service } 75 | } 76 | 77 | pub(crate) fn new(events: Vec, service: A::Services) -> Self { 78 | Self { events, service } 79 | } 80 | } 81 | 82 | #[tokio::main(flavor = "current_thread")] 83 | async fn when( 84 | events: Vec, 85 | command: A::Command, 86 | service: A::Services, 87 | ) -> Result, A::Error> { 88 | let mut aggregate = A::default(); 89 | for event in events { 90 | aggregate.apply(event); 91 | } 92 | aggregate.handle(command, &service).await 93 | } 94 | -------------------------------------------------------------------------------- /src/test/framework.rs: -------------------------------------------------------------------------------- 1 | use crate::aggregate::Aggregate; 2 | use crate::test::AggregateTestExecutor; 3 | 4 | /// A framework for rigorously testing the aggregate logic, one of the *most important* 5 | /// parts of any DDD system. 6 | pub struct TestFramework { 7 | service: A::Services, 8 | } 9 | 10 | impl TestFramework { 11 | /// Create a test framework using the provided service. 12 | pub fn with(service: A::Services) -> Self { 13 | Self { service } 14 | } 15 | } 16 | 17 | impl TestFramework 18 | where 19 | A: Aggregate, 20 | { 21 | /// Initiates an aggregate test with no previous events. 
22 | /// 23 | /// ``` 24 | /// # use cqrs_es::doc::{MyAggregate, MyService}; 25 | /// use cqrs_es::test::TestFramework; 26 | /// 27 | /// let executor = TestFramework::::with(MyService) 28 | /// .given_no_previous_events(); 29 | /// ``` 30 | #[must_use] 31 | pub fn given_no_previous_events(self) -> AggregateTestExecutor { 32 | AggregateTestExecutor::new(Vec::new(), self.service) 33 | } 34 | /// Initiates an aggregate test with a collection of previous events. 35 | /// 36 | /// ``` 37 | /// # use cqrs_es::doc::{MyAggregate, MyEvents, MyService}; 38 | /// use cqrs_es::test::TestFramework; 39 | /// 40 | /// let executor = TestFramework::::with(MyService) 41 | /// .given(vec![MyEvents::SomethingWasDone]); 42 | /// ``` 43 | #[must_use] 44 | pub fn given(self, events: Vec) -> AggregateTestExecutor { 45 | AggregateTestExecutor::new(events, self.service) 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /src/test/validator.rs: -------------------------------------------------------------------------------- 1 | use crate::aggregate::Aggregate; 2 | 3 | /// Validation object for the `TestFramework` package. 4 | pub struct AggregateResultValidator 5 | where 6 | A: Aggregate, 7 | { 8 | result: Result, A::Error>, 9 | } 10 | 11 | impl AggregateResultValidator { 12 | /// Verifies that the expected events have been produced by the command. 
13 | /// 14 | /// ``` 15 | /// # use cqrs_es::doc::{MyAggregate, MyCommands, MyEvents, MyService}; 16 | /// # async fn test() { 17 | /// use cqrs_es::test::TestFramework; 18 | /// 19 | /// let validator = TestFramework::::with(MyService) 20 | /// .given_no_previous_events() 21 | /// .when(MyCommands::DoSomething); 22 | /// 23 | /// validator.then_expect_events(vec![MyEvents::SomethingWasDone]); 24 | /// # } 25 | /// ``` 26 | pub fn then_expect_events(self, expected_events: Vec) { 27 | let events = self.result.unwrap_or_else(|err| { 28 | panic!("expected success, received aggregate error: '{err}'"); 29 | }); 30 | assert_eq!(events, expected_events); 31 | } 32 | 33 | /// Verifies that the result is a `UserError` and returns the internal error payload for 34 | /// further validation. 35 | /// 36 | /// ``` 37 | /// # use cqrs_es::doc::{MyAggregate, MyCommands, MyEvents, MyService}; 38 | /// use cqrs_es::test::TestFramework; 39 | /// 40 | /// let validator = TestFramework::::with(MyService) 41 | /// .given_no_previous_events() 42 | /// .when(MyCommands::BadCommand); 43 | /// 44 | /// validator.then_expect_error_message("the expected error message"); 45 | /// ``` 46 | pub fn then_expect_error_message(self, error_message: &str) { 47 | match self.result { 48 | Ok(events) => { 49 | panic!("expected error, received events: '{events:?}'"); 50 | } 51 | Err(err) => assert_eq!(err.to_string(), error_message.to_string()), 52 | } 53 | } 54 | 55 | /// Returns the internal error payload for validation by the user. 
56 | /// 57 | /// ``` 58 | /// # use cqrs_es::doc::{MyAggregate, MyCommands, MyEvents, MyService, MyUserError}; 59 | /// use cqrs_es::test::TestFramework; 60 | /// 61 | /// let validator = TestFramework::::with(MyService) 62 | /// .given_no_previous_events() 63 | /// .when(MyCommands::BadCommand); 64 | /// 65 | /// let expected = MyUserError("the expected error message".to_string()); 66 | /// assert_eq!(expected,validator.inspect_result().unwrap_err()); 67 | /// ``` 68 | pub fn inspect_result(self) -> Result, A::Error> { 69 | self.result 70 | } 71 | 72 | pub(crate) fn new(result: Result, A::Error>) -> Self { 73 | Self { result } 74 | } 75 | } 76 | impl AggregateResultValidator 77 | where 78 | A: Aggregate, 79 | A::Error: PartialEq, 80 | { 81 | /// Verifies that the result is the expected error. 82 | /// 83 | /// > Note that the configured Error *must* implement `std::cmp::PartialEq`. 84 | /// 85 | /// ``` 86 | /// # use cqrs_es::doc::{MyAggregate, MyCommands, MyEvents, MyService, MyUserError}; 87 | /// use cqrs_es::test::TestFramework; 88 | /// 89 | /// let validator = TestFramework::::with(MyService) 90 | /// .given_no_previous_events() 91 | /// .when(MyCommands::BadCommand); 92 | /// 93 | /// let expected = MyUserError("the expected error message".to_string()); 94 | /// validator.then_expect_error(expected); 95 | /// ``` 96 | pub fn then_expect_error(self, expected_error: A::Error) { 97 | match self.result { 98 | Ok(events) => { 99 | panic!("expected error, received events: '{events:?}'"); 100 | } 101 | Err(err) => { 102 | assert_eq!(err, expected_error); 103 | } 104 | } 105 | } 106 | } 107 | --------------------------------------------------------------------------------