├── .githooks └── pre-commit ├── .gitignore ├── CHANGELOG.md ├── Cargo.toml ├── Dockerfile ├── LICENSE ├── README.md ├── bin └── install-githooks ├── buildspec_test.yml ├── db └── init.sql ├── docker-compose.yml ├── src ├── cqrs.rs ├── error.rs ├── event_repository.rs ├── lib.rs ├── sql_query.rs ├── testing.rs ├── types.rs └── view_repository.rs └── tests └── lib.rs /.githooks/pre-commit: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Ensures there are no formatting errors. 3 | # 4 | TMP_DIR="_GIT_COMMIT_CHECK_DIR" 5 | CURRENT_DIR=`pwd` 6 | trap _reset_dirs EXIT 7 | function _reset_dirs () { 8 | cd $CURRENT_DIR 9 | rm -rf $TMP_DIR 10 | } 11 | mkdir $TMP_DIR 12 | git checkout-index --prefix=$TMP_DIR/ -af 13 | cd $TMP_DIR 14 | cargo fmt --check 15 | if [ $? -ne 0 ]; then 16 | echo 17 | echo -e "\033[0;31mInvalid formatting, please run 'cargo fmt' \033[0m" 18 | exit 1 19 | fi 20 | 21 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | Cargo.lock 3 | .idea 4 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | The changelog for all crates in the cqrs-es project are located 2 | [here](https://github.com/serverlesstechnology/cqrs/blob/main/docs/versions/change_log.md). 3 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "postgres-es" 3 | version = "0.4.12" 4 | authors = ["Dave Garred "] 5 | edition = "2021" 6 | license = "Apache-2.0" 7 | keywords = ["cqrs", "event-sourcing", "serverless"] 8 | description = "A Postgres implementation of an event repository for cqrs-es." 9 | repository = "https://github.com/serverlesstechnology/postgres-es" 10 | documentation = "https://docs.rs/postgres-es" 11 | readme = "README.md" 12 | 13 | [dependencies] 14 | cqrs-es = "0.4.12" 15 | 16 | async-trait = "0.1" 17 | futures = "0.3" 18 | serde = { version = "1.0", features = ["derive"] } 19 | serde_json = "1.0" 20 | sqlx = { version = "0.8", features = ["postgres", "json"] } 21 | tokio = { version = "1", features = ["rt"] } 22 | 23 | [dev-dependencies] 24 | uuid = { version = "1.10", features = ["v4"] } 25 | 26 | [features] 27 | default = ["runtime-tokio-rustls"] 28 | runtime-async-std-native-tls = ["sqlx/runtime-async-std-native-tls"] 29 | runtime-tokio-native-tls = ["sqlx/runtime-tokio-native-tls"] 30 | runtime-async-std-rustls = ["sqlx/runtime-async-std-rustls"] 31 | runtime-tokio-rustls = ["sqlx/runtime-tokio-rustls"] 32 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM rust:latest as builder 2 | 3 | WORKDIR /home/build 4 | RUN git clone https://github.com/serverlesstechnology/postgres-es.git 5 | WORKDIR /home/build/postgres-es 6 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright 2023 Serverless Technology Consulting, LLC 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # postgres-es 2 | 3 | > A Postgres implementation of the `PersistedEventRepository` trait in cqrs-es. 4 | 5 | --- 6 | 7 | ## Usage 8 | Add to your Cargo.toml file: 9 | 10 | ```toml 11 | [dependencies] 12 | cqrs-es = "0.4.11" 13 | postgres-es = "0.4.11" 14 | ``` 15 | 16 | Requires access to a Postgres DB with existing tables. See: 17 | - [Sample database configuration](db/init.sql) 18 | - Use `docker-compose` to quickly setup [a local database](docker-compose.yml) 19 | 20 | A simple configuration example: 21 | ``` 22 | let store = default_postgress_pool("postgresql://my_user:my_pass@localhost:5432/my_db"); 23 | let cqrs = postgres_es::postgres_cqrs(pool, vec![]) 24 | ``` 25 | 26 | Things that could be helpful: 27 | - [User guide](https://doc.rust-cqrs.org) along with an introduction to CQRS and event sourcing. 28 | - [Demo application](https://github.com/serverlesstechnology/cqrs-demo) using the warp http server. 29 | - [Change log](https://github.com/serverlesstechnology/cqrs/blob/main/docs/versions/change_log.md) 30 | 31 | ## Runtime and TLS configuration 32 | This package defaults to expect the [Tokio runtime](https://crates.io/crates/tokio) and the 33 | [Rustls library](https://crates.io/crates/rustls) for TLS. 34 | If a different combination is desired the appropriate feature flag should be used: 35 | - `runtime-tokio-native-tls` 36 | - `runtime-tokio-rustls` (default) 37 | - `runtime-async-std-native-tls` 38 | - `runtime-async-std-rustls` 39 | - `runtime-actix-native-tls` 40 | - `runtime-actix-rustls` 41 | 42 | [![Crates.io](https://img.shields.io/crates/v/postgres-es)](https://crates.io/crates/postgres-es) 43 | [![docs](https://img.shields.io/badge/API-docs-blue.svg)](https://docs.rs/postgres-es) 44 | ![docs](https://codebuild.us-west-2.amazonaws.com/badges?uuid=eyJlbmNyeXB0ZWREYXRhIjoiVVUyR0tRbTZmejFBYURoTHdpR3FnSUFqKzFVZE9JNW5haDZhcUFlY2xtREhtaVVJMWsxcWZOeC8zSUR0UWhpaWZMa0ZQSHlEYjg0N2FoU2lwV1FsTXFRPSIsIml2UGFyYW1ldGVyU3BlYyI6IldjUVMzVEpKN1V3aWxXWGUiLCJtYXRlcmlhbFNldFNlcmlhbCI6MX0%3D&branch=main) 45 | -------------------------------------------------------------------------------- /bin/install-githooks: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Configures the .githooks directory locally 3 | # 4 | git config --local core.hooksPath .githooks/ 5 | -------------------------------------------------------------------------------- /buildspec_test.yml: -------------------------------------------------------------------------------- 1 | version: 0.2 2 | 3 | phases: 4 | install: 5 | commands: 6 | - echo "${DOCKERHUB_PASSWORD}" | docker login -u "${DOCKERHUB_USERNAME}" --password-stdin 7 | - nohup /usr/local/bin/dockerd --host=unix:///var/run/docker.sock --host=tcp://127.0.0.1:2375 --storage-driver=overlay2& 8 | - timeout 15 sh -c "until docker info; do echo .; sleep 1; done" 9 | pre_build: 10 | commands: 11 | - docker build -t postgres-es . 12 | build: 13 | commands: 14 | - docker-compose up -d 15 | - docker ps 16 | - docker image inspect postgres-es 17 | - docker run --network="host" postgres-es cargo test 18 | -------------------------------------------------------------------------------- /db/init.sql: -------------------------------------------------------------------------------- 1 | -- a single table is used for all events in the cqrs system 2 | CREATE TABLE events 3 | ( 4 | aggregate_type text NOT NULL, 5 | aggregate_id text NOT NULL, 6 | sequence bigint CHECK (sequence >= 0) NOT NULL, 7 | event_type text NOT NULL, 8 | event_version text NOT NULL, 9 | payload json NOT NULL, 10 | metadata json NOT NULL, 11 | PRIMARY KEY (aggregate_type, aggregate_id, sequence) 12 | ); 13 | 14 | -- this table is only needed if snapshotting is employed 15 | CREATE TABLE snapshots 16 | ( 17 | aggregate_type text NOT NULL, 18 | aggregate_id text NOT NULL, 19 | last_sequence bigint CHECK (last_sequence >= 0) NOT NULL, 20 | current_snapshot bigint CHECK (current_snapshot >= 0) NOT NULL, 21 | payload json NOT NULL, 22 | PRIMARY KEY (aggregate_type, aggregate_id, last_sequence) 23 | ); 24 | 25 | -- one view table should be created for every `PostgresViewRepository` used 26 | -- replace name with the value used in `PostgresViewRepository::new(view_name: String)` 27 | CREATE TABLE test_view 28 | ( 29 | view_id text NOT NULL, 30 | version bigint CHECK (version >= 0) NOT NULL, 31 | payload json NOT NULL, 32 | PRIMARY KEY (view_id) 33 | ); 34 | 35 | INSERT INTO public.events (aggregate_type, aggregate_id, sequence, event_type, event_version, payload, metadata) 36 | VALUES ('Customer', 'previous_event_in_need_of_upcast', 1, 'NameAdded', '1.0', '{"NameAdded": {}}', '{}'); 37 | 38 | CREATE USER test_user WITH ENCRYPTED PASSWORD 'test_pass'; 39 | GRANT ALL PRIVILEGES ON DATABASE postgres TO test_user; -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3.1' 2 | 3 | services: 4 | db: 5 | image: postgres 6 | restart: always 7 | ports: 8 | - 5432:5432 9 | environment: 10 | POSTGRES_DB: test 11 | POSTGRES_USER: test_user 12 | POSTGRES_PASSWORD: test_pass 13 | volumes: 14 | - './db:/docker-entrypoint-initdb.d' -------------------------------------------------------------------------------- /src/cqrs.rs: -------------------------------------------------------------------------------- 1 | use cqrs_es::persist::PersistedEventStore; 2 | use cqrs_es::{Aggregate, CqrsFramework, Query}; 3 | 4 | use crate::{PostgresCqrs, PostgresEventRepository}; 5 | use sqlx::postgres::PgPoolOptions; 6 | use sqlx::{Pool, Postgres}; 7 | 8 | /// A convenience method for building a simple connection pool for PostgresDb. 9 | /// A connection pool is needed for both the event and view repositories. 10 | /// 11 | /// ``` 12 | /// use sqlx::{Pool, Postgres}; 13 | /// use postgres_es::default_postgress_pool; 14 | /// 15 | /// # async fn configure_pool() { 16 | /// let connection_string = "postgresql://test_user:test_pass@localhost:5432/test"; 17 | /// let pool: Pool = default_postgress_pool(connection_string).await; 18 | /// # } 19 | /// ``` 20 | pub async fn default_postgress_pool(connection_string: &str) -> Pool { 21 | PgPoolOptions::new() 22 | .max_connections(10) 23 | .connect(connection_string) 24 | .await 25 | .expect("unable to connect to database") 26 | } 27 | 28 | /// A convenience function for creating a CqrsFramework from a database connection pool 29 | /// and queries. 30 | pub fn postgres_cqrs( 31 | pool: Pool, 32 | query_processor: Vec>>, 33 | services: A::Services, 34 | ) -> PostgresCqrs 35 | where 36 | A: Aggregate, 37 | { 38 | let repo = PostgresEventRepository::new(pool); 39 | let store = PersistedEventStore::new_event_store(repo); 40 | CqrsFramework::new(store, query_processor, services) 41 | } 42 | 43 | /// A convenience function for creating a CqrsFramework using a snapshot store. 44 | pub fn postgres_snapshot_cqrs( 45 | pool: Pool, 46 | query_processor: Vec>>, 47 | snapshot_size: usize, 48 | services: A::Services, 49 | ) -> PostgresCqrs 50 | where 51 | A: Aggregate, 52 | { 53 | let repo = PostgresEventRepository::new(pool); 54 | let store = PersistedEventStore::new_snapshot_store(repo, snapshot_size); 55 | CqrsFramework::new(store, query_processor, services) 56 | } 57 | 58 | /// A convenience function for creating a CqrsFramework using an aggregate store. 59 | pub fn postgres_aggregate_cqrs( 60 | pool: Pool, 61 | query_processor: Vec>>, 62 | services: A::Services, 63 | ) -> PostgresCqrs 64 | where 65 | A: Aggregate, 66 | { 67 | let repo = PostgresEventRepository::new(pool); 68 | let store = PersistedEventStore::new_aggregate_store(repo); 69 | CqrsFramework::new(store, query_processor, services) 70 | } 71 | 72 | #[cfg(test)] 73 | mod test { 74 | use crate::testing::tests::{ 75 | TestAggregate, TestQueryRepository, TestServices, TestView, TEST_CONNECTION_STRING, 76 | }; 77 | use crate::{default_postgress_pool, postgres_cqrs, PostgresViewRepository}; 78 | use std::sync::Arc; 79 | 80 | #[tokio::test] 81 | async fn test_valid_cqrs_framework() { 82 | let pool = default_postgress_pool(TEST_CONNECTION_STRING).await; 83 | let repo = 84 | PostgresViewRepository::::new("test_view", pool.clone()); 85 | let query = TestQueryRepository::new(Arc::new(repo)); 86 | let _ps = postgres_cqrs(pool, vec![Box::new(query)], TestServices); 87 | } 88 | } 89 | -------------------------------------------------------------------------------- /src/error.rs: -------------------------------------------------------------------------------- 1 | use std::fmt::{Debug, Display, Formatter}; 2 | 3 | use cqrs_es::persist::PersistenceError; 4 | use cqrs_es::AggregateError; 5 | use sqlx::Error; 6 | 7 | #[derive(Debug)] 8 | pub enum PostgresAggregateError { 9 | OptimisticLock, 10 | ConnectionError(Box), 11 | DeserializationError(Box), 12 | UnknownError(Box), 13 | } 14 | 15 | impl Display for PostgresAggregateError { 16 | fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { 17 | match self { 18 | PostgresAggregateError::OptimisticLock => write!(f, "optimistic lock error"), 19 | PostgresAggregateError::UnknownError(error) => write!(f, "{}", error), 20 | PostgresAggregateError::DeserializationError(error) => write!(f, "{}", error), 21 | PostgresAggregateError::ConnectionError(error) => write!(f, "{}", error), 22 | } 23 | } 24 | } 25 | 26 | impl std::error::Error for PostgresAggregateError {} 27 | 28 | impl From for PostgresAggregateError { 29 | fn from(err: sqlx::Error) -> Self { 30 | // TODO: improve error handling 31 | match &err { 32 | Error::Database(database_error) => { 33 | if let Some(code) = database_error.code() { 34 | if code.as_ref() == "23505" { 35 | return PostgresAggregateError::OptimisticLock; 36 | } 37 | } 38 | PostgresAggregateError::UnknownError(Box::new(err)) 39 | } 40 | Error::Io(_) | Error::Tls(_) => PostgresAggregateError::ConnectionError(Box::new(err)), 41 | _ => PostgresAggregateError::UnknownError(Box::new(err)), 42 | } 43 | } 44 | } 45 | 46 | impl From for AggregateError { 47 | fn from(err: PostgresAggregateError) -> Self { 48 | match err { 49 | PostgresAggregateError::OptimisticLock => AggregateError::AggregateConflict, 50 | PostgresAggregateError::ConnectionError(error) => { 51 | AggregateError::DatabaseConnectionError(error) 52 | } 53 | PostgresAggregateError::DeserializationError(error) => { 54 | AggregateError::DeserializationError(error) 55 | } 56 | PostgresAggregateError::UnknownError(error) => AggregateError::UnexpectedError(error), 57 | } 58 | } 59 | } 60 | 61 | impl From for PostgresAggregateError { 62 | fn from(err: serde_json::Error) -> Self { 63 | match err.classify() { 64 | serde_json::error::Category::Data | serde_json::error::Category::Syntax => { 65 | PostgresAggregateError::DeserializationError(Box::new(err)) 66 | } 67 | serde_json::error::Category::Io | serde_json::error::Category::Eof => { 68 | PostgresAggregateError::UnknownError(Box::new(err)) 69 | } 70 | } 71 | } 72 | } 73 | 74 | impl From for PersistenceError { 75 | fn from(err: PostgresAggregateError) -> Self { 76 | match err { 77 | PostgresAggregateError::OptimisticLock => PersistenceError::OptimisticLockError, 78 | PostgresAggregateError::ConnectionError(error) => { 79 | PersistenceError::ConnectionError(error) 80 | } 81 | PostgresAggregateError::DeserializationError(error) => { 82 | PersistenceError::UnknownError(error) 83 | } 84 | PostgresAggregateError::UnknownError(error) => PersistenceError::UnknownError(error), 85 | } 86 | } 87 | } 88 | -------------------------------------------------------------------------------- /src/event_repository.rs: -------------------------------------------------------------------------------- 1 | use async_trait::async_trait; 2 | use cqrs_es::persist::{ 3 | PersistedEventRepository, PersistenceError, ReplayStream, SerializedEvent, SerializedSnapshot, 4 | }; 5 | use cqrs_es::Aggregate; 6 | use futures::TryStreamExt; 7 | use serde_json::Value; 8 | use sqlx::postgres::PgRow; 9 | use sqlx::{Pool, Postgres, Row, Transaction}; 10 | 11 | use crate::error::PostgresAggregateError; 12 | use crate::sql_query::SqlQueryFactory; 13 | 14 | const DEFAULT_EVENT_TABLE: &str = "events"; 15 | const DEFAULT_SNAPSHOT_TABLE: &str = "snapshots"; 16 | 17 | const DEFAULT_STREAMING_CHANNEL_SIZE: usize = 200; 18 | 19 | /// An event repository relying on a Postgres database for persistence. 20 | pub struct PostgresEventRepository { 21 | pool: Pool, 22 | query_factory: SqlQueryFactory, 23 | stream_channel_size: usize, 24 | } 25 | 26 | #[async_trait] 27 | impl PersistedEventRepository for PostgresEventRepository { 28 | async fn get_events( 29 | &self, 30 | aggregate_id: &str, 31 | ) -> Result, PersistenceError> { 32 | self.select_events::(aggregate_id, self.query_factory.select_events()) 33 | .await 34 | } 35 | 36 | async fn get_last_events( 37 | &self, 38 | aggregate_id: &str, 39 | last_sequence: usize, 40 | ) -> Result, PersistenceError> { 41 | let query = self.query_factory.get_last_events(last_sequence); 42 | self.select_events::(aggregate_id, &query).await 43 | } 44 | 45 | async fn get_snapshot( 46 | &self, 47 | aggregate_id: &str, 48 | ) -> Result, PersistenceError> { 49 | let row: PgRow = match sqlx::query(self.query_factory.select_snapshot()) 50 | .bind(A::aggregate_type()) 51 | .bind(aggregate_id) 52 | .fetch_optional(&self.pool) 53 | .await 54 | .map_err(PostgresAggregateError::from)? 55 | { 56 | Some(row) => row, 57 | None => { 58 | return Ok(None); 59 | } 60 | }; 61 | Ok(Some(self.deser_snapshot(row)?)) 62 | } 63 | 64 | async fn persist( 65 | &self, 66 | events: &[SerializedEvent], 67 | snapshot_update: Option<(String, Value, usize)>, 68 | ) -> Result<(), PersistenceError> { 69 | match snapshot_update { 70 | None => { 71 | self.insert_events::(events).await?; 72 | } 73 | Some((aggregate_id, aggregate, current_snapshot)) => { 74 | if current_snapshot == 1 { 75 | self.insert::(aggregate, aggregate_id, current_snapshot, events) 76 | .await?; 77 | } else { 78 | self.update::(aggregate, aggregate_id, current_snapshot, events) 79 | .await?; 80 | } 81 | } 82 | }; 83 | Ok(()) 84 | } 85 | 86 | async fn stream_events( 87 | &self, 88 | aggregate_id: &str, 89 | ) -> Result { 90 | Ok(stream_events( 91 | self.query_factory.select_events().to_string(), 92 | A::aggregate_type(), 93 | aggregate_id.to_string(), 94 | self.pool.clone(), 95 | self.stream_channel_size, 96 | )) 97 | } 98 | 99 | // TODO: aggregate id is unused here, `stream_events` function needs to be broken up 100 | async fn stream_all_events(&self) -> Result { 101 | Ok(stream_events( 102 | self.query_factory.all_events().to_string(), 103 | A::aggregate_type(), 104 | "".to_string(), 105 | self.pool.clone(), 106 | self.stream_channel_size, 107 | )) 108 | } 109 | } 110 | 111 | fn stream_events( 112 | query: String, 113 | aggregate_type: String, 114 | aggregate_id: String, 115 | pool: Pool, 116 | channel_size: usize, 117 | ) -> ReplayStream { 118 | let (mut feed, stream) = ReplayStream::new(channel_size); 119 | tokio::spawn(async move { 120 | let query = sqlx::query(&query) 121 | .bind(&aggregate_type) 122 | .bind(&aggregate_id); 123 | let mut rows = query.fetch(&pool); 124 | while let Some(row) = rows.try_next().await.unwrap() { 125 | let event_result: Result = 126 | PostgresEventRepository::deser_event(row).map_err(Into::into); 127 | if feed.push(event_result).await.is_err() { 128 | // TODO: in the unlikely event of a broken channel this error should be reported. 129 | return; 130 | }; 131 | } 132 | }); 133 | stream 134 | } 135 | 136 | impl PostgresEventRepository { 137 | async fn select_events( 138 | &self, 139 | aggregate_id: &str, 140 | query: &str, 141 | ) -> Result, PersistenceError> { 142 | let mut rows = sqlx::query(query) 143 | .bind(A::aggregate_type()) 144 | .bind(aggregate_id) 145 | .fetch(&self.pool); 146 | let mut result: Vec = Default::default(); 147 | while let Some(row) = rows 148 | .try_next() 149 | .await 150 | .map_err(PostgresAggregateError::from)? 151 | { 152 | result.push(PostgresEventRepository::deser_event(row)?); 153 | } 154 | Ok(result) 155 | } 156 | } 157 | 158 | impl PostgresEventRepository { 159 | /// Creates a new `PostgresEventRepository` from the provided database connection. 160 | /// This uses the default tables 'events' and 'snapshots'. 161 | /// 162 | /// ``` 163 | /// use sqlx::{Pool, Postgres}; 164 | /// use postgres_es::PostgresEventRepository; 165 | /// 166 | /// fn configure_repo(pool: Pool) -> PostgresEventRepository { 167 | /// PostgresEventRepository::new(pool) 168 | /// } 169 | /// ``` 170 | pub fn new(pool: Pool) -> Self { 171 | Self::use_tables(pool, DEFAULT_EVENT_TABLE, DEFAULT_SNAPSHOT_TABLE) 172 | } 173 | 174 | /// Configures a `PostgresEventRepository` to use a streaming queue of the provided size. 175 | /// 176 | /// _Example: configure the repository to stream with a 1000 event buffer._ 177 | /// ``` 178 | /// use sqlx::{Pool, Postgres}; 179 | /// use postgres_es::PostgresEventRepository; 180 | /// 181 | /// fn configure_repo(pool: Pool) -> PostgresEventRepository { 182 | /// let store = PostgresEventRepository::new(pool); 183 | /// store.with_streaming_channel_size(1000) 184 | /// } 185 | /// ``` 186 | pub fn with_streaming_channel_size(self, stream_channel_size: usize) -> Self { 187 | Self { 188 | pool: self.pool, 189 | query_factory: self.query_factory, 190 | stream_channel_size, 191 | } 192 | } 193 | 194 | /// Configures a `PostgresEventRepository` to use the provided table names. 195 | /// 196 | /// _Example: configure the repository to use "my_event_table" and "my_snapshot_table" 197 | /// for the event and snapshot table names._ 198 | /// ``` 199 | /// use sqlx::{Pool, Postgres}; 200 | /// use postgres_es::PostgresEventRepository; 201 | /// 202 | /// fn configure_repo(pool: Pool) -> PostgresEventRepository { 203 | /// let store = PostgresEventRepository::new(pool); 204 | /// store.with_tables("my_event_table", "my_snapshot_table") 205 | /// } 206 | /// ``` 207 | pub fn with_tables(self, events_table: &str, snapshots_table: &str) -> Self { 208 | Self::use_tables(self.pool, events_table, snapshots_table) 209 | } 210 | 211 | fn use_tables(pool: Pool, events_table: &str, snapshots_table: &str) -> Self { 212 | Self { 213 | pool, 214 | query_factory: SqlQueryFactory::new(events_table, snapshots_table), 215 | stream_channel_size: DEFAULT_STREAMING_CHANNEL_SIZE, 216 | } 217 | } 218 | 219 | pub(crate) async fn insert_events( 220 | &self, 221 | events: &[SerializedEvent], 222 | ) -> Result<(), PostgresAggregateError> { 223 | let mut tx: Transaction<'_, Postgres> = sqlx::Acquire::begin(&self.pool).await?; 224 | self.persist_events::(self.query_factory.insert_event(), &mut tx, events) 225 | .await?; 226 | tx.commit().await?; 227 | Ok(()) 228 | } 229 | 230 | pub(crate) async fn insert( 231 | &self, 232 | aggregate_payload: Value, 233 | aggregate_id: String, 234 | current_snapshot: usize, 235 | events: &[SerializedEvent], 236 | ) -> Result<(), PostgresAggregateError> { 237 | let mut tx: Transaction<'_, Postgres> = sqlx::Acquire::begin(&self.pool).await?; 238 | let current_sequence = self 239 | .persist_events::(self.query_factory.insert_event(), &mut tx, events) 240 | .await?; 241 | sqlx::query(self.query_factory.insert_snapshot()) 242 | .bind(A::aggregate_type()) 243 | .bind(aggregate_id.as_str()) 244 | .bind(current_sequence as i32) 245 | .bind(current_snapshot as i32) 246 | .bind(&aggregate_payload) 247 | .execute(&mut *tx) 248 | .await?; 249 | tx.commit().await?; 250 | Ok(()) 251 | } 252 | 253 | pub(crate) async fn update( 254 | &self, 255 | aggregate: Value, 256 | aggregate_id: String, 257 | current_snapshot: usize, 258 | events: &[SerializedEvent], 259 | ) -> Result<(), PostgresAggregateError> { 260 | let mut tx: Transaction<'_, Postgres> = sqlx::Acquire::begin(&self.pool).await?; 261 | let current_sequence = self 262 | .persist_events::(self.query_factory.insert_event(), &mut tx, events) 263 | .await?; 264 | 265 | let aggregate_payload = serde_json::to_value(&aggregate)?; 266 | let result = sqlx::query(self.query_factory.update_snapshot()) 267 | .bind(A::aggregate_type()) 268 | .bind(aggregate_id.as_str()) 269 | .bind(current_sequence as i32) 270 | .bind(current_snapshot as i32) 271 | .bind((current_snapshot - 1) as i32) 272 | .bind(&aggregate_payload) 273 | .execute(&mut *tx) 274 | .await?; 275 | tx.commit().await?; 276 | match result.rows_affected() { 277 | 1 => Ok(()), 278 | _ => Err(PostgresAggregateError::OptimisticLock), 279 | } 280 | } 281 | 282 | fn deser_event(row: PgRow) -> Result { 283 | let aggregate_type: String = row.get("aggregate_type"); 284 | let aggregate_id: String = row.get("aggregate_id"); 285 | let sequence = { 286 | let s: i64 = row.get("sequence"); 287 | s as usize 288 | }; 289 | let event_type: String = row.get("event_type"); 290 | let event_version: String = row.get("event_version"); 291 | let payload: Value = row.get("payload"); 292 | let metadata: Value = row.get("metadata"); 293 | Ok(SerializedEvent::new( 294 | aggregate_id, 295 | sequence, 296 | aggregate_type, 297 | event_type, 298 | event_version, 299 | payload, 300 | metadata, 301 | )) 302 | } 303 | 304 | fn deser_snapshot(&self, row: PgRow) -> Result { 305 | let aggregate_id = row.get("aggregate_id"); 306 | let s: i64 = row.get("last_sequence"); 307 | let current_sequence = s as usize; 308 | let s: i64 = row.get("current_snapshot"); 309 | let current_snapshot = s as usize; 310 | let aggregate: Value = row.get("payload"); 311 | Ok(SerializedSnapshot { 312 | aggregate_id, 313 | aggregate, 314 | current_sequence, 315 | current_snapshot, 316 | }) 317 | } 318 | 319 | pub(crate) async fn persist_events( 320 | &self, 321 | inser_event_query: &str, 322 | tx: &mut Transaction<'_, Postgres>, 323 | events: &[SerializedEvent], 324 | ) -> Result { 325 | let mut current_sequence: usize = 0; 326 | for event in events { 327 | current_sequence = event.sequence; 328 | let event_type = &event.event_type; 329 | let event_version = &event.event_version; 330 | let payload = serde_json::to_value(&event.payload)?; 331 | let metadata = serde_json::to_value(&event.metadata)?; 332 | sqlx::query(inser_event_query) 333 | .bind(A::aggregate_type()) 334 | .bind(event.aggregate_id.as_str()) 335 | .bind(event.sequence as i32) 336 | .bind(event_type) 337 | .bind(event_version) 338 | .bind(&payload) 339 | .bind(&metadata) 340 | .execute(&mut **tx) 341 | .await?; 342 | } 343 | Ok(current_sequence) 344 | } 345 | } 346 | 347 | #[cfg(test)] 348 | mod test { 349 | use cqrs_es::persist::PersistedEventRepository; 350 | 351 | use crate::error::PostgresAggregateError; 352 | use crate::testing::tests::{ 353 | snapshot_context, test_event_envelope, Created, SomethingElse, TestAggregate, TestEvent, 354 | Tested, TEST_CONNECTION_STRING, 355 | }; 356 | use crate::{default_postgress_pool, PostgresEventRepository}; 357 | 358 | #[tokio::test] 359 | async fn event_repositories() { 360 | let pool = default_postgress_pool(TEST_CONNECTION_STRING).await; 361 | let id = uuid::Uuid::new_v4().to_string(); 362 | let event_repo: PostgresEventRepository = 363 | PostgresEventRepository::new(pool.clone()).with_streaming_channel_size(1); 364 | let events = event_repo.get_events::(&id).await.unwrap(); 365 | assert!(events.is_empty()); 366 | 367 | event_repo 368 | .insert_events::(&[ 369 | test_event_envelope(&id, 1, TestEvent::Created(Created { id: id.clone() })), 370 | test_event_envelope( 371 | &id, 372 | 2, 373 | TestEvent::Tested(Tested { 374 | test_name: "a test was run".to_string(), 375 | }), 376 | ), 377 | ]) 378 | .await 379 | .unwrap(); 380 | let events = event_repo.get_events::(&id).await.unwrap(); 381 | assert_eq!(2, events.len()); 382 | events.iter().for_each(|e| assert_eq!(&id, &e.aggregate_id)); 383 | 384 | // Optimistic lock error 385 | let result = event_repo 386 | .insert_events::(&[ 387 | test_event_envelope( 388 | &id, 389 | 3, 390 | TestEvent::SomethingElse(SomethingElse { 391 | description: "this should not persist".to_string(), 392 | }), 393 | ), 394 | test_event_envelope( 395 | &id, 396 | 2, 397 | TestEvent::SomethingElse(SomethingElse { 398 | description: "bad sequence number".to_string(), 399 | }), 400 | ), 401 | ]) 402 | .await 403 | .unwrap_err(); 404 | match result { 405 | PostgresAggregateError::OptimisticLock => {} 406 | _ => panic!("invalid error result found during insert: {}", result), 407 | }; 408 | 409 | let events = event_repo.get_events::(&id).await.unwrap(); 410 | assert_eq!(2, events.len()); 411 | 412 | verify_replay_stream(&id, event_repo).await; 413 | } 414 | 415 | async fn verify_replay_stream(id: &str, event_repo: PostgresEventRepository) { 416 | let mut stream = event_repo 417 | .stream_events::(&id) 418 | .await 419 | .unwrap(); 420 | let mut found_in_stream = 0; 421 | while let Some(_) = stream.next::(&None).await { 422 | found_in_stream += 1; 423 | } 424 | assert_eq!(found_in_stream, 2); 425 | 426 | let mut stream = event_repo 427 | .stream_all_events::() 428 | .await 429 | .unwrap(); 430 | let mut found_in_stream = 0; 431 | while let Some(_) = stream.next::(&None).await { 432 | found_in_stream += 1; 433 | } 434 | assert!(found_in_stream >= 2); 435 | } 436 | 437 | #[tokio::test] 438 | async fn snapshot_repositories() { 439 | let pool = default_postgress_pool(TEST_CONNECTION_STRING).await; 440 | let id = uuid::Uuid::new_v4().to_string(); 441 | let event_repo: PostgresEventRepository = PostgresEventRepository::new(pool.clone()); 442 | let snapshot = event_repo.get_snapshot::(&id).await.unwrap(); 443 | assert_eq!(None, snapshot); 444 | 445 | let test_description = "some test snapshot here".to_string(); 446 | let test_tests = vec!["testA".to_string(), "testB".to_string()]; 447 | event_repo 448 | .insert::( 449 | serde_json::to_value(TestAggregate { 450 | id: id.clone(), 451 | description: test_description.clone(), 452 | tests: test_tests.clone(), 453 | }) 454 | .unwrap(), 455 | id.clone(), 456 | 1, 457 | &vec![], 458 | ) 459 | .await 460 | .unwrap(); 461 | 462 | let snapshot = event_repo.get_snapshot::(&id).await.unwrap(); 463 | assert_eq!( 464 | Some(snapshot_context( 465 | id.clone(), 466 | 0, 467 | 1, 468 | serde_json::to_value(TestAggregate { 469 | id: id.clone(), 470 | description: test_description.clone(), 471 | tests: test_tests.clone(), 472 | }) 473 | .unwrap() 474 | )), 475 | snapshot 476 | ); 477 | 478 | // sequence iterated, does update 479 | event_repo 480 | .update::( 481 | serde_json::to_value(TestAggregate { 482 | id: id.clone(), 483 | description: "a test description that should be saved".to_string(), 484 | tests: test_tests.clone(), 485 | }) 486 | .unwrap(), 487 | id.clone(), 488 | 2, 489 | &vec![], 490 | ) 491 | .await 492 | .unwrap(); 493 | 494 | let snapshot = event_repo.get_snapshot::(&id).await.unwrap(); 495 | assert_eq!( 496 | Some(snapshot_context( 497 | id.clone(), 498 | 0, 499 | 2, 500 | serde_json::to_value(TestAggregate { 501 | id: id.clone(), 502 | description: "a test description that should be saved".to_string(), 503 | tests: test_tests.clone(), 504 | }) 505 | .unwrap() 506 | )), 507 | snapshot 508 | ); 509 | 510 | // sequence out of order or not iterated, does not update 511 | let result = event_repo 512 | .update::( 513 | serde_json::to_value(TestAggregate { 514 | id: id.clone(), 515 | description: "a test description that should not be saved".to_string(), 516 | tests: test_tests.clone(), 517 | }) 518 | .unwrap(), 519 | id.clone(), 520 | 2, 521 | &vec![], 522 | ) 523 | .await 524 | .unwrap_err(); 525 | match result { 526 | PostgresAggregateError::OptimisticLock => {} 527 | _ => panic!("invalid error result found during insert: {}", result), 528 | }; 529 | 530 | let snapshot = event_repo.get_snapshot::(&id).await.unwrap(); 531 | assert_eq!( 532 | Some(snapshot_context( 533 | id.clone(), 534 | 0, 535 | 2, 536 | serde_json::to_value(TestAggregate { 537 | id: id.clone(), 538 | description: "a test description that should be saved".to_string(), 539 | tests: test_tests.clone(), 540 | }) 541 | .unwrap() 542 | )), 543 | snapshot 544 | ); 545 | } 546 | } 547 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | #![forbid(unsafe_code)] 2 | #![deny(missing_docs)] 3 | #![deny(clippy::all)] 4 | #![warn(rust_2018_idioms)] 5 | //! # postgres-es 6 | //! 7 | //! > A Postgres implementation of the `EventStore` trait in [cqrs-es](https://crates.io/crates/cqrs-es). 8 | //! 9 | pub use crate::cqrs::*; 10 | pub use crate::event_repository::*; 11 | pub use crate::types::*; 12 | pub use crate::view_repository::*; 13 | 14 | mod cqrs; 15 | mod error; 16 | mod event_repository; 17 | pub(crate) mod sql_query; 18 | mod testing; 19 | mod types; 20 | mod view_repository; 21 | -------------------------------------------------------------------------------- /src/sql_query.rs: -------------------------------------------------------------------------------- 1 | pub(crate) struct SqlQueryFactory { 2 | event_table: String, 3 | select_events: String, 4 | insert_event: String, 5 | all_events: String, 6 | insert_snapshot: String, 7 | update_snapshot: String, 8 | select_snapshot: String, 9 | } 10 | 11 | impl SqlQueryFactory { 12 | pub fn new(event_table: &str, snapshot_table: &str) -> Self { 13 | Self { 14 | event_table: event_table.to_string(), 15 | select_events: format!(" 16 | SELECT aggregate_type, aggregate_id, sequence, event_type, event_version, payload, metadata 17 | FROM {} 18 | WHERE aggregate_type = $1 AND aggregate_id = $2 19 | ORDER BY sequence", event_table), 20 | insert_event: format!(" 21 | INSERT INTO {} (aggregate_type, aggregate_id, sequence, event_type, event_version, payload, metadata) 22 | VALUES ($1, $2, $3, $4, $5, $6, $7)", event_table), 23 | all_events: format!(" 24 | SELECT aggregate_type, aggregate_id, sequence, event_type, event_version, payload, metadata 25 | FROM {} 26 | WHERE aggregate_type = $1 27 | ORDER BY sequence", event_table), 28 | insert_snapshot: format!(" 29 | INSERT INTO {} (aggregate_type, aggregate_id, last_sequence, current_snapshot, payload) 30 | VALUES ($1, $2, $3, $4, $5)", snapshot_table), 31 | update_snapshot: format!(" 32 | UPDATE {} 33 | SET last_sequence= $3 , payload= $6, current_snapshot= $4 34 | WHERE aggregate_type= $1 AND aggregate_id= $2 AND current_snapshot= $5", snapshot_table), 35 | select_snapshot: format!(" 36 | SELECT aggregate_type, aggregate_id, last_sequence, current_snapshot, payload 37 | FROM {} 38 | WHERE aggregate_type = $1 AND aggregate_id = $2", snapshot_table) 39 | } 40 | } 41 | pub fn select_events(&self) -> &str { 42 | &self.select_events 43 | } 44 | pub fn insert_event(&self) -> &str { 45 | &self.insert_event 46 | } 47 | pub fn insert_snapshot(&self) -> &str { 48 | &self.insert_snapshot 49 | } 50 | pub fn update_snapshot(&self) -> &str { 51 | &self.update_snapshot 52 | } 53 | pub fn select_snapshot(&self) -> &str { 54 | &self.select_snapshot 55 | } 56 | pub fn all_events(&self) -> &str { 57 | &self.all_events 58 | } 59 | pub fn get_last_events(&self, last_sequence: usize) -> String { 60 | format!( 61 | " 62 | SELECT aggregate_type, aggregate_id, sequence, event_type, event_version, payload, metadata 63 | FROM {} 64 | WHERE aggregate_type = $1 AND aggregate_id = $2 AND sequence > {} 65 | ORDER BY sequence", 66 | &self.event_table, last_sequence 67 | ) 68 | } 69 | } 70 | 71 | #[test] 72 | fn test_queries() { 73 | let query_factory = SqlQueryFactory::new("my_events", "my_snapshots"); 74 | assert_eq!( 75 | query_factory.select_events(), 76 | " 77 | SELECT aggregate_type, aggregate_id, sequence, event_type, event_version, payload, metadata 78 | FROM my_events 79 | WHERE aggregate_type = $1 AND aggregate_id = $2 80 | ORDER BY sequence" 81 | ); 82 | assert_eq!(query_factory.insert_event(), " 83 | INSERT INTO my_events (aggregate_type, aggregate_id, sequence, event_type, event_version, payload, metadata) 84 | VALUES ($1, $2, $3, $4, $5, $6, $7)"); 85 | assert_eq!( 86 | query_factory.all_events(), 87 | " 88 | SELECT aggregate_type, aggregate_id, sequence, event_type, event_version, payload, metadata 89 | FROM my_events 90 | WHERE aggregate_type = $1 91 | ORDER BY sequence" 92 | ); 93 | assert_eq!( 94 | query_factory.insert_snapshot(), 95 | " 96 | INSERT INTO my_snapshots (aggregate_type, aggregate_id, last_sequence, current_snapshot, payload) 97 | VALUES ($1, $2, $3, $4, $5)" 98 | ); 99 | assert_eq!( 100 | query_factory.update_snapshot(), 101 | " 102 | UPDATE my_snapshots 103 | SET last_sequence= $3 , payload= $6, current_snapshot= $4 104 | WHERE aggregate_type= $1 AND aggregate_id= $2 AND current_snapshot= $5" 105 | ); 106 | assert_eq!( 107 | query_factory.select_snapshot(), 108 | " 109 | SELECT aggregate_type, aggregate_id, last_sequence, current_snapshot, payload 110 | FROM my_snapshots 111 | WHERE aggregate_type = $1 AND aggregate_id = $2" 112 | ); 113 | assert_eq!( 114 | query_factory.get_last_events(20), 115 | " 116 | SELECT aggregate_type, aggregate_id, sequence, event_type, event_version, payload, metadata 117 | FROM my_events 118 | WHERE aggregate_type = $1 AND aggregate_id = $2 AND sequence > 20 119 | ORDER BY sequence" 120 | ); 121 | } 122 | -------------------------------------------------------------------------------- /src/testing.rs: -------------------------------------------------------------------------------- 1 | #[cfg(test)] 2 | pub(crate) mod tests { 3 | use crate::PostgresViewRepository; 4 | use async_trait::async_trait; 5 | use cqrs_es::persist::{GenericQuery, SerializedEvent, SerializedSnapshot}; 6 | use cqrs_es::{Aggregate, DomainEvent, EventEnvelope, View}; 7 | use serde::{Deserialize, Serialize}; 8 | use serde_json::Value; 9 | use std::fmt::{Display, Formatter}; 10 | 11 | #[derive(Debug, Serialize, Deserialize, PartialEq)] 12 | pub(crate) struct TestAggregate { 13 | pub(crate) id: String, 14 | pub(crate) description: String, 15 | pub(crate) tests: Vec, 16 | } 17 | 18 | #[async_trait] 19 | impl Aggregate for TestAggregate { 20 | type Command = TestCommand; 21 | type Event = TestEvent; 22 | type Error = TestError; 23 | type Services = TestServices; 24 | 25 | fn aggregate_type() -> String { 26 | "TestAggregate".to_string() 27 | } 28 | 29 | async fn handle( 30 | &self, 31 | _command: Self::Command, 32 | _services: &Self::Services, 33 | ) -> Result, Self::Error> { 34 | Ok(vec![]) 35 | } 36 | 37 | fn apply(&mut self, _e: Self::Event) {} 38 | } 39 | 40 | impl Default for TestAggregate { 41 | fn default() -> Self { 42 | TestAggregate { 43 | id: "".to_string(), 44 | description: "".to_string(), 45 | tests: Vec::new(), 46 | } 47 | } 48 | } 49 | 50 | #[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] 51 | pub(crate) enum TestEvent { 52 | Created(Created), 53 | Tested(Tested), 54 | SomethingElse(SomethingElse), 55 | } 56 | 57 | #[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] 58 | pub(crate) struct Created { 59 | pub id: String, 60 | } 61 | 62 | #[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] 63 | pub(crate) struct Tested { 64 | pub test_name: String, 65 | } 66 | 67 | #[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] 68 | pub struct SomethingElse { 69 | pub description: String, 70 | } 71 | 72 | impl DomainEvent for TestEvent { 73 | fn event_type(&self) -> String { 74 | match self { 75 | TestEvent::Created(_) => "Created".to_string(), 76 | TestEvent::Tested(_) => "Tested".to_string(), 77 | TestEvent::SomethingElse(_) => "SomethingElse".to_string(), 78 | } 79 | } 80 | 81 | fn event_version(&self) -> String { 82 | "1.0".to_string() 83 | } 84 | } 85 | 86 | #[derive(Debug, PartialEq)] 87 | pub(crate) struct TestError(String); 88 | 89 | #[derive(Debug)] 90 | pub(crate) struct TestServices; 91 | 92 | impl Display for TestError { 93 | fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { 94 | write!(f, "{}", self.0) 95 | } 96 | } 97 | 98 | impl std::error::Error for TestError {} 99 | 100 | pub(crate) enum TestCommand {} 101 | 102 | pub(crate) type TestQueryRepository = 103 | GenericQuery, TestView, TestAggregate>; 104 | 105 | #[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq)] 106 | pub(crate) struct TestView { 107 | pub(crate) events: Vec, 108 | } 109 | 110 | impl View for TestView { 111 | fn update(&mut self, event: &EventEnvelope) { 112 | self.events.push(event.payload.clone()); 113 | } 114 | } 115 | 116 | pub(crate) const TEST_CONNECTION_STRING: &str = 117 | "postgresql://test_user:test_pass@127.0.0.1:5432/test"; 118 | 119 | pub(crate) fn test_event_envelope( 120 | id: &str, 121 | sequence: usize, 122 | event: TestEvent, 123 | ) -> SerializedEvent { 124 | let payload: Value = serde_json::to_value(&event).unwrap(); 125 | SerializedEvent { 126 | aggregate_id: id.to_string(), 127 | sequence, 128 | aggregate_type: TestAggregate::aggregate_type().to_string(), 129 | event_type: event.event_type().to_string(), 130 | event_version: event.event_version().to_string(), 131 | payload, 132 | metadata: Default::default(), 133 | } 134 | } 135 | 136 | pub(crate) fn snapshot_context( 137 | aggregate_id: String, 138 | current_sequence: usize, 139 | current_snapshot: usize, 140 | aggregate: Value, 141 | ) -> SerializedSnapshot { 142 | SerializedSnapshot { 143 | aggregate_id, 144 | aggregate, 145 | current_sequence, 146 | current_snapshot, 147 | } 148 | } 149 | } 150 | -------------------------------------------------------------------------------- /src/types.rs: -------------------------------------------------------------------------------- 1 | use crate::PostgresEventRepository; 2 | use cqrs_es::persist::PersistedEventStore; 3 | use cqrs_es::CqrsFramework; 4 | 5 | /// A convenience type for a CqrsFramework backed by 6 | /// [PostgresStore](struct.PostgresStore.html). 7 | pub type PostgresCqrs = CqrsFramework>; 8 | -------------------------------------------------------------------------------- /src/view_repository.rs: -------------------------------------------------------------------------------- 1 | use std::marker::PhantomData; 2 | 3 | use async_trait::async_trait; 4 | use cqrs_es::persist::{PersistenceError, ViewContext, ViewRepository}; 5 | use cqrs_es::{Aggregate, View}; 6 | use sqlx::postgres::PgRow; 7 | use sqlx::{Pool, Postgres, Row}; 8 | 9 | use crate::error::PostgresAggregateError; 10 | 11 | /// A postgres backed query repository for use in backing a `GenericQuery`. 12 | pub struct PostgresViewRepository { 13 | insert_sql: String, 14 | update_sql: String, 15 | select_sql: String, 16 | pool: Pool, 17 | _phantom: PhantomData<(V, A)>, 18 | } 19 | 20 | impl PostgresViewRepository 21 | where 22 | V: View, 23 | A: Aggregate, 24 | { 25 | /// Creates a new `PostgresViewRepository` that will store serialized views in a Postgres table named 26 | /// identically to the `view_name` value provided. This table should be created by the user 27 | /// before using this query repository (see `/db/init.sql` sql initialization file). 28 | /// 29 | /// ``` 30 | /// # use cqrs_es::doc::MyAggregate; 31 | /// # use cqrs_es::persist::doc::MyView; 32 | /// use sqlx::{Pool, Postgres}; 33 | /// use postgres_es::PostgresViewRepository; 34 | /// 35 | /// fn configure_view_repo(pool: Pool) -> PostgresViewRepository { 36 | /// PostgresViewRepository::new("my_view_table", pool) 37 | /// } 38 | /// ``` 39 | pub fn new(view_name: &str, pool: Pool) -> Self { 40 | let insert_sql = format!( 41 | "INSERT INTO {} (payload, version, view_id) VALUES ( $1, $2, $3 )", 42 | view_name 43 | ); 44 | let update_sql = format!( 45 | "UPDATE {} SET payload= $1 , version= $2 WHERE view_id= $3", 46 | view_name 47 | ); 48 | let select_sql = format!( 49 | "SELECT version,payload FROM {} WHERE view_id= $1", 50 | view_name 51 | ); 52 | Self { 53 | insert_sql, 54 | update_sql, 55 | select_sql, 56 | pool, 57 | _phantom: Default::default(), 58 | } 59 | } 60 | } 61 | 62 | #[async_trait] 63 | impl ViewRepository for PostgresViewRepository 64 | where 65 | V: View, 66 | A: Aggregate, 67 | { 68 | async fn load(&self, view_id: &str) -> Result, PersistenceError> { 69 | let row: Option = sqlx::query(&self.select_sql) 70 | .bind(view_id) 71 | .fetch_optional(&self.pool) 72 | .await 73 | .map_err(PostgresAggregateError::from)?; 74 | match row { 75 | None => Ok(None), 76 | Some(row) => { 77 | let view = serde_json::from_value(row.get("payload"))?; 78 | Ok(Some(view)) 79 | } 80 | } 81 | } 82 | 83 | async fn load_with_context( 84 | &self, 85 | view_id: &str, 86 | ) -> Result, PersistenceError> { 87 | let row: Option = sqlx::query(&self.select_sql) 88 | .bind(view_id) 89 | .fetch_optional(&self.pool) 90 | .await 91 | .map_err(PostgresAggregateError::from)?; 92 | match row { 93 | None => Ok(None), 94 | Some(row) => { 95 | let version = row.get("version"); 96 | let view = serde_json::from_value(row.get("payload"))?; 97 | let view_context = ViewContext::new(view_id.to_string(), version); 98 | Ok(Some((view, view_context))) 99 | } 100 | } 101 | } 102 | 103 | async fn update_view(&self, view: V, context: ViewContext) -> Result<(), PersistenceError> { 104 | let sql = match context.version { 105 | 0 => &self.insert_sql, 106 | _ => &self.update_sql, 107 | }; 108 | let version = context.version + 1; 109 | let payload = serde_json::to_value(&view).map_err(PostgresAggregateError::from)?; 110 | sqlx::query(sql.as_str()) 111 | .bind(payload) 112 | .bind(version) 113 | .bind(context.view_instance_id) 114 | .execute(&self.pool) 115 | .await 116 | .map_err(PostgresAggregateError::from)?; 117 | Ok(()) 118 | } 119 | } 120 | 121 | #[cfg(test)] 122 | mod test { 123 | use crate::testing::tests::{ 124 | Created, TestAggregate, TestEvent, TestView, TEST_CONNECTION_STRING, 125 | }; 126 | use crate::{default_postgress_pool, PostgresViewRepository}; 127 | use cqrs_es::persist::{ViewContext, ViewRepository}; 128 | 129 | #[tokio::test] 130 | async fn test_valid_view_repository() { 131 | let pool = default_postgress_pool(TEST_CONNECTION_STRING).await; 132 | let repo = 133 | PostgresViewRepository::::new("test_view", pool.clone()); 134 | let test_view_id = uuid::Uuid::new_v4().to_string(); 135 | 136 | let view = TestView { 137 | events: vec![TestEvent::Created(Created { 138 | id: "just a test event for this view".to_string(), 139 | })], 140 | }; 141 | repo.update_view(view.clone(), ViewContext::new(test_view_id.to_string(), 0)) 142 | .await 143 | .unwrap(); 144 | let (found, context) = repo 145 | .load_with_context(&test_view_id) 146 | .await 147 | .unwrap() 148 | .unwrap(); 149 | assert_eq!(found, view); 150 | let found = repo.load(&test_view_id).await.unwrap().unwrap(); 151 | assert_eq!(found, view); 152 | 153 | let updated_view = TestView { 154 | events: vec![TestEvent::Created(Created { 155 | id: "a totally different view".to_string(), 156 | })], 157 | }; 158 | repo.update_view(updated_view.clone(), context) 159 | .await 160 | .unwrap(); 161 | let found_option = repo.load(&test_view_id).await.unwrap(); 162 | let found = found_option.unwrap(); 163 | 164 | assert_eq!(found, updated_view); 165 | } 166 | } 167 | -------------------------------------------------------------------------------- /tests/lib.rs: -------------------------------------------------------------------------------- 1 | use cqrs_es::doc::{Customer, CustomerEvent}; 2 | use cqrs_es::persist::{PersistedEventStore, SemanticVersionEventUpcaster}; 3 | use cqrs_es::EventStore; 4 | use postgres_es::{default_postgress_pool, PostgresEventRepository}; 5 | use serde_json::Value; 6 | use sqlx::{Pool, Postgres}; 7 | 8 | const TEST_CONNECTION_STRING: &str = "postgresql://test_user:test_pass@127.0.0.1:5432/test"; 9 | 10 | async fn new_test_event_store( 11 | pool: Pool, 12 | ) -> PersistedEventStore { 13 | let repo = PostgresEventRepository::new(pool); 14 | PersistedEventStore::::new_event_store(repo) 15 | } 16 | 17 | #[tokio::test] 18 | async fn commit_and_load_events() { 19 | let pool = default_postgress_pool(TEST_CONNECTION_STRING).await; 20 | let repo = PostgresEventRepository::new(pool); 21 | let event_store = 22 | PersistedEventStore::::new_event_store(repo); 23 | 24 | simple_es_commit_and_load_test(event_store).await; 25 | } 26 | 27 | #[tokio::test] 28 | async fn commit_and_load_events_snapshot_store() { 29 | let pool = default_postgress_pool(TEST_CONNECTION_STRING).await; 30 | let repo = PostgresEventRepository::new(pool); 31 | let event_store = 32 | PersistedEventStore::::new_aggregate_store(repo); 33 | 34 | simple_es_commit_and_load_test(event_store).await; 35 | } 36 | 37 | async fn simple_es_commit_and_load_test( 38 | event_store: PersistedEventStore, 39 | ) { 40 | let id = uuid::Uuid::new_v4().to_string(); 41 | assert_eq!(0, event_store.load_events(id.as_str()).await.unwrap().len()); 42 | let context = event_store.load_aggregate(id.as_str()).await.unwrap(); 43 | 44 | event_store 45 | .commit( 46 | vec![ 47 | CustomerEvent::NameAdded { 48 | name: "test_event_A".to_string(), 49 | }, 50 | CustomerEvent::EmailUpdated { 51 | new_email: "email A".to_string(), 52 | }, 53 | ], 54 | context, 55 | Default::default(), 56 | ) 57 | .await 58 | .unwrap(); 59 | 60 | assert_eq!(2, event_store.load_events(id.as_str()).await.unwrap().len()); 61 | let context = event_store.load_aggregate(id.as_str()).await.unwrap(); 62 | 63 | event_store 64 | .commit( 65 | vec![CustomerEvent::EmailUpdated { 66 | new_email: "email B".to_string(), 67 | }], 68 | context, 69 | Default::default(), 70 | ) 71 | .await 72 | .unwrap(); 73 | assert_eq!(3, event_store.load_events(id.as_str()).await.unwrap().len()); 74 | } 75 | 76 | #[tokio::test] 77 | async fn upcasted_event() { 78 | let pool = default_postgress_pool(TEST_CONNECTION_STRING).await; 79 | let upcaster = SemanticVersionEventUpcaster::new( 80 | "NameAdded", 81 | "1.0.1", 82 | Box::new(|mut event| match event.get_mut("NameAdded").unwrap() { 83 | Value::Object(object) => { 84 | object.insert("name".to_string(), Value::String("UNKNOWN".to_string())); 85 | event 86 | } 87 | _ => panic!("not the expected object"), 88 | }), 89 | ); 90 | let event_store = new_test_event_store(pool) 91 | .await 92 | .with_upcasters(vec![Box::new(upcaster)]); 93 | 94 | let id = "previous_event_in_need_of_upcast".to_string(); 95 | let result = event_store.load_aggregate(id.as_str()).await.unwrap(); 96 | assert_eq!(1, result.current_sequence); 97 | assert_eq!(None, result.current_snapshot); 98 | } 99 | --------------------------------------------------------------------------------