├── package.json
├── .gitignore
├── LICENSE
├── sqlite-event-store.ddl
├── postgres-event-store.ddl
├── test-sqlite.js
├── test-postgres.js
└── README.md
/package.json:
--------------------------------------------------------------------------------
1 | {
2 |   "name": "sql-event-store",
3 |   "version": "2.0.0",
4 |   "description": "Demonstration of a SQL event store with de-duplication and guaranteed ordering. This event store can be ported to most SQL RDBMS and accessed from any number of writers, including high-load serverless functions.",
5 |   "main": "test-sqlite.js",
6 |   "type": "module",
7 |   "scripts": {
8 |     "test": "test"
9 |   },
10 |   "repository": {
11 |     "type": "git",
12 |     "url": "git+https://github.com/mattbishop/sql-event-store.git"
13 |   },
14 |   "keywords": [
15 |     "SQL",
16 |     "Event",
17 |     "Store",
18 |     "Event",
19 |     "Source",
20 |     "CQRS"
21 |   ],
22 |   "author": "mattbishop",
23 |   "license": "Unlicense",
24 |   "bugs": {
25 |     "url": "https://github.com/mattbishop/sql-event-store/issues"
26 |   },
27 |   "homepage": "https://github.com/mattbishop/sql-event-store#readme",
28 |   "dependencies": {
29 |     "@electric-sql/pglite": "0.3.1",
30 |     "nanoid": "5.1.5",
31 |     "sql.js": "1.13.0"
32 |   }
33 | }
34 | 
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Logs
2 | logs
3 | *.log
4 | npm-debug.log*
5 | yarn-debug.log*
6 | yarn-error.log*
7 | 
8 | # Runtime data
9 | pids
10 | *.pid
11 | *.seed
12 | *.pid.lock
13 | 
14 | # Directory for instrumented libs generated by jscoverage/JSCover
15 | lib-cov
16 | 
17 | # Coverage directory used by tools like istanbul
18 | coverage
19 | 
20 | # nyc test coverage
21 | .nyc_output
22 | 
23 | # Grunt intermediate storage (http://gruntjs.com/creating-plugins#storing-task-files)
24 | .grunt
25 | 
26 | # Bower dependency directory (https://bower.io/)
27 | bower_components
28 | 
29 | # node-waf configuration
30 | .lock-wscript
31 | 
32 | # Compiled binary addons (https://nodejs.org/api/addons.html)
33 | build/Release
34 | 
35 | # Dependency directories
36 | node_modules/
37 | jspm_packages/
38 | 
39 | # TypeScript v1 declaration files
40 | typings/
41 | 
42 | # Optional npm cache directory
43 | .npm
44 | 
45 | # Optional eslint cache
46 | .eslintcache
47 | 
48 | # Optional REPL history
49 | .node_repl_history
50 | 
51 | # Output of 'npm pack'
52 | *.tgz
53 | 
54 | # Yarn Integrity file
55 | .yarn-integrity
56 | 
57 | # dotenv environment variables file
58 | .env
59 | 
60 | # next.js build output
61 | .next
62 | .idea/
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | This is free and unencumbered software released into the public domain.
2 | 
3 | Anyone is free to copy, modify, publish, use, compile, sell, or
4 | distribute this software, either in source code form or as a compiled
5 | binary, for any purpose, commercial or non-commercial, and by any
6 | means.
7 | 
8 | In jurisdictions that recognize copyright laws, the author or authors
9 | of this software dedicate any and all copyright interest in the
10 | software to the public domain. We make this dedication for the benefit
11 | of the public at large and to the detriment of our heirs and
12 | successors. We intend this dedication to be an overt act of
13 | relinquishment in perpetuity of all present and future rights to this
14 | software under copyright law.
15 | 
16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
17 | EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
18 | MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
19 | IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 | OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 | ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 | OTHER DEALINGS IN THE SOFTWARE.
23 | 
24 | For more information, please refer to <https://unlicense.org>
25 | 
--------------------------------------------------------------------------------
/sqlite-event-store.ddl:
--------------------------------------------------------------------------------
1 | -- SQLITE event store
2 | 
3 | CREATE TABLE ledger
4 | (
5 |     entity      TEXT    NOT NULL,
6 |     entity_key  TEXT    NOT NULL,
7 |     event       TEXT    NOT NULL,
8 |     data        JSONB   NOT NULL,
9 |     -- can be anything, like a ULID, nanoid, etc.
10 |     append_key  TEXT    NOT NULL UNIQUE,
11 |     -- previous event id
12 |     -- uuid; null for first event in entity instance; null does not trigger UNIQUE constraint
13 |     previous_id TEXT UNIQUE CHECK (previous_id LIKE '________-____-4___-____-____________'),
14 |     -- uuid
15 |     event_id    TEXT    NOT NULL UNIQUE CHECK (event_id LIKE '________-____-4___-____-____________'),
16 |     timestamp   INTEGER NOT NULL,
17 |     -- sequence for all events in all entities
18 |     sequence    INTEGER PRIMARY KEY AUTOINCREMENT
19 | );
20 | 
21 | CREATE INDEX entity_index ON ledger (entity, entity_key);
22 | 
23 | 
24 | -- immutable ledger
25 | CREATE TRIGGER no_delete_ledger
26 |     BEFORE DELETE
27 |     ON ledger
28 | BEGIN
29 |     SELECT RAISE(FAIL, 'Cannot delete events from the ledger');
30 | END;
31 | 
32 | CREATE TRIGGER no_update_ledger
33 |     BEFORE UPDATE
34 |     ON ledger
35 | BEGIN
36 |     SELECT RAISE(FAIL, 'Cannot update events in the ledger');
37 | END;
38 | 
39 | 
40 | CREATE VIEW append_event AS
41 | SELECT
42 |     entity,
43 |     entity_key,
44 |     event,
45 |     data,
46 |     append_key,
47 |     previous_id
48 | FROM ledger;
49 | 
50 | 
51 | CREATE VIEW replay_events AS
52 | SELECT
53 |     entity,
54 |     entity_key,
55 |     event,
56 |     data,
57 |     strftime('%Y-%m-%dT%H:%M:%fZ', timestamp / 1000.0, 'unixepoch') AS timestamp,
58 |     event_id
59 | FROM ledger ORDER BY sequence;
60 | 
61 | 
62 | -- From Claude 3.7, thanks!
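-- (The uuid4 view below formats 16 random bytes from randomblob() as a lowercase
-- version-4 UUID: 8-4-4-4-12 hex digits, with the version nibble fixed to 4 and the
-- variant nibble drawn from 8, 9, a or b. Each SELECT against it yields a fresh value.)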
63 | DROP VIEW IF EXISTS uuid4; 64 | CREATE VIEW uuid4 AS 65 | WITH random_128 AS (SELECT randomblob(16) AS bytes) 66 | SELECT lower(printf('%s-%s-4%s-%s%s-%s', 67 | hex(substr(bytes, 1, 4)), 68 | hex(substr(bytes, 5, 2)), 69 | substr(hex(substr(bytes, 7, 2)), 2, 3), 70 | substr('89ab', 1 + (abs(random()) % 4), 1), 71 | substr(hex(substr(bytes, 10, 2)), 2, 3), 72 | hex(substr(bytes, 11, 6)) 73 | )) 74 | AS next 75 | FROM random_128; 76 | 77 | 78 | CREATE TRIGGER generate_event_id_on_append 79 | INSTEAD OF INSERT 80 | ON append_event 81 | FOR EACH ROW 82 | BEGIN 83 | INSERT INTO ledger (entity, entity_key, event, data, append_key, previous_id, event_id, timestamp) 84 | VALUES (NEW.entity, 85 | NEW.entity_key, 86 | NEW.event, 87 | NEW.data, 88 | NEW.append_key, 89 | NEW.previous_id, 90 | (SELECT next FROM uuid4), 91 | CAST((UNIXEPOCH('subsec') * 1000) AS INTEGER)); 92 | END; 93 | 94 | 95 | -- Can only use null previous_id for first event in an entity 96 | CREATE TRIGGER first_event_for_entity 97 | BEFORE INSERT 98 | ON ledger 99 | FOR EACH ROW 100 | WHEN NEW.previous_id IS NULL 101 | AND EXISTS (SELECT true 102 | FROM ledger 103 | WHERE NEW.entity_key = entity_key 104 | AND NEW.entity = entity) 105 | BEGIN 106 | SELECT RAISE(FAIL, 'previous_id can only be null for first entity event'); 107 | END; 108 | 109 | 110 | -- previous_id must be the newest event for the entity. 111 | CREATE TRIGGER previous_id_is_latest_in_entity 112 | BEFORE INSERT 113 | ON ledger 114 | FOR EACH ROW 115 | WHEN NEW.previous_id IS NOT NULL 116 | AND EXISTS (SELECT true 117 | FROM ledger l1 118 | WHERE NEW.previous_id = l1.event_id 119 | AND l1.sequence < (SELECT MAX(l2.sequence) 120 | FROM ledger l2 121 | WHERE NEW.entity = l2.entity 122 | AND NEW.entity_key = l2.entity_key)) 123 | BEGIN 124 | SELECT RAISE(FAIL, 'previous_id must reference the newest event in entity'); 125 | END; 126 | 127 | 128 | -- previous_id must be in the same entity as the event 129 | CREATE TRIGGER previous_id_in_same_entity 130 | BEFORE INSERT 131 | ON ledger 132 | FOR EACH ROW 133 | WHEN NEW.previous_id IS NOT NULL 134 | AND NOT EXISTS (SELECT true 135 | FROM ledger 136 | WHERE NEW.previous_id = event_id 137 | AND NEW.entity_key = entity_key 138 | AND NEW.entity = entity) 139 | BEGIN 140 | SELECT RAISE(FAIL, 'previous_id must be in same entity'); 141 | END; 142 | -------------------------------------------------------------------------------- /postgres-event-store.ddl: -------------------------------------------------------------------------------- 1 | -- Postgres event store 2 | 3 | CREATE TABLE ledger 4 | ( 5 | entity TEXT NOT NULL, 6 | entity_key TEXT NOT NULL, 7 | event TEXT NOT NULL, 8 | data JSONB NOT NULL, 9 | -- can be anything, like a ULID, nanoid, etc. 
10 | append_key TEXT NOT NULL UNIQUE, 11 | -- previous event id 12 | -- null for first event in entity instance; null does not trigger UNIQUE constraint 13 | previous_id UUID UNIQUE, 14 | event_id UUID NOT NULL UNIQUE, 15 | timestamp TIMESTAMPTZ NOT NULL DEFAULT CURRENT_TIMESTAMP, 16 | -- sequence for all events in all entities 17 | sequence BIGSERIAL PRIMARY KEY 18 | ); 19 | 20 | CREATE INDEX entity_index ON ledger (entity, entity_key); 21 | 22 | 23 | -- immutable events 24 | CREATE RULE ignore_delete_events AS ON DELETE TO ledger 25 | DO INSTEAD NOTHING; 26 | 27 | CREATE RULE ignore_update_events AS ON UPDATE TO ledger 28 | DO INSTEAD NOTHING; 29 | 30 | 31 | CREATE FUNCTION append_event(entity_in TEXT, 32 | entity_key_in TEXT, 33 | event_in TEXT, 34 | data_in JSONB, 35 | append_key_in TEXT, 36 | previous_id_in UUID DEFAULT NULL) 37 | RETURNS UUID AS 38 | $$ 39 | INSERT INTO ledger (entity, entity_key, event, data, append_key, previous_id) 40 | VALUES (entity_in, entity_key_in, event_in, data_in, append_key_in, previous_id_in) 41 | RETURNING event_id; 42 | $$ 43 | LANGUAGE sql; 44 | 45 | 46 | 47 | CREATE VIEW replay_events AS 48 | SELECT 49 | entity, 50 | entity_key, 51 | event, 52 | data, 53 | timestamp, 54 | event_id 55 | FROM ledger ORDER BY sequence; 56 | 57 | 58 | CREATE FUNCTION replay_events_after(after_event_id UUID) 59 | RETURNS SETOF replay_events AS 60 | $$ 61 | DECLARE 62 | after_sequence BIGINT; 63 | BEGIN 64 | -- Find the sequence number of the specified event_id 65 | SELECT l.sequence INTO after_sequence 66 | FROM ledger l 67 | WHERE l.event_id = after_event_id; 68 | 69 | -- If event_id doesn't exist, raise an error 70 | IF after_sequence IS NULL THEN 71 | RAISE EXCEPTION 'Event with ID % does not exist', after_event_id; 72 | END IF; 73 | 74 | -- Return all events with a higher sequence number 75 | RETURN QUERY 76 | SELECT 77 | l.entity, 78 | l.entity_key, 79 | l.event, 80 | l.data, 81 | l.timestamp, 82 | l.event_id 83 | FROM ledger l 84 | WHERE l.sequence > after_sequence 85 | ORDER BY l.sequence; 86 | END 87 | $$ 88 | LANGUAGE plpgsql; 89 | 90 | 91 | 92 | -- Generates a UUID for each new event. 
93 | CREATE FUNCTION generate_event_id() RETURNS trigger AS 94 | $$ 95 | BEGIN 96 | IF (NEW.event_id IS NOT NULL) 97 | THEN 98 | RAISE EXCEPTION 'event_id must not be directly set with INSERT statement, it is generated'; 99 | END IF; 100 | NEW.event_id = gen_random_uuid(); 101 | RETURN NEW; 102 | END 103 | $$ 104 | LANGUAGE plpgsql; 105 | 106 | 107 | CREATE TRIGGER generate_event_id_on_append 108 | BEFORE INSERT 109 | ON ledger 110 | FOR EACH ROW 111 | EXECUTE PROCEDURE generate_event_id(); 112 | 113 | 114 | 115 | CREATE FUNCTION check_first_event_for_entity() RETURNS trigger AS 116 | $$ 117 | BEGIN 118 | IF EXISTS (SELECT true 119 | FROM ledger 120 | WHERE NEW.entity_key = entity_key 121 | AND NEW.entity = entity) 122 | THEN 123 | RAISE EXCEPTION 'previous_id can only be null for first entity event'; 124 | END IF; 125 | RETURN NEW; 126 | END 127 | $$ 128 | LANGUAGE plpgsql; 129 | 130 | 131 | CREATE TRIGGER append_first_event_for_entity 132 | BEFORE INSERT 133 | ON ledger 134 | FOR EACH ROW 135 | WHEN (NEW.previous_id IS NULL) 136 | EXECUTE PROCEDURE check_first_event_for_entity(); 137 | 138 | 139 | 140 | 141 | -- check previous_id rules 142 | CREATE FUNCTION check_append_with_previous_id() RETURNS trigger AS 143 | $$ 144 | BEGIN 145 | IF (NOT EXISTS (SELECT true 146 | FROM ledger 147 | WHERE NEW.previous_id = event_id 148 | AND NEW.entity_key = entity_key 149 | AND NEW.entity = entity)) 150 | THEN 151 | RAISE EXCEPTION 'previous_id must be in the same entity'; 152 | END IF; 153 | 154 | IF (EXISTS (SELECT true 155 | FROM ledger l1 156 | WHERE NEW.previous_id = l1.event_id 157 | AND l1.sequence < (SELECT MAX(l2.sequence) 158 | FROM ledger l2 159 | WHERE NEW.entity = l2.entity 160 | AND NEW.entity_key = l2.entity_key) 161 | 162 | )) 163 | THEN 164 | RAISE EXCEPTION 'previous_id must reference the newest event in entity'; 165 | END IF; 166 | 167 | RETURN NEW; 168 | END 169 | $$ 170 | LANGUAGE plpgsql; 171 | 172 | 173 | CREATE TRIGGER append_with_previous_id 174 | BEFORE INSERT 175 | ON ledger 176 | FOR EACH ROW 177 | WHEN (NEW.previous_id IS NOT NULL) 178 | EXECUTE FUNCTION check_append_with_previous_id(); 179 | -------------------------------------------------------------------------------- /test-sqlite.js: -------------------------------------------------------------------------------- 1 | import { throws, doesNotThrow, strictEqual } from 'node:assert/strict' 2 | import { before, after, test } from 'node:test' 3 | import fs from 'fs' 4 | import initSqlJs from 'sql.js' 5 | import { nanoid } from 'nanoid' 6 | 7 | 8 | /** 9 | This test suite exercises the database schema with incorrect data, duplicate data and other likely real-world problems. 
10 | */ 11 | 12 | const thingEntity = 'thing' 13 | const thingCreatedEvent = 'thing-created' 14 | const thingDeletedEvent = 'thing-deleted' 15 | 16 | const tableTennisEntity = 'table-tennis' 17 | const pingEvent = 'ball-pinged' 18 | const pongEvent = 'ball-ponged' 19 | 20 | const thingKey = '1' 21 | const homeTableKey = 'home' 22 | const workTableKey = 'work' 23 | 24 | 25 | async function initDb() { 26 | const SQL = await initSqlJs() 27 | const db = new SQL.Database() 28 | loadDdl(db) 29 | return db 30 | } 31 | 32 | function loadDdl(db) { 33 | const createScript = fs.readFileSync('./sqlite-event-store.ddl', 'utf8') 34 | db.run(createScript) 35 | } 36 | 37 | function shutdownDb(db) { 38 | const data = db.export() 39 | const buffer = Buffer.from(data) 40 | fs.writeFileSync('sqlite-store.db', buffer) 41 | db.close() 42 | } 43 | 44 | test('SQLite', async (ctx) => { 45 | 46 | let db 47 | before(async () => db = await initDb()) 48 | 49 | after(() => shutdownDb(db)) 50 | 51 | await ctx.test('insert events', async (t) => { 52 | const stmt = db.prepare('INSERT INTO append_event (entity, entity_key, event, data, append_key, previous_id) VALUES (?, ?, ?, ?, ?, ?)') 53 | 54 | const appendKey1 = nanoid() 55 | const appendKey2 = nanoid() 56 | let thingEventId1 = nanoid() 57 | let thingEventId2 = nanoid() 58 | 59 | let pingEventHomeId = nanoid() 60 | let pingEventWorkId = nanoid() 61 | 62 | 63 | const data = '{}' 64 | 65 | await t.test('cannot insert empty columns', () => { 66 | throws( 67 | () => stmt.run([null, thingKey, thingCreatedEvent, data, appendKey1]), 68 | /NOT NULL constraint failed: ledger\.entity/, 69 | 'cannot insert null entity') 70 | throws( 71 | () => stmt.run([thingEntity, null, thingCreatedEvent, data, appendKey1]), 72 | /NOT NULL constraint failed: ledger\.entity_key/, 73 | 'cannot insert null entity key') 74 | throws( 75 | () => stmt.run([thingEntity, thingKey, null, data, appendKey1]), 76 | /NOT NULL constraint failed: ledger\.event/, 77 | 'cannot insert null event') 78 | throws( 79 | () => stmt.run([thingEntity, thingKey, thingCreatedEvent, null, appendKey1]), 80 | /NOT NULL constraint failed: ledger\.data/, 81 | 'cannot insert null event data') 82 | 83 | throws( 84 | () => stmt.run([thingEntity, thingKey, thingCreatedEvent, data, null]), 85 | /NOT NULL constraint failed: ledger\.append_key/, 86 | 'cannot insert null append key') 87 | }) 88 | 89 | // Cannot use RETURNING to get the event_id, as sqlite cannot access the generated event_id during INSERT. 90 | // Need to SELECT it like this. 
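// (The append_event view's INSTEAD OF INSERT trigger generates the event_id, so the
// RETURNING clause below re-reads the new row from the ledger via its unique append_key.)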
91 | const appendStmt = db.prepare(` 92 | INSERT INTO append_event (entity, entity_key, event, data, append_key, previous_id) 93 | VALUES ($1, $2, $3, $4, $5, $6) 94 | RETURNING (SELECT event_id FROM ledger WHERE append_key = $5) as event_id`) 95 | 96 | await t.test('insert events for an entity', () => { 97 | doesNotThrow(() => [thingEventId1] = appendStmt.get([thingEntity, thingKey, thingCreatedEvent, data, appendKey1, null])) 98 | doesNotThrow(() => [thingEventId2] = appendStmt.get([thingEntity, thingKey, thingDeletedEvent, data, appendKey2, thingEventId1])) 99 | doesNotThrow(() => [pingEventHomeId] = appendStmt.get([tableTennisEntity, homeTableKey, pingEvent, data, nanoid(), null])) 100 | doesNotThrow(() => [pingEventWorkId] = appendStmt.get([tableTennisEntity, workTableKey, pingEvent, data, nanoid(), null])) 101 | }) 102 | 103 | await t.test('previous_id rules', () => { 104 | throws( 105 | () => stmt.run([tableTennisEntity, homeTableKey, pingEvent, data, nanoid(), null]), 106 | /previous_id can only be null for first entity event/, 107 | 'cannot insert multiple null previous ID for an entity') 108 | throws( 109 | () => stmt.run([tableTennisEntity, workTableKey, pongEvent, data, nanoid(), pingEventHomeId]), 110 | /previous_id must be in same entity/, 111 | 'previous ID must be in same entity') 112 | throws( 113 | () => stmt.run([thingEntity, thingKey, thingCreatedEvent, data, nanoid(), thingEventId1]), 114 | /previous_id must reference the newest event in entity/, 115 | 'previous ID must be newest event in entity') 116 | }) 117 | 118 | await t.test('Cannot insert duplicates', () => { 119 | throws( 120 | () => stmt.run([thingEntity, thingKey, thingDeletedEvent, data, appendKey1, thingEventId2]), 121 | /UNIQUE constraint failed: ledger\.append_key/, 122 | 'cannot insert different event for same append key') 123 | throws( 124 | () => stmt.run([thingEntity, thingKey, thingDeletedEvent, data, nanoid(), thingEventId1]), 125 | /previous_id must reference the newest event in entity/, 126 | 'cannot insert different event for same previous ID') 127 | }) 128 | }) 129 | 130 | await ctx.test('cannot delete or update', async (t) => { 131 | await t.test('cannot delete or update events', () => { 132 | throws( 133 | () => db.exec(`DELETE FROM ledger WHERE entity = '${thingEntity}'`), 134 | /Cannot delete events from the ledger/, 135 | 'cannot delete events' 136 | ) 137 | throws( 138 | () => db.exec(`UPDATE ledger SET entity_key = 'fail' WHERE entity = '${thingEntity}'`), 139 | /Cannot update events in the ledger/, 140 | 'cannot update events' 141 | ) 142 | }) 143 | }) 144 | 145 | 146 | await ctx.test('replay events', async (t) => { 147 | await t.test('replay entity events', async () => { 148 | const [{values}] = await db.exec(`SELECT * FROM replay_events WHERE entity = '${thingEntity}'`) 149 | strictEqual(values.length, 2, 'should have two events') 150 | }) 151 | 152 | await t.test('replay single entity event', async () => { 153 | const [{values}] = await db.exec(`SELECT * FROM replay_events WHERE entity = '${tableTennisEntity}' AND entity_key = '${homeTableKey}'`) 154 | strictEqual(values.length, 1, 'should have one event') 155 | }) 156 | }) 157 | }) 158 | -------------------------------------------------------------------------------- /test-postgres.js: -------------------------------------------------------------------------------- 1 | import { rejects, doesNotReject, strictEqual } from 'node:assert/strict' 2 | import { before, after, test } from 'node:test' 3 | import fs from 'fs' 4 | import { 
nanoid } from 'nanoid' 5 | import { PGlite } from '@electric-sql/pglite' 6 | import { query } from '@electric-sql/pglite/template' 7 | 8 | 9 | /** 10 | This test suite exercises the database schema with incorrect data, duplicate data and other likely real-world problems. 11 | */ 12 | 13 | const thingEntity = 'thing' 14 | const thingCreatedEvent = 'thing-created' 15 | const thingDeletedEvent = 'thing-deleted' 16 | 17 | const tableTennisEntity = 'table-tennis' 18 | const pingEvent = 'ball-pinged' 19 | const pongEvent = 'ball-ponged' 20 | 21 | const thingKey = '1' 22 | const homeTableKey = 'home' 23 | const workTableKey = 'work' 24 | 25 | let thingEventId1 26 | let thingEventId2 27 | 28 | let pingEventHomeId 29 | let pingEventWorkId 30 | 31 | 32 | async function initDb() { 33 | const db = await PGlite.create('memory://') 34 | await loadDdl(db) 35 | return db 36 | } 37 | 38 | async function loadDdl(db) { 39 | const createScript = fs.readFileSync('./postgres-event-store.ddl', 'utf8') 40 | return db.exec(createScript) 41 | } 42 | 43 | async function shutdownDb(db) { 44 | const columns = await db.query("SELECT column_name FROM information_schema.columns WHERE table_name = 'ledger'") 45 | const ret = await db.query("COPY ledger TO '/dev/blob';") 46 | const data = await ret.blob.text() 47 | const body = columns.rows 48 | .map(r => r.column_name) 49 | .join('\t') 50 | + '\n' 51 | + data.replaceAll('\\N', 'null') 52 | fs.writeFileSync('postgres-store.tsv', body) 53 | return db.close() 54 | } 55 | 56 | test('Postgres', async (ctx) => { 57 | 58 | let db 59 | before(async () => db = await initDb()) 60 | 61 | after(() => shutdownDb(db)) 62 | 63 | await ctx.test('insert events', async (t) => { 64 | const stmt = query`INSERT INTO ledger (entity, entity_key, event, data, append_key, previous_id) 65 | VALUES (${'entity'}, ${'entityKey'}, ${'event'}, ${'data'}, ${'appendKey'}, ${'previousId'})` 66 | 67 | const appendKey1 = nanoid() 68 | const appendKey2 = nanoid() 69 | const data = {} 70 | 71 | 72 | await t.test('cannot insert empty columns', async () => { 73 | await rejects( 74 | () => db.query(stmt.query, [null, thingKey, thingCreatedEvent, data, appendKey1, null]), 75 | /error: null value in column "entity" of relation "ledger" violates not-null constraint/, 76 | 'cannot insert null entity') 77 | await rejects( 78 | () => db.query(stmt.query, [thingEntity, null, thingCreatedEvent, data, appendKey1, null]), 79 | /error: null value in column "entity_key" of relation "ledger" violates not-null constraint/, 80 | 'cannot insert null entity key') 81 | await rejects( 82 | () => db.query(stmt.query, [thingEntity, thingKey, null, data, appendKey1, null]), 83 | /error: null value in column "event" of relation "ledger" violates not-null constraint/, 84 | 'cannot insert null event') 85 | await rejects( 86 | () => db.query(stmt.query, [thingEntity, thingKey, thingCreatedEvent, null, appendKey1, null]), 87 | /error: null value in column "data" of relation "ledger" violates not-null constraint/, 88 | 'cannot insert null event data') 89 | await rejects( 90 | () => db.query(stmt.query, [thingEntity, thingKey, thingCreatedEvent, data, null, null]), 91 | /error: null value in column "append_key" of relation "ledger" violates not-null constraint/, 92 | 'cannot insert null append_key') 93 | }) 94 | 95 | await t.test('cannot insert event_id', async () => { 96 | await rejects( 97 | () => db.sql`INSERT INTO ledger (entity, entity_key, event, data, append_key, event_id) 98 | VALUES (${thingEntity}, ${thingKey}, 
${thingCreatedEvent}, ${data}, ${appendKey1}, '00000000-0000-4000-8000-000000000000')`, 99 | /error: event_id must not be directly set with INSERT statement, it is generated/, 100 | 'cannot insert event_id') 101 | }) 102 | 103 | await t.test('UUIDs format for IDs', async () => { 104 | await rejects( 105 | () => db.query(stmt.query, [thingEntity, thingKey, thingCreatedEvent, data, appendKey1, 'not-a-uuid']), 106 | /error: invalid input syntax for type uuid: "not-a-uuid"/, 107 | 'previous_id must be a UUID') 108 | }) 109 | 110 | const appendStmt = query`SELECT append_event (${'entity'}, ${'entity_key'}, ${'event'}, ${'data'}, ${'append_key'}, ${'previous_id'}) AS event_id` 111 | 112 | await t.test('insert events for an entity', async () => { 113 | await doesNotReject(async () => { 114 | const {rows:[{event_id}]} = await db.query(appendStmt.query, [thingEntity, thingKey, thingCreatedEvent, data, appendKey1, null]) 115 | thingEventId1 = event_id 116 | }) 117 | await doesNotReject(async () => { 118 | const {rows:[{event_id}]} = await db.query(appendStmt.query, [thingEntity, thingKey, thingDeletedEvent, data, appendKey2, thingEventId1]) 119 | thingEventId2 = event_id 120 | }) 121 | await doesNotReject(async () => { 122 | const {rows:[{event_id}]} = await db.query(appendStmt.query, [tableTennisEntity, homeTableKey, pingEvent, data, nanoid(), null]) 123 | pingEventHomeId = event_id 124 | }) 125 | await doesNotReject(async () => { 126 | const {rows:[{event_id}]} = await db.query(appendStmt.query, [tableTennisEntity, workTableKey, pingEvent, data, nanoid(), null]) 127 | pingEventWorkId = event_id 128 | }) 129 | }) 130 | 131 | await t.test('previous_id rules', async () => { 132 | await rejects( 133 | () => db.query(appendStmt.query, [tableTennisEntity, homeTableKey, pingEvent, data, nanoid(), null]), 134 | /error: previous_id can only be null for first entity event/, 135 | 'cannot insert multiple null previous_id for an entity') 136 | await rejects( 137 | () => db.query(appendStmt.query, [tableTennisEntity, workTableKey, pongEvent, data, nanoid(), pingEventHomeId]), 138 | /error: previous_id must be in the same entity/, 139 | 'previous_id must be in same entity') 140 | await rejects( 141 | () => db.query(appendStmt.query, [thingEntity, thingKey, thingCreatedEvent, data, nanoid(), thingEventId1]), 142 | /error: previous_id must reference the newest event in entity/, 143 | 'previous ID must be newest event in entity') 144 | }) 145 | 146 | await t.test('Cannot insert duplicates', async () => { 147 | await rejects( 148 | () => db.query(appendStmt.query, [thingEntity, thingKey, thingDeletedEvent, data, appendKey1, thingEventId2]), 149 | /error: duplicate key value violates unique constraint "ledger_append_key_key"/, 150 | 'cannot insert different event for same append_key') 151 | await rejects( 152 | () => db.query(appendStmt.query, [thingEntity, thingKey, thingDeletedEvent, data, nanoid(), thingEventId1]), 153 | /error: previous_id must reference the newest event in entity/, 154 | 'cannot insert different event for same previous') 155 | }) 156 | }) 157 | 158 | await ctx.test('cannot delete or update', async (t) => { 159 | 160 | await t.test('cannot delete or update events', async () => { 161 | await doesNotReject( 162 | () => db.query(`DELETE FROM ledger WHERE entity = '${thingEntity}'`), 163 | /Cannot delete events/, 164 | 'cannot delete events' 165 | ) 166 | await doesNotReject( 167 | () => db.query(`UPDATE ledger SET entity_key = 'fail' WHERE entity = '${thingEntity}'`), 168 | /Cannot update events/, 
169 | 'cannot update events' 170 | ) 171 | }) 172 | }) 173 | 174 | 175 | await ctx.test('replay events', async (t) => { 176 | await t.test('replay entity events', async () => { 177 | const {rows} = await db.query(`SELECT * FROM replay_events WHERE entity = '${thingEntity}'`) 178 | strictEqual(rows.length, 2, 'should have two events') 179 | }) 180 | 181 | await t.test('replay events for a specific entity', async () => { 182 | const {rows} = await db.query(`SELECT * FROM replay_events WHERE entity = '${tableTennisEntity}' AND entity_key = '${homeTableKey}'`) 183 | strictEqual(rows.length, 1, 'should have one event') 184 | }) 185 | 186 | await t.test('replay events after a specific event', async () => { 187 | const {rows} = await db.query(`SELECT * FROM replay_events_after('${thingEventId1}')`) 188 | strictEqual(rows.length, 3, 'should have three events') 189 | }) 190 | 191 | await t.test('replay events after a specific event, filtered by entity', async () => { 192 | const {rows} = await db.query(`SELECT * FROM replay_events_after('${thingEventId1}') WHERE entity = '${thingEntity}'`) 193 | strictEqual(rows.length, 1, 'should have one event') 194 | }) 195 | }) 196 | }) 197 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # SQL Event Store 2 | Demonstration of a SQL event store with deduplication and guaranteed event ordering. The database rules are intended to prevent incorrect information from entering into an event stream. You are assumed to have familiarity with [event sourcing](https://martinfowler.com/eaaDev/EventSourcing.html). Two DDLs are provided – one for [Postgres](https://www.postgresql.org), one for [SQLite](https://sqlite.org). 3 | 4 | This project uses a node test suite to ensure the DDLs comply with the design requirements. The DDLs can also be ported to most SQL RDBMS and accessed from any number of writers, including high-load serverless functions, without a coordinating “single writer” process. 5 | 6 | ### Installing 7 | 8 | Good news! Nothing to install! Instead, take the DDLs in this project ([Postgres](./postgres-event-store.ddl), [SQLite](./sqlite-event-store.ddl)) and include them in your application’s database definition set. 9 | 10 | ## Usage Model 11 | 12 | Both SQLite and Postgres versions have similar SQL usage models but with one main difference. Postgres provides functions, whereas SQLite only has views. The concepts and naming are similar between the two databases, but slightly different in their use and capabilities. 13 | 14 | ### Appending Events 15 | 16 | In order to manage the business rules of an event-sourced application, one must append events to the ledger. 17 | 18 | #### SQLite 19 | 20 | Append new events by inserting into the `append_event` view. Here is an example: 21 | 22 | ```sql 23 | -- Add an event. Note the RETURNING clause, which returns the generated event_id for the appended event. This is used to append the next event. 
24 | INSERT INTO append_event (entity, entity_key, event, data, append_key) -- first event in entity, omit previous_id
25 | VALUES ('game', 'apr-7-2025', 'game-started', 'true', 'an-append-key')
26 | RETURNING (SELECT event_id FROM ledger WHERE append_key = 'an-append-key');
27 | 
28 | -- now insert another event, using the first event's id as the previous_id value
29 | INSERT INTO append_event (entity, entity_key, event, data, append_key, previous_id)
30 | VALUES ('game', 'apr-7-2025', 'game-going', 'true', 'another-append-key', '019612a6-38ac-4108-85fd-33e8081cedaf')
31 | RETURNING (SELECT event_id FROM ledger WHERE append_key = 'another-append-key');
32 | ```
33 | 
34 | #### Postgres
35 | 
36 | Append new events by calling the `append_event` function. Here is an example:
37 | 
38 | ```sql
39 | -- Add an event. This function returns the generated event_id for the appended event.
40 | SELECT append_event('game', 'apr-7-2025', 'game-started', 'true', 'an-append-key', null);
41 | 
42 | -- now insert another event, using the first event's id as the previous_id value
43 | SELECT append_event('game', 'apr-7-2025', 'game-going', 'true', 'another-append-key', '019612a6-38ac-4108-85fd-33e8081cedaf');
44 | ```
45 | 
46 | ### Replaying Events
47 | 
48 | One can replay events in order, without the internal bookkeeping columns, by using the `replay_events` view.
49 | 
50 | #### SQLite / Postgres
51 | 
52 | ```sql
53 | -- Replay all the events
54 | SELECT * FROM replay_events;
55 | 
56 | -- Replay events from a specific entity
57 | SELECT * FROM replay_events
58 | WHERE entity = 'game'
59 |   AND entity_key = '2022 Classic';
60 | 
61 | -- Replay only certain events
62 | SELECT * FROM replay_events
63 | WHERE entity = 'game'
64 |   AND entity_key = '2022 Classic'
65 |   AND event IN ('game-started', 'game-finished');
66 | 
67 | -- BEWARE the last event_id in this result set may not be the last event for the entity instance, so it
68 | -- cannot be used to append an event. To find the last event for an entity, use this query:
69 | 
70 | SELECT event_id FROM ledger
71 | WHERE entity = 'game'
72 |   AND entity_key = '2022 Classic'
73 | ORDER BY sequence DESC LIMIT 1;
74 | ```
75 | 
76 | ### Catching Up With New Events
77 | 
78 | Your application may want to “catch up” from a previously read event and avoid replaying already-seen events. SQLite and Postgres have different mechanisms to do so.
79 | 
80 | #### Catching Up With SQLite
81 | 
82 | ```sql
83 | -- Catch up with events after a known event. The replay_events view does not expose sequence, so query the ledger directly.
84 | SELECT entity, entity_key, event, data, event_id FROM ledger
85 | WHERE entity = 'game'
86 |   AND entity_key = '2022 Classic'
87 |   AND sequence > (SELECT sequence FROM ledger
88 |                   WHERE event_id = '123e4567-e89b-12d3-a456-426614174000')
89 | ORDER BY sequence;
90 | ```
91 | 
92 | The `WHERE event_id` portion of the subquery names the most recent event your application has processed, which is the point in the event stream you want to continue from.
93 | 
94 | #### Catching Up With Postgres
95 | 
96 | Replaying events to catch up after a previous event is a bit easier with Postgres, since it has stored functions. The `replay_events_after` function accepts the event ID of the most recent event processed by your application and returns the same fields as the `replay_events` view described above.
97 | 
98 | ```sql
99 | -- Catch up on new events from a specific entity, after a specific event
100 | -- Postgres-only
101 | SELECT * FROM replay_events_after('123e4567-e89b-12d3-a456-426614174000')
102 | WHERE entity = 'game'
103 |   AND entity_key = '2022 Classic';
104 | ```
105 | 
106 | Notice how your application can add WHERE clauses in the replay query to filter for relevant events.
107 | 
108 | ### Conceptual Model
109 | 
110 | An Event is an unalterable statement of fact that has occurred in the past. It has a name, like `food-eaten`, and it is scoped to an [Entity](https://en.wikiquote.org/wiki/Entity), or an identifiable existence in the world. Entities are individually identified by business-relevant keys that uniquely identify one entity from another.
111 | 
112 | In this event store, an event cannot exist without an entity to apply it to. Events can create new entities, and in that first event, the entity key is presented as the identifying key for the newly created entity.
113 | 
114 | Events follow other events in a sequence. Within an entity instance, each event has a reference to the previous event, much like a backward-linked list. This expression of the previous event ID enables SQL Event Store to guarantee that events are written sequentially, without losing concurrent appends of other events for the same entity.
115 | 
116 | Appends to different entities do not affect each other, so many events can be appended to many entities concurrently without the serialization penalties that “single writer” systems can cause.
117 | 
118 | ### Design
119 | 
120 | - **Append-Only** Once events are created, they cannot be deleted, updated or otherwise modified. This includes entity event definitions.
121 | - **Insertion-Ordered** Events must be consistently replayable in the order they were inserted.
122 | - **Event Race Conditions Are Impossible** The event store prevents a client from writing an event to an entity if another event has been inserted after the client last replayed the event stream.
123 | 
124 | ### Client Use Cases
125 | 
126 | Event store clients can use basic SQL statements to add and replay events. Clients follow the typical event sourcing pattern:
127 | 
128 | 1. Receive a command
129 | 2. [Replay events](#replay-events) to compute current state
130 | 3. Validate entity state for the command
131 | 4. [Append](#append-events) a new event
132 | 
133 | #### Replay Events
134 | 
135 | Clients must always fetch, or replay, the events for an entity before inserting a new event. Replaying events assures the client that the state of the entity is known so that business rules can be applied for the command before an event is appended. For instance, if a command creates a new entity, the replay step will ensure no events have already been appended to the entity's key.
136 | 
137 | ```sql
138 | SELECT event,
139 |        data,
140 |        event_id
141 | FROM replay_events
142 | WHERE entity = ?
143 |   AND entity_key = ?;
144 | ```
145 | 
146 | If a command produces a subsequent event for an existing entity, the `event_id` of the last event must be used as the `previous_id` of the next event. This design enforces the Event Sourcing pattern of building current read models before appending new events for an entity.
147 | 
148 | #### Append Events
149 | 
150 | Two types of events can be appended to the event log: the first event for an entity, and subsequent events. The distinction is important for the database rules that reject incorrect events, like a wrong entity key or an invalid previous event ID. A short JavaScript sketch of the whole replay-validate-append flow follows.
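Here is a minimal sketch of that flow, using the Postgres flavor of the store. It assumes `db` is a connected PGlite (or similar `query(sql, params)`-style) client like the one used in `test-postgres.js`; the `handleFinishGame` command and the `game-started`/`game-finished` event names are purely illustrative.

```js
import { nanoid } from 'nanoid'

// Hypothetical command handler: finish an existing game.
async function handleFinishGame(db, entityKey) {
  // 1. Replay the entity's events (oldest first) to rebuild its current state.
  const { rows: events } = await db.query(
    `SELECT event, data, event_id
       FROM replay_events
      WHERE entity = $1 AND entity_key = $2`,
    ['game', entityKey])

  // 2. Validate the command against that state.
  const started  = events.some(e => e.event === 'game-started')
  const finished = events.some(e => e.event === 'game-finished')
  if (!started || finished) {
    throw new Error(`game ${entityKey} cannot be finished`)
  }

  // 3. Append the new event, citing the newest replayed event as previous_id.
  //    If another writer appended in the meantime, the database rejects the call
  //    and the handler can replay and retry.
  const previousId = events.at(-1).event_id
  const { rows: [{ event_id }] } = await db.query(
    'SELECT append_event($1, $2, $3, $4, $5, $6) AS event_id',
    ['game', entityKey, 'game-finished', {}, nanoid(), previousId])
  return event_id
}
```

The same flow works against SQLite by inserting into the `append_event` view instead of calling the function, as the next two sections show.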
151 | 
152 | ##### First Event for an Entity
153 | 
154 | In this case, the `previous_id` does not exist, so it is omitted from the insert statement.
155 | 
156 | ```sql
157 | -- SQLite version, see above for Postgres
158 | INSERT INTO append_event(entity,
159 |                          entity_key,
160 |                          event,
161 |                          data,
162 |                          append_key)
163 | VALUES (?, ?, ?, ?, ?);
164 | ```
165 | 
166 | ##### Subsequent Events
167 | 
168 | The `previous_id` is the `event_id` of the last event recorded for the specific entity.
169 | 
170 | ```sql
171 | -- SQLite version, see above for Postgres
172 | INSERT INTO append_event(entity,
173 |                          entity_key,
174 |                          event,
175 |                          data,
176 |                          append_key,
177 |                          previous_id)
178 | VALUES (?, ?, ?, ?, ?, ?);
179 | ```
180 | 
181 | If another event in this entity has already been appended using this `previous_id`, the database will reject the insert and require your application to replay the newer events to verify the entity state. Likewise, if the entity instance has events newer than `previous_id`, the append will be rejected. [Catch up](#catching-up-with-new-events) with the newest events and run the append again if still appropriate.
182 | 
183 | ### SQL Table Structure
184 | 
185 | #### `ledger` Table
186 | 
187 | | Column        | Notes                                                        |
188 | | ------------- | ------------------------------------------------------------ |
189 | | `entity`      | The entity name.                                             |
190 | | `entity_key`  | The business identifier for the entity.                      |
191 | | `event`       | The event name.                                              |
192 | | `data`        | The event data. Cannot be `null`, but can be an empty JSON object (`{}`). |
193 | | `append_key`  | The append key from the client. Database rules ensure an append key can only be used once. Can be a Command ID, or another client-generated unique key for the event append action. Useful for idempotent appends. |
194 | | `previous_id` | The event ID of the immediately-previous event for this entity. If this is the first event for an entity, then its value is `NULL`. |
195 | | `event_id`    | The event ID. This value is used by the next event append as its `previous_id` value to guard against a Lost Event problem. It can also be used to select subsequent events during replay. **AUTOPOPULATES—DO NOT INSERT.** |
196 | | `timestamp`   | The timestamp the event was inserted into the ledger. **AUTOPOPULATES—DO NOT INSERT.** |
197 | | `sequence`    | Overall ledger position for an event. **AUTOPOPULATES—DO NOT INSERT.** |
198 | 
199 | The `ledger` table is designed to allow multiple concurrent, uncoordinated writers to safely create events. It expects the client to know the difference between an entity's first event and subsequent events.
200 | 
201 | Multiple constraints are applied to this table to keep bad events out of the system, including duplicate events, reused append keys and out-of-order appends.
202 | 
203 | ## Running Tests
204 | 
205 | Running the tests is not necessary, but it is a useful way to validate the correctness of the DDLs. One must have [Node](https://nodejs.org) installed (Node 22 is what I used) and then:
206 | 
207 | ```bash
208 | > npm install
209 | ```
210 | 
211 | Once it has finished installing the dependencies, run the test for the database you are interested in.
212 | 
213 | ### SQLite Event Store
214 | 
215 | The [SQLite version](./sqlite-event-store.ddl) of SQL event store was built and tested with SQLite 3.49; it should run on recent versions of SQLite, at least since 2023.
216 | 
217 | ```bash
218 | > node --test test-sqlite.js
219 | ```
220 | 
221 | The SQLite test uses [sql.js](https://github.com/kripken/sql.js), the WASM build of SQLite, for reliable compilation and test execution. The test will dump the test database to `sqlite-store.db` for your examination.
222 | 
223 | ### Postgres Event Store
224 | 
225 | The [Postgres version](./postgres-event-store.ddl) of SQL event store has the same behavior as the SQLite version. It was built and tested on Postgres 17 but should work on other recent versions.
226 | 
227 | The Postgres version can be tested with the [test-postgres.js](./test-postgres.js) script. Run this file instead of `test-sqlite.js`. It does not need a running Postgres server. Instead, it uses [pglite](https://pglite.dev), a WASM compilation of Postgres 17.
228 | 
229 | ```bash
230 | > node --test test-postgres.js
231 | ```
232 | 
233 | The script will dump the test ledger table to `postgres-store.tsv` for your inspection.
--------------------------------------------------------------------------------