├── .rspec ├── spec ├── support │ ├── logging.rb │ ├── warnings.rb │ ├── test_events.rb │ ├── event_helpers.rb │ └── db_helpers.rb ├── event_sourcery │ ├── postgres_spec.rb │ └── postgres │ │ ├── config_spec.rb │ │ ├── optimised_event_poll_waiter_spec.rb │ │ ├── event_store_spec.rb │ │ ├── tracker_spec.rb │ │ ├── table_owner_spec.rb │ │ ├── projector_spec.rb │ │ └── reactor_spec.rb └── spec_helper.rb ├── lib └── event_sourcery │ ├── postgres │ ├── version.rb │ ├── queue_with_interval_callback.rb │ ├── projector.rb │ ├── config.rb │ ├── optimised_event_poll_waiter.rb │ ├── table_owner.rb │ ├── reactor.rb │ ├── tracker.rb │ ├── event_store.rb │ └── schema.rb │ └── postgres.rb ├── Rakefile ├── Gemfile ├── .gitignore ├── bin ├── setup └── console ├── .github └── workflows │ └── test.yml ├── LICENSE.txt ├── script ├── bench_writing_events.rb ├── bench_reading_events.rb └── demonstrate_event_sequence_id_gaps.rb ├── event_sourcery-postgres.gemspec ├── README.md ├── CODE_OF_CONDUCT.md └── CHANGELOG.md /.rspec: -------------------------------------------------------------------------------- 1 | --require spec_helper 2 | -------------------------------------------------------------------------------- /spec/support/logging.rb: -------------------------------------------------------------------------------- 1 | # quiet logging 2 | EventSourcery.config.logger.level = Logger::FATAL 3 | -------------------------------------------------------------------------------- /lib/event_sourcery/postgres/version.rb: -------------------------------------------------------------------------------- 1 | module EventSourcery 2 | module Postgres 3 | VERSION = '0.9.1'.freeze 4 | end 5 | end 6 | -------------------------------------------------------------------------------- /Rakefile: -------------------------------------------------------------------------------- 1 | require 'bundler/gem_tasks' 2 | require 'rspec/core/rake_task' 3 | 4 | RSpec::Core::RakeTask.new(:spec) 5 | 6 | task :default => :spec 7 | -------------------------------------------------------------------------------- /spec/support/warnings.rb: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | 3 | if Gem::Version.new(RUBY_VERSION) >= Gem::Version.new('2.7.2') 4 | Warning[:deprecated] = true 5 | end 6 | -------------------------------------------------------------------------------- /spec/support/test_events.rb: -------------------------------------------------------------------------------- 1 | ItemAdded = Class.new(EventSourcery::Event) 2 | ItemRemoved = Class.new(EventSourcery::Event) 3 | TermsAccepted = Class.new(EventSourcery::Event) 4 | -------------------------------------------------------------------------------- /Gemfile: -------------------------------------------------------------------------------- 1 | source 'https://rubygems.org' 2 | 3 | ruby '>= 2.2.0' 4 | 5 | gemspec 6 | 7 | gem 'event_sourcery', git: 'https://github.com/envato/event_sourcery.git', branch: 'main' 8 | -------------------------------------------------------------------------------- /spec/event_sourcery/postgres_spec.rb: -------------------------------------------------------------------------------- 1 | RSpec.describe EventSourcery::Postgres do 2 | it 'has a version number' do 3 | expect(EventSourcery::Postgres::VERSION).not_to be nil 4 | end 5 | end 6 | -------------------------------------------------------------------------------- /.gitignore: 
-------------------------------------------------------------------------------- 1 | /.bundle/ 2 | /.ruby-version 3 | /.yardoc 4 | /Gemfile.lock 5 | /_yardoc/ 6 | /coverage/ 7 | /doc/ 8 | /pkg/ 9 | /spec/reports/ 10 | /tmp/ 11 | /vendor/ 12 | 13 | # rspec failure tracking 14 | .rspec_status 15 | -------------------------------------------------------------------------------- /bin/setup: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -euo pipefail 3 | IFS=$'\n\t' 4 | set -vx 5 | 6 | bundle install 7 | 8 | echo 9 | echo "--- Preparing test databases" 10 | echo 11 | 12 | dropdb event_sourcery_test || echo 0 13 | createdb event_sourcery_test 14 | 15 | # Do any other automated setup that you need to do here 16 | -------------------------------------------------------------------------------- /bin/console: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env ruby 2 | 3 | require "bundler/setup" 4 | require "event_sourcery/postgres" 5 | 6 | # You can add fixtures and/or initialization code here to make experimenting 7 | # with your gem easier. You can also use a different console, if you like. 8 | 9 | # (If you use this, don't forget to add pry to your Gemfile!) 10 | # require "pry" 11 | # Pry.start 12 | 13 | require "irb" 14 | IRB.start(__FILE__) 15 | -------------------------------------------------------------------------------- /spec/support/event_helpers.rb: -------------------------------------------------------------------------------- 1 | module EventHelpers 2 | def new_event(aggregate_id: SecureRandom.uuid, type: 'test_event', body: {}, id: nil, version: 1, created_at: nil, uuid: SecureRandom.uuid) 3 | EventSourcery::Event.new(id: id, 4 | aggregate_id: aggregate_id, 5 | type: type, 6 | body: body, 7 | version: version, 8 | created_at: created_at, 9 | uuid: uuid) 10 | end 11 | end 12 | 13 | RSpec.configure do |config| 14 | config.include EventHelpers 15 | end 16 | -------------------------------------------------------------------------------- /lib/event_sourcery/postgres.rb: -------------------------------------------------------------------------------- 1 | require 'sequel' 2 | 3 | Sequel.default_timezone = :utc 4 | 5 | require 'event_sourcery' 6 | require 'event_sourcery/postgres/version' 7 | require 'event_sourcery/postgres/config' 8 | require 'event_sourcery/postgres/queue_with_interval_callback' 9 | require 'event_sourcery/postgres/schema' 10 | require 'event_sourcery/postgres/optimised_event_poll_waiter' 11 | require 'event_sourcery/postgres/event_store' 12 | require 'event_sourcery/postgres/table_owner' 13 | require 'event_sourcery/postgres/projector' 14 | require 'event_sourcery/postgres/reactor' 15 | require 'event_sourcery/postgres/tracker' 16 | 17 | module EventSourcery 18 | module Postgres 19 | def self.configure 20 | yield config 21 | end 22 | 23 | def self.config 24 | @config ||= Config.new 25 | end 26 | end 27 | end 28 | -------------------------------------------------------------------------------- /.github/workflows/test.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: tests 3 | on: [ push, pull_request ] 4 | jobs: 5 | test: 6 | name: Test (Ruby ${{ matrix.ruby }}) 7 | runs-on: ubuntu-latest 8 | strategy: 9 | matrix: 10 | ruby: [ '2.6', '2.7', '3.0', '3.1', '3.2', '3.3', '3.4' ] 11 | steps: 12 | - uses: actions/checkout@v4 13 | - name: Set up Ruby ${{ matrix.ruby }} 14 | uses: ruby/setup-ruby@v1 15 | 
with: 16 | ruby-version: ${{ matrix.ruby }} 17 | bundler-cache: true 18 | - name: RSpec 19 | run: bundle exec rspec 20 | env: 21 | POSTGRESQL_URL: postgres://postgres:secretdb@localhost:25432/ 22 | services: 23 | postgres: 24 | image: postgres 25 | env: 26 | POSTGRES_DB: event_sourcery_test 27 | POSTGRES_PASSWORD: secretdb 28 | ports: 29 | - 25432:5432 30 | 31 | -------------------------------------------------------------------------------- /lib/event_sourcery/postgres/queue_with_interval_callback.rb: -------------------------------------------------------------------------------- 1 | module EventSourcery 2 | module Postgres 3 | class QueueWithIntervalCallback < ::Queue 4 | attr_accessor :callback 5 | 6 | def initialize(callback: proc {}, callback_interval: EventSourcery::Postgres.config.callback_interval_if_no_new_events, poll_interval: 0.1) 7 | @callback = callback 8 | @callback_interval = callback_interval 9 | @poll_interval = poll_interval 10 | super() 11 | end 12 | 13 | def pop(non_block_without_callback = false) 14 | return super if non_block_without_callback 15 | pop_with_interval_callback 16 | end 17 | 18 | private 19 | 20 | def pop_with_interval_callback 21 | time = Time.now 22 | loop do 23 | return pop(true) unless empty? 24 | if @callback_interval && Time.now > time + @callback_interval 25 | @callback.call 26 | time = Time.now 27 | end 28 | sleep @poll_interval 29 | end 30 | end 31 | end 32 | end 33 | end 34 | -------------------------------------------------------------------------------- /LICENSE.txt: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2017 Envato 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in 13 | all copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 21 | THE SOFTWARE. 
22 | -------------------------------------------------------------------------------- /spec/support/db_helpers.rb: -------------------------------------------------------------------------------- 1 | module DBHelpers 2 | extend self 3 | 4 | def db_connection 5 | $db_connection ||= new_db_connection 6 | end 7 | 8 | module_function def new_db_connection 9 | Sequel.connect("#{postgres_url}event_sourcery_test").extension(:pg_json) 10 | end 11 | 12 | module_function def postgres_url 13 | ENV.fetch('POSTGRESQL_URL', 'postgres://127.0.0.1:5432/') 14 | end 15 | 16 | def reset_database 17 | db_connection.execute('truncate table aggregates') 18 | %w(events events_without_optimistic_locking).each do |_| 19 | db_connection.execute('truncate table events') 20 | db_connection.execute('alter sequence events_id_seq restart with 1') 21 | end 22 | end 23 | 24 | def recreate_database 25 | db_connection.execute('drop table if exists events') 26 | db_connection.execute('drop table if exists aggregates') 27 | db_connection.execute('drop table if exists projector_tracker') 28 | EventSourcery::Postgres::Schema.create_event_store(db: db_connection) 29 | EventSourcery::Postgres::Schema.create_projector_tracker(db: db_connection) 30 | end 31 | 32 | def release_advisory_locks(connection = db_connection) 33 | connection.fetch('SELECT pg_advisory_unlock_all();').to_a 34 | end 35 | end 36 | 37 | RSpec.configure do |config| 38 | config.include(DBHelpers) 39 | config.before(:suite) { DBHelpers.recreate_database } 40 | config.before(:example) { DBHelpers.reset_database } 41 | config.after(:example) { DBHelpers.release_advisory_locks } 42 | end 43 | -------------------------------------------------------------------------------- /script/bench_writing_events.rb: -------------------------------------------------------------------------------- 1 | # Usage: 2 | # 3 | # ❯ bundle exec ruby script/bench_writing_events.rb 4 | # Warming up -------------------------------------- 5 | # event_store.sink 6 | # 70.000 i/100ms 7 | # Calculating ------------------------------------- 8 | # event_store.sink 9 | # 522.007 (±10.9%) i/s - 2.590k in 5.021909s 10 | # 11 | # ^ results from running on a 2016 MacBook 12 | 13 | require 'benchmark/ips' 14 | require 'securerandom' 15 | require 'sequel' 16 | require 'event_sourcery/postgres' 17 | 18 | pg_uri = ENV.fetch('POSTGRESQL_URL', 'postgres://127.0.0.1:5432/').dup 19 | pg_uri << 'event_sourcery_test' 20 | pg_connection = Sequel.connect(pg_uri) 21 | 22 | EventSourcery.configure do |config| 23 | config.postgres.event_store_database = pg_connection 24 | config.postgres.projections_database = pg_connection 25 | config.logger.level = :fatal 26 | end 27 | 28 | def create_schema(pg_connection) 29 | pg_connection.execute 'drop table if exists events' 30 | pg_connection.execute 'drop table if exists aggregates' 31 | EventSourcery::Postgres::Schema.create_event_store(db: pg_connection) 32 | end 33 | 34 | create_schema(pg_connection) 35 | event_store = EventSourcery::Postgres::EventStore.new(pg_connection) 36 | 37 | def new_event 38 | EventSourcery::Event.new(type: :item_added, 39 | aggregate_id: SecureRandom.uuid, 40 | body: { 'something' => 'simple' }) 41 | end 42 | 43 | Benchmark.ips do |b| 44 | b.report('event_store.sink') do 45 | event_store.sink(new_event) 46 | end 47 | end 48 | -------------------------------------------------------------------------------- /event_sourcery-postgres.gemspec: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | lib = 
File.expand_path('../lib', __FILE__) 3 | $LOAD_PATH.unshift(lib) unless $LOAD_PATH.include?(lib) 4 | require 'event_sourcery/postgres/version' 5 | 6 | Gem::Specification.new do |spec| 7 | spec.name = 'event_sourcery-postgres' 8 | spec.version = EventSourcery::Postgres::VERSION 9 | 10 | spec.authors = ['Envato'] 11 | spec.email = ['rubygems@envato.com'] 12 | 13 | spec.summary = 'Postgres event store for use with EventSourcery' 14 | spec.homepage = 'https://github.com/envato/event_sourcery-postgres' 15 | spec.metadata = { 16 | 'bug_tracker_uri' => 'https://github.com/envato/event_sourcery-postgres/issues', 17 | 'changelog_uri' => 'https://github.com/envato/event_sourcery-postgres/blob/HEAD/CHANGELOG.md', 18 | 'source_code_uri' => 'https://github.com/envato/event_sourcery-postgres', 19 | } 20 | 21 | spec.files = `git ls-files -z`.split("\x0").reject do |f| 22 | f.match(%r{^(\.|bin/|Gemfile|Rakefile|script/|spec/)}) 23 | end 24 | spec.bindir = 'exe' 25 | spec.executables = spec.files.grep(%r{^exe/}) { |f| File.basename(f) } 26 | spec.require_paths = ['lib'] 27 | 28 | spec.required_ruby_version = '>= 2.2.0' 29 | 30 | spec.add_dependency 'sequel', '>= 4.38' 31 | spec.add_dependency 'pg' 32 | spec.add_dependency 'event_sourcery', '>= 0.14.0' 33 | spec.add_development_dependency 'bundler' 34 | spec.add_development_dependency 'rake', '~> 13' 35 | spec.add_development_dependency 'rspec', '~> 3.0' 36 | spec.add_development_dependency 'pry' 37 | spec.add_development_dependency 'benchmark-ips' 38 | end 39 | -------------------------------------------------------------------------------- /spec/event_sourcery/postgres/config_spec.rb: -------------------------------------------------------------------------------- 1 | RSpec.describe EventSourcery::Postgres::Config do 2 | subject(:config) { described_class.new } 3 | 4 | context 'when reading the event_store' do 5 | context 'and an event_store_database is set' do 6 | before do 7 | allow(db_connection).to receive(:extension).with(:pg_json) 8 | config.event_store_database = db_connection 9 | end 10 | 11 | it 'returns a EventSourcery::Postgres::EventStore' do 12 | expect(config.event_store).to be_instance_of(EventSourcery::Postgres::EventStore) 13 | end 14 | 15 | it 'loads pg_json extension on database' do 16 | expect(db_connection).to have_received(:extension).with(:pg_json) 17 | end 18 | end 19 | 20 | context 'and an event_store is set' do 21 | let(:event_store) { double(:event_store) } 22 | before do 23 | config.event_store = event_store 24 | config.event_store_database = nil 25 | end 26 | 27 | it 'returns the event_store' do 28 | expect(config.event_store).to eq(event_store) 29 | end 30 | end 31 | end 32 | 33 | context 'setting the projections database' do 34 | before do 35 | allow(db_connection).to receive(:extension).with(:pg_json) 36 | config.projections_database = db_connection 37 | end 38 | 39 | it 'sets the projections_database' do 40 | expect(config.projections_database).to eq db_connection 41 | end 42 | 43 | it 'sets the event_tracker' do 44 | expect(config.event_tracker).to be_instance_of(EventSourcery::Postgres::Tracker) 45 | end 46 | 47 | it 'loads pg_json extension on database' do 48 | expect(db_connection).to have_received(:extension).with(:pg_json) 49 | end 50 | end 51 | end 52 | -------------------------------------------------------------------------------- /lib/event_sourcery/postgres/projector.rb: -------------------------------------------------------------------------------- 1 | module EventSourcery 2 | module Postgres 3 | module 
Projector 4 | def self.included(base) 5 | base.include(EventProcessing::EventStreamProcessor) 6 | base.prepend(TableOwner) 7 | base.include(InstanceMethods) 8 | base.class_eval do 9 | alias_method :project, :process 10 | 11 | class << self 12 | alias_method :project, :process 13 | alias_method :projector_name, :processor_name 14 | end 15 | end 16 | end 17 | 18 | module InstanceMethods 19 | def initialize(tracker: EventSourcery::Postgres.config.event_tracker, 20 | db_connection: EventSourcery::Postgres.config.projections_database, 21 | transaction_size: EventSourcery::Postgres.config.projector_transaction_size) 22 | @tracker = tracker 23 | @db_connection = db_connection 24 | @transaction_size = transaction_size 25 | end 26 | 27 | private 28 | 29 | attr_reader :transaction_size 30 | 31 | def process_events(events, subscription_master) 32 | events.each_slice(transaction_size) do |slice_of_events| 33 | subscription_master.shutdown_if_requested 34 | 35 | db_connection.transaction do 36 | slice_of_events.each do |event| 37 | process(event) 38 | EventSourcery.logger.debug { "[#{processor_name}] Processed event: #{event.inspect}" } 39 | end 40 | tracker.processed_event(processor_name, slice_of_events.last.id) 41 | end 42 | end 43 | 44 | EventSourcery.logger.info { "[#{processor_name}] Processed up to event id: #{events.last.id}" } 45 | end 46 | end 47 | end 48 | end 49 | end 50 | -------------------------------------------------------------------------------- /script/bench_reading_events.rb: -------------------------------------------------------------------------------- 1 | # Usage: 2 | # 3 | # ❯ bundle exec ruby script/bench_reading_events.rb 4 | # Creating 10000 events 5 | # Took 42.35533199999918 to create events 6 | # Took 4.9821800000027 to read all events 7 | # ^ results from running on a 2016 MacBook 8 | 9 | require 'benchmark' 10 | require 'securerandom' 11 | require 'sequel' 12 | require 'event_sourcery/postgres' 13 | 14 | pg_uri = ENV.fetch('POSTGRESQL_URL', 'postgres://127.0.0.1:5432/').dup 15 | pg_uri << 'event_sourcery_test' 16 | pg_connection = Sequel.connect(pg_uri) 17 | 18 | EventSourcery.configure do |config| 19 | config.postgres.event_store_database = pg_connection 20 | config.postgres.projections_database = pg_connection 21 | config.logger.level = :fatal 22 | end 23 | 24 | def create_events_schema(pg_connection) 25 | pg_connection.execute 'drop table if exists events' 26 | pg_connection.execute 'drop table if exists aggregates' 27 | EventSourcery::Postgres::Schema.create_event_store(db: pg_connection) 28 | end 29 | 30 | event_store = EventSourcery::Postgres.config.event_store 31 | 32 | EVENT_TYPES = %i[ 33 | item_added 34 | item_removed 35 | item_starred 36 | ].freeze 37 | 38 | def new_event(uuid) 39 | EventSourcery::Event.new(type: EVENT_TYPES.sample, 40 | aggregate_id: uuid, 41 | body: { 'something' => 'simple' }) 42 | end 43 | 44 | create_events_schema(pg_connection) 45 | 46 | NUM_EVENTS = 10_000 47 | puts "Creating #{NUM_EVENTS} events" 48 | time = Benchmark.realtime do 49 | uuid = SecureRandom.uuid 50 | NUM_EVENTS.times { event_store.sink(new_event(uuid)) } 51 | end 52 | puts "Took #{time} to create events" 53 | 54 | seen_events_count = 0 55 | time = Benchmark.realtime do 56 | event_store.subscribe(from_id: 0, subscription_master: EventSourcery::EventStore::SignalHandlingSubscriptionMaster.new) do |events| 57 | seen_events_count += events.count 58 | throw :stop if seen_events_count >= NUM_EVENTS 59 | end 60 | end 61 | puts "Took #{time} to read all events" 62 | 
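# Optionally (a rough sketch, not part of the measurements above), the same
# locals can be reused to report throughput as well as wall-clock time:
#
#   puts "~#{(NUM_EVENTS / time).round} events/second read"
#
# `NUM_EVENTS` and `time` are already in scope here, so the line can simply be
# appended after the final `puts`.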
-------------------------------------------------------------------------------- /lib/event_sourcery/postgres/config.rb: -------------------------------------------------------------------------------- 1 | module EventSourcery 2 | module Postgres 3 | class Config 4 | attr_accessor :lock_table_to_guarantee_linear_sequence_id_growth, 5 | :write_events_function_name, 6 | :events_table_name, 7 | :aggregates_table_name, 8 | :tracker_table_name, 9 | :callback_interval_if_no_new_events, 10 | :auto_create_projector_tracker, 11 | :event_tracker, 12 | :projector_transaction_size, 13 | :on_events_recorded 14 | 15 | attr_writer :event_store, 16 | :event_source, 17 | :event_sink 18 | 19 | attr_reader :event_store_database, 20 | :projections_database 21 | 22 | def initialize 23 | @lock_table_to_guarantee_linear_sequence_id_growth = true 24 | @write_events_function_name = 'writeEvents' 25 | @events_table_name = :events 26 | @aggregates_table_name = :aggregates 27 | @tracker_table_name = :projector_tracker 28 | @callback_interval_if_no_new_events = 10 29 | @event_store_database = nil 30 | @auto_create_projector_tracker = true 31 | @projector_transaction_size = 1 32 | @on_events_recorded = ->(events) {} 33 | end 34 | 35 | def event_store 36 | @event_store ||= EventStore.new(event_store_database) 37 | end 38 | 39 | def event_source 40 | @event_source ||= ::EventSourcery::EventStore::EventSource.new(event_store) 41 | end 42 | 43 | def event_sink 44 | @event_sink ||= ::EventSourcery::EventStore::EventSink.new(event_store) 45 | end 46 | 47 | def event_store_database=(db_connection) 48 | setup_connection(db_connection) 49 | 50 | @event_store_database = db_connection 51 | end 52 | 53 | def projections_database=(db_connection) 54 | setup_connection(db_connection) 55 | 56 | @projections_database = db_connection 57 | @event_tracker = Postgres::Tracker.new(db_connection) 58 | end 59 | 60 | private 61 | 62 | def setup_connection(db_connection) 63 | return unless db_connection 64 | 65 | db_connection.extension :pg_json 66 | end 67 | end 68 | end 69 | end 70 | -------------------------------------------------------------------------------- /lib/event_sourcery/postgres/optimised_event_poll_waiter.rb: -------------------------------------------------------------------------------- 1 | module EventSourcery 2 | module Postgres 3 | # Optimise poll interval with Postgres listen/notify 4 | class OptimisedEventPollWaiter 5 | ListenThreadDied = Class.new(StandardError) 6 | 7 | def initialize(db_connection:, timeout: 30, after_listen: proc {}) 8 | @db_connection = db_connection 9 | @timeout = timeout 10 | @events_queue = QueueWithIntervalCallback.new 11 | @after_listen = after_listen 12 | end 13 | 14 | def poll(after_listen: proc { }, &block) 15 | @events_queue.callback = proc do 16 | ensure_listen_thread_alive! 17 | block.call 18 | end 19 | start_async(after_listen: after_listen) 20 | catch(:stop) do 21 | block.call 22 | loop do 23 | ensure_listen_thread_alive! 24 | wait_for_new_event_to_appear 25 | clear_new_event_queue 26 | block.call 27 | end 28 | end 29 | ensure 30 | shutdown! 31 | end 32 | 33 | private 34 | 35 | def shutdown! 36 | @listen_thread.kill if @listen_thread.alive? 37 | end 38 | 39 | def ensure_listen_thread_alive! 40 | raise ListenThreadDied unless @listen_thread.alive? 
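        # (Raising here is deliberate: once the LISTEN thread is gone, NOTIFY
        # wake-ups stop arriving and only the interval callback would drive the
        # poller, so we surface the failure rather than silently degrade.)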
41 | end 42 | 43 | def wait_for_new_event_to_appear 44 | @events_queue.pop 45 | end 46 | 47 | def clear_new_event_queue 48 | @events_queue.clear 49 | end 50 | 51 | def start_async(after_listen: nil) 52 | after_listen_callback = if after_listen 53 | proc do 54 | after_listen.call 55 | @after_listen.call if @after_listen 56 | end 57 | else 58 | @after_listen 59 | end 60 | @listen_thread = Thread.new do 61 | listen_for_new_events(loop: true, 62 | after_listen: after_listen_callback, 63 | timeout: @timeout) 64 | end 65 | end 66 | 67 | def listen_for_new_events(loop: true, after_listen: nil, timeout: 30) 68 | @db_connection.listen('new_event', 69 | loop: loop, 70 | after_listen: after_listen, 71 | timeout: timeout) do |_channel, _pid, _payload| 72 | @events_queue.push(:new_event_arrived) if @events_queue.empty? 73 | end 74 | end 75 | end 76 | end 77 | end 78 | -------------------------------------------------------------------------------- /lib/event_sourcery/postgres/table_owner.rb: -------------------------------------------------------------------------------- 1 | module EventSourcery 2 | module Postgres 3 | module TableOwner 4 | DefaultTableError = Class.new(StandardError) 5 | NoSuchTableError = Class.new(StandardError) 6 | 7 | def self.prepended(base) 8 | base.extend(ClassMethods) 9 | end 10 | 11 | module ClassMethods 12 | # Hash of the tables and their corresponding blocks. 13 | # 14 | # @return [Hash] hash keyed by table names and block values 15 | def tables 16 | @tables ||= {} 17 | end 18 | 19 | # For the given table name assign to give block as the value. 20 | # 21 | # @param name the name of the table 22 | # @param block the block of code to assign for the table 23 | def table(name, &block) 24 | tables[name] = block 25 | end 26 | end 27 | 28 | # Create each table. 29 | def setup 30 | self.class.tables.each do |table_name, schema_block| 31 | prefixed_name = table_name_prefixed(table_name) 32 | @db_connection.create_table?(prefixed_name, &schema_block) 33 | end 34 | super if defined?(super) 35 | end 36 | 37 | # Reset by dropping each table. 38 | def reset 39 | self.class.tables.keys.each do |table_name| 40 | prefixed_name = table_name_prefixed(table_name) 41 | if @db_connection.table_exists?(prefixed_name) 42 | @db_connection.drop_table(prefixed_name, cascade: true) 43 | end 44 | end 45 | super if defined?(super) 46 | setup 47 | end 48 | 49 | # This will truncate all the tables and reset the tracker back to 0, 50 | # done as a transaction. 51 | def truncate 52 | self.class.tables.each do |table_name, _| 53 | @db_connection.transaction do 54 | prefixed_name = table_name_prefixed(table_name) 55 | @db_connection[prefixed_name].truncate 56 | tracker.reset_last_processed_event_id(self.class.processor_name) 57 | end 58 | end 59 | end 60 | 61 | private 62 | 63 | attr_reader :db_connection 64 | attr_accessor :table_prefix 65 | 66 | def table(name = nil) 67 | if name.nil? 
&& self.class.tables.length != 1 68 | raise DefaultTableError, 'You must specify table name when when 0 or multiple tables are defined' 69 | end 70 | 71 | name ||= self.class.tables.keys.first 72 | 73 | unless self.class.tables[name.to_sym] 74 | raise NoSuchTableError, "There is no table with the name '#{name}' defined" 75 | end 76 | 77 | db_connection[table_name_prefixed(name)] 78 | end 79 | 80 | def table_name_prefixed(name) 81 | [table_prefix, name].compact.join('_').to_sym 82 | end 83 | end 84 | end 85 | end 86 | -------------------------------------------------------------------------------- /spec/event_sourcery/postgres/optimised_event_poll_waiter_spec.rb: -------------------------------------------------------------------------------- 1 | RSpec.describe EventSourcery::Postgres::OptimisedEventPollWaiter do 2 | let(:after_listen) { proc {} } 3 | subject(:waiter) { described_class.new(db_connection: db_connection, after_listen: after_listen) } 4 | 5 | before do 6 | allow(EventSourcery::Postgres::QueueWithIntervalCallback).to receive(:new) 7 | .and_return(EventSourcery::Postgres::QueueWithIntervalCallback.new(callback_interval: 0)) 8 | end 9 | 10 | def notify_event_ids(*ids) 11 | ids.each do |id| 12 | db_connection.notify('new_event', payload: id) 13 | end 14 | end 15 | 16 | it 'does an initial call' do 17 | waiter.poll(after_listen: proc {}) do 18 | @called = true 19 | throw :stop 20 | end 21 | 22 | expect(@called).to eq true 23 | end 24 | 25 | it 'calls on new event' do 26 | waiter.poll(after_listen: proc { notify_event_ids(1) }) do 27 | @called = true 28 | throw :stop 29 | end 30 | 31 | expect(@called).to eq true 32 | end 33 | 34 | it 'calls once when multiple events are in the queue' do 35 | waiter.poll(after_listen: proc { notify_event_ids(1, 2) }) do 36 | @called = true 37 | throw :stop 38 | end 39 | 40 | expect(@called).to eq true 41 | end 42 | 43 | context 'when the listening thread dies' do 44 | before do 45 | allow(db_connection).to receive(:listen).and_raise(StandardError) 46 | end 47 | 48 | it 'raise an error' do 49 | quiet_thread_report_on_exception do 50 | expect { 51 | waiter.poll {} 52 | }.to raise_error(described_class::ListenThreadDied) 53 | end 54 | end 55 | end 56 | 57 | context 'when an error is raised' do 58 | let(:thread) { double } 59 | 60 | before { allow(Thread).to receive(:new).and_return(thread) } 61 | 62 | context 'when the listening thread is alive' do 63 | it 'kills the listening thread' do 64 | allow(thread).to receive(:alive?).and_return(true) 65 | expect(thread).to receive(:kill) 66 | 67 | waiter.poll(after_listen: proc { notify_event_ids(1) }) do 68 | @called = true 69 | throw :stop 70 | end 71 | end 72 | end 73 | 74 | context 'when the listening thread is not alive' do 75 | it 'does not try to kill any listening threads' do 76 | allow(thread).to receive(:alive?).and_return(false) 77 | expect(thread).to_not receive(:kill) 78 | 79 | waiter.poll(after_listen: proc { notify_event_ids(1) }) do 80 | @called = true 81 | throw :stop 82 | end 83 | end 84 | end 85 | end 86 | 87 | def quiet_thread_report_on_exception(&block) 88 | if Thread.respond_to?(:report_on_exception) 89 | orig_report_on_exception = Thread.report_on_exception 90 | Thread.report_on_exception = false 91 | 92 | block.call 93 | 94 | Thread.report_on_exception = orig_report_on_exception 95 | else 96 | block.call 97 | end 98 | end 99 | end 100 | -------------------------------------------------------------------------------- /lib/event_sourcery/postgres/reactor.rb: 
-------------------------------------------------------------------------------- 1 | module EventSourcery 2 | module Postgres 3 | module Reactor 4 | UndeclaredEventEmissionError = Class.new(StandardError) 5 | 6 | def self.included(base) 7 | base.include(EventProcessing::EventStreamProcessor) 8 | base.extend(ClassMethods) 9 | base.prepend(TableOwner) 10 | base.include(InstanceMethods) 11 | end 12 | 13 | module ClassMethods 14 | # Assign the types of events this reactor can emit. 15 | # 16 | # @param event_types the types of events this reactor can emit 17 | def emits_events(*event_types) 18 | @emits_event_types = event_types 19 | end 20 | 21 | # @return [Array] an array of the types of events this reactor can emit 22 | def emit_events 23 | @emits_event_types ||= [] 24 | end 25 | 26 | # This will tell you if this reactor emits any type of event. 27 | # 28 | # @return [true, false] true if this emits events, false if not 29 | def emits_events? 30 | !emit_events.empty? 31 | end 32 | 33 | # Will check if this reactor emits the given type of event. 34 | # 35 | # @param event_type the event type to check 36 | # @return [true, false] true if it does emit the given event false if not 37 | def emits_event?(event_type) 38 | emit_events.include?(event_type) 39 | end 40 | end 41 | 42 | module InstanceMethods 43 | def initialize(tracker: EventSourcery::Postgres.config.event_tracker, 44 | db_connection: EventSourcery::Postgres.config.projections_database, 45 | event_source: EventSourcery::Postgres.config.event_source, 46 | event_sink: EventSourcery::Postgres.config.event_sink) 47 | @tracker = tracker 48 | @event_source = event_source 49 | @event_sink = event_sink 50 | @db_connection = db_connection 51 | if self.class.emits_events? 52 | if event_sink.nil? || event_source.nil? 53 | raise ArgumentError, 'An event sink and source is required for processors that emit events' 54 | end 55 | end 56 | end 57 | end 58 | 59 | private 60 | 61 | attr_reader :event_sink, :event_source 62 | 63 | def emit_event(event_or_hash, &block) 64 | event = if Event === event_or_hash 65 | event_or_hash 66 | else 67 | Event.new(event_or_hash) 68 | end 69 | raise UndeclaredEventEmissionError unless self.class.emits_event?(event.class) 70 | event = event.with(causation_id: _event.uuid, correlation_id: _event.correlation_id) 71 | invoke_action_and_emit_event(event, block) 72 | EventSourcery.logger.debug { "[#{self.processor_name}] Emitted event: #{event.inspect}" } 73 | end 74 | 75 | def invoke_action_and_emit_event(event, action) 76 | action.call(event.body) if action 77 | event_sink.sink(event) 78 | end 79 | end 80 | end 81 | end 82 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # EventSourcery::Postgres 2 | 3 | [![Build Status](https://github.com/envato/event_sourcery-postgres/workflows/tests/badge.svg?branch=main)](https://github.com/envato/event_sourcery-postgres/actions?query=workflow%3Atests+branch%3Amain) 4 | 5 | ## Development Status 6 | 7 | EventSourcery is currently being used in production by multiple apps but we 8 | haven't finalized the API yet and things are still moving rapidly. Until we 9 | release a 1.0 things may change without first being deprecated. 
10 | 
11 | ## Installation
12 | 
13 | Add this line to your application's Gemfile:
14 | 
15 | ```ruby
16 | gem 'event_sourcery-postgres'
17 | ```
18 | 
19 | ## Configure
20 | 
21 | ```ruby
22 | EventSourcery::Postgres.configure do |config|
23 |   config.event_store_database = Sequel.connect(...)
24 |   config.projections_database = Sequel.connect(...)
25 |   config.write_events_function_name = 'writeEvents'
26 |   config.events_table_name = :events
27 |   config.aggregates_table_name = :aggregates
28 |   config.callback_interval_if_no_new_events = 60
29 | end
30 | ```
31 | 
32 | ## Usage
33 | 
34 | 
35 | ### Event Store
36 | 
37 | ```ruby
38 | ItemAdded = Class.new(EventSourcery::Event)
39 | 
40 | EventSourcery::Postgres.config.event_store.sink(ItemAdded.new(aggregate_id: uuid, body: {}))
41 | EventSourcery::Postgres.config.event_store.get_next_from(0).each do |event|
42 |   puts event.inspect
43 | end
44 | ```
45 | 
46 | ### Projectors & Reactors
47 | 
48 | ```ruby
49 | class ItemProjector
50 |   include EventSourcery::Postgres::Projector
51 | 
52 |   table :items do
53 |     column :item_uuid, 'UUID NOT NULL'
54 |     column :title, 'VARCHAR(255) NOT NULL'
55 |   end
56 | 
57 |   project ItemAdded do |event|
58 |     table(:items).insert(item_uuid: event.aggregate_id,
59 |                          title: event.body.fetch('title'))
60 |   end
61 | end
62 | 
63 | class UserEmailer
64 |   include EventSourcery::Postgres::Reactor
65 | 
66 |   emits_events SignUpEmailSent
67 | 
68 |   process UserSignedUp do |event|
69 |     emit_event SignUpEmailSent.new(user_id: event.aggregate_id) do
70 |       UserMailer.signed_up(...).deliver
71 |     end
72 |   end
73 | end
74 | 
75 | EventSourcery::EventProcessing::ESPRunner.new(
76 |   event_processors: [item_projector, user_emailer],
77 |   event_store: EventSourcery::Postgres.config.event_store,
78 |   stop_on_failure: true,
79 | ).start!
80 | ```
81 | 
82 | 
83 | ## Development
84 | 
85 | After checking out the repo, run `bin/setup` to install dependencies. (This will install dependencies and recreate the test database.) Then, run `rake spec` to run the tests. You can also run `bin/console` for an interactive prompt that will allow you to experiment.
86 | 
87 | To install this gem onto your local machine, run `bundle exec rake install`.
88 | 
89 | To release a new version:
90 | 
91 | 1. Update the version number in `lib/event_sourcery/postgres/version.rb`
92 | 2. Get this change onto main via the normal PR process
93 | 3. Run `bundle exec rake release`. This will create a git tag for the
94 |    version, push tags up to GitHub, and upload the gem to rubygems.org.
95 | 
96 | ## Contributing
97 | 
98 | Bug reports and pull requests are welcome on GitHub at https://github.com/envato/event_sourcery-postgres.
99 | 
--------------------------------------------------------------------------------
/CODE_OF_CONDUCT.md:
--------------------------------------------------------------------------------
1 | # Contributor Covenant Code of Conduct
2 | 
3 | ## Our Pledge
4 | 
5 | In the interest of fostering an open and welcoming environment, we as
6 | contributors and maintainers pledge to making participation in our project and
7 | our community a harassment-free experience for everyone, regardless of age, body
8 | size, disability, ethnicity, gender identity and expression, level of experience,
9 | nationality, personal appearance, race, religion, or sexual identity and
10 | orientation.
11 | 12 | ## Our Standards 13 | 14 | Examples of behavior that contributes to creating a positive environment 15 | include: 16 | 17 | * Using welcoming and inclusive language 18 | * Being respectful of differing viewpoints and experiences 19 | * Gracefully accepting constructive criticism 20 | * Focusing on what is best for the community 21 | * Showing empathy towards other community members 22 | 23 | Examples of unacceptable behavior by participants include: 24 | 25 | * The use of sexualized language or imagery and unwelcome sexual attention or 26 | advances 27 | * Trolling, insulting/derogatory comments, and personal or political attacks 28 | * Public or private harassment 29 | * Publishing others' private information, such as a physical or electronic 30 | address, without explicit permission 31 | * Other conduct which could reasonably be considered inappropriate in a 32 | professional setting 33 | 34 | ## Our Responsibilities 35 | 36 | Project maintainers are responsible for clarifying the standards of acceptable 37 | behavior and are expected to take appropriate and fair corrective action in 38 | response to any instances of unacceptable behavior. 39 | 40 | Project maintainers have the right and responsibility to remove, edit, or 41 | reject comments, commits, code, wiki edits, issues, and other contributions 42 | that are not aligned to this Code of Conduct, or to ban temporarily or 43 | permanently any contributor for other behaviors that they deem inappropriate, 44 | threatening, offensive, or harmful. 45 | 46 | ## Scope 47 | 48 | This Code of Conduct applies both within project spaces and in public spaces 49 | when an individual is representing the project or its community. Examples of 50 | representing a project or community include using an official project e-mail 51 | address, posting via an official social media account, or acting as an appointed 52 | representative at an online or offline event. Representation of a project may be 53 | further defined and clarified by project maintainers. 54 | 55 | ## Enforcement 56 | 57 | Instances of abusive, harassing, or otherwise unacceptable behavior may be 58 | reported by contacting the project team at odindutton@gmail.com. All 59 | complaints will be reviewed and investigated and will result in a response that 60 | is deemed necessary and appropriate to the circumstances. The project team is 61 | obligated to maintain confidentiality with regard to the reporter of an incident. 62 | Further details of specific enforcement policies may be posted separately. 63 | 64 | Project maintainers who do not follow or enforce the Code of Conduct in good 65 | faith may face temporary or permanent repercussions as determined by other 66 | members of the project's leadership. 
67 | 68 | ## Attribution 69 | 70 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, 71 | available at [http://contributor-covenant.org/version/1/4][version] 72 | 73 | [homepage]: http://contributor-covenant.org 74 | [version]: http://contributor-covenant.org/version/1/4/ 75 | -------------------------------------------------------------------------------- /spec/event_sourcery/postgres/event_store_spec.rb: -------------------------------------------------------------------------------- 1 | require 'timeout' 2 | 3 | RSpec.describe EventSourcery::Postgres::EventStore do 4 | let(:supports_versions) { true } 5 | subject(:event_store) { described_class.new(db_connection) } 6 | 7 | include_examples 'an event store' 8 | 9 | describe '#sink' do 10 | let(:on_events_recorded_proc) { double("OnEventsRecordedProc", call: nil) } 11 | 12 | before do 13 | allow(EventSourcery::Postgres.config) 14 | .to receive(:on_events_recorded) 15 | .and_return(on_events_recorded_proc) 16 | end 17 | 18 | it 'notifies about a new event' do 19 | event_id = nil 20 | Timeout.timeout(1) do 21 | db_connection.listen('new_event', loop: false, after_listen: proc { add_event }) do |channel, pid, payload| 22 | event_id = Integer(payload) 23 | end 24 | end 25 | 26 | expect(event_id).not_to be_nil 27 | expect(on_events_recorded_proc).to have_received(:call) 28 | end 29 | end 30 | 31 | describe '#get_events_for_aggregate_id' do 32 | context 'when aggregate_id is a string' do 33 | include_examples 'gets events for a specific aggregate id' do 34 | let(:uuid) { aggregate_id } 35 | end 36 | end 37 | 38 | context 'when aggregate_id is convertible to a string' do 39 | include_examples 'gets events for a specific aggregate id' do 40 | let(:uuid) { double(to_str: aggregate_id) } 41 | end 42 | end 43 | end 44 | 45 | describe '#subscribe' do 46 | let(:event) { new_event(aggregate_id: aggregate_id) } 47 | let(:subscription_master) { spy(EventSourcery::EventStore::SignalHandlingSubscriptionMaster) } 48 | 49 | it 'notifies of new events' do 50 | event_store.subscribe(from_id: 0, 51 | after_listen: proc { event_store.sink(event) }, 52 | subscription_master: subscription_master) do |events| 53 | @events = events 54 | throw :stop 55 | end 56 | expect(@events.count).to eq 1 57 | expect(@events.first.aggregate_id).to eq aggregate_id 58 | end 59 | end 60 | 61 | context 'aggregates table version' do 62 | def save_event(expected_version: nil) 63 | event_store.sink(new_event(aggregate_id: aggregate_id, 64 | type: :billing_details_provided, 65 | body: { my_event: 'data' }), 66 | expected_version: expected_version) 67 | end 68 | 69 | def add_event 70 | event_store.sink(new_event(aggregate_id: aggregate_id)) 71 | end 72 | 73 | def last_event 74 | event_store.get_next_from(0).last 75 | end 76 | 77 | def aggregate_version 78 | result = db_connection[:aggregates]. 79 | where(aggregate_id: aggregate_id). 
80 | first 81 | result[:version] if result 82 | end 83 | 84 | context "when the aggregate doesn't exist" do 85 | context 'and the expected version is correct - 0' do 86 | it 'saves the event with and sets the aggregate version to version 1' do 87 | save_event(expected_version: 0) 88 | expect(aggregate_version).to eq 1 89 | end 90 | end 91 | 92 | context 'with no expected version' do 93 | it 'saves the event with and sets the aggregate version to version 1' do 94 | save_event 95 | expect(aggregate_version).to eq 1 96 | end 97 | end 98 | end 99 | 100 | context 'when the aggregate exists' do 101 | before { add_event } 102 | 103 | context 'with a correct expected version - 1' do 104 | it 'saves the event with and sets the aggregate version to version 2' do 105 | save_event 106 | expect(aggregate_version).to eq 2 107 | end 108 | end 109 | 110 | context 'with no aggregate version' do 111 | it 'automatically sets the version on the event and aggregate' do 112 | save_event 113 | expect(aggregate_version).to eq 2 114 | end 115 | end 116 | end 117 | 118 | context 'when a database error occurs that is not a concurrency error' do 119 | before do 120 | allow(db_connection).to receive(:run).and_raise(Sequel::DatabaseError) 121 | end 122 | 123 | it 'raises it' do 124 | expect { add_event }.to raise_error(Sequel::DatabaseError) 125 | end 126 | end 127 | end 128 | end 129 | -------------------------------------------------------------------------------- /lib/event_sourcery/postgres/tracker.rb: -------------------------------------------------------------------------------- 1 | module EventSourcery 2 | module Postgres 3 | # This will set up a persisted event id tracker for processors. 4 | class Tracker 5 | 6 | def initialize(db_connection = EventSourcery::Postgres.config.projections_database, 7 | table_name: EventSourcery::Postgres.config.tracker_table_name, 8 | obtain_processor_lock: true) 9 | @db_connection = db_connection 10 | @table_name = table_name.to_sym 11 | @obtain_processor_lock = obtain_processor_lock 12 | end 13 | 14 | # Set up the given processor. 15 | # This will create the projector tracker table if it does not exist. 16 | # If given a processor_name it will then attempt to get a lock on the db. 17 | # 18 | # @param processor_name the name of the processor 19 | def setup(processor_name = nil) 20 | create_table_if_not_exists if EventSourcery::Postgres.config.auto_create_projector_tracker 21 | 22 | unless tracker_table_exists? 23 | raise UnableToLockProcessorError, 'Projector tracker table does not exist' 24 | end 25 | 26 | if processor_name 27 | create_track_entry_if_not_exists(processor_name) 28 | if @obtain_processor_lock 29 | obtain_global_lock_on_processor(processor_name) 30 | end 31 | end 32 | end 33 | 34 | # This will updated the tracker table to the given event id value 35 | # for the given processor name. 36 | # 37 | # @param processor_name the name of the processor to update 38 | # @param event_id the event id number to update to 39 | def processed_event(processor_name, event_id) 40 | table. 41 | where(name: processor_name.to_s). 42 | update(last_processed_event_id: event_id) 43 | true 44 | end 45 | 46 | # This allows you to process an event and update the tracker table in 47 | # a single transaction. Will yield the given block first then update the 48 | # the tracker table to the give event id for the given processor name. 
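      # @example Processing an event atomically (processor name here is illustrative)
      #   tracker.processing_event('item_projector', event.id) do
      #     # handle the event; the tracker row update commits in the same transaction
      #   end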
49 | # 50 | # @param processor_name the name of the processor to update 51 | # @param event_id the event id number to update to 52 | def processing_event(processor_name, event_id) 53 | @db_connection.transaction do 54 | yield 55 | processed_event(processor_name, event_id) 56 | end 57 | end 58 | 59 | # This will reset the tracker to the start (0) for the given processor name. 60 | # 61 | # @param processor_name the name of the processor to reset to 0 62 | def reset_last_processed_event_id(processor_name) 63 | table.where(name: processor_name.to_s).update(last_processed_event_id: 0) 64 | end 65 | 66 | # This will return the last processed event id for the given processor name. 67 | # 68 | # @param processor_name the name of the processor you want to look up 69 | # @return [Int, nil] the value of the last event_id processed 70 | def last_processed_event_id(processor_name) 71 | track_entry = table.where(name: processor_name.to_s).first 72 | track_entry[:last_processed_event_id] if track_entry 73 | end 74 | 75 | # Will return an array of all known tracked processors. 76 | # 77 | # @return [Array] array of all known tracked processors 78 | def tracked_processors 79 | table.select_map(:name) 80 | end 81 | 82 | private 83 | 84 | def obtain_global_lock_on_processor(processor_name) 85 | lock_obtained = @db_connection.fetch("select pg_try_advisory_lock(#{@track_entry_id})").to_a.first[:pg_try_advisory_lock] 86 | if lock_obtained == false 87 | raise UnableToLockProcessorError, "Unable to get a lock on #{processor_name} #{@track_entry_id}" 88 | end 89 | end 90 | 91 | def create_table_if_not_exists 92 | unless tracker_table_exists? 93 | EventSourcery.logger.info { "Projector tracker missing - attempting to create 'projector_tracker' table" } 94 | EventSourcery::Postgres::Schema.create_projector_tracker(db: @db_connection, table_name: @table_name) 95 | end 96 | end 97 | 98 | def create_track_entry_if_not_exists(processor_name) 99 | track_entry = table.where(name: processor_name.to_s).first 100 | @track_entry_id = if track_entry 101 | track_entry[:id] 102 | else 103 | table.insert(name: processor_name.to_s, last_processed_event_id: 0) 104 | end 105 | end 106 | 107 | def table 108 | @db_connection[@table_name] 109 | end 110 | 111 | def tracker_table_exists? 112 | @db_connection.table_exists?(@table_name) 113 | end 114 | end 115 | end 116 | end 117 | -------------------------------------------------------------------------------- /spec/spec_helper.rb: -------------------------------------------------------------------------------- 1 | # This file was generated by the `rspec --init` command. Conventionally, all 2 | # specs live under a `spec` directory, which RSpec adds to the `$LOAD_PATH`. 3 | # The generated `.rspec` file contains `--require spec_helper` which will cause 4 | # this file to always be loaded, without a need to explicitly require it in any 5 | # files. 6 | # 7 | # Given that it is always loaded, you are encouraged to keep this file as 8 | # light-weight as possible. Requiring heavyweight dependencies from this file 9 | # will add to the boot time of your test suite on EVERY test run, even for an 10 | # individual file that may not need all of that loaded. Instead, consider making 11 | # a separate helper file that requires the additional dependencies and performs 12 | # the additional setup, and require it from the spec files that actually need 13 | # it. 
14 | # 15 | # See http://rubydoc.info/gems/rspec-core/RSpec/Core/Configuration 16 | RSpec.configure do |config| 17 | # rspec-expectations config goes here. You can use an alternate 18 | # assertion/expectation library such as wrong or the stdlib/minitest 19 | # assertions if you prefer. 20 | config.expect_with :rspec do |expectations| 21 | # This option will default to `true` in RSpec 4. It makes the `description` 22 | # and `failure_message` of custom matchers include text for helper methods 23 | # defined using `chain`, e.g.: 24 | # be_bigger_than(2).and_smaller_than(4).description 25 | # # => "be bigger than 2 and smaller than 4" 26 | # ...rather than: 27 | # # => "be bigger than 2" 28 | expectations.include_chain_clauses_in_custom_matcher_descriptions = true 29 | end 30 | 31 | # rspec-mocks config goes here. You can use an alternate test double 32 | # library (such as bogus or mocha) by changing the `mock_with` option here. 33 | config.mock_with :rspec do |mocks| 34 | # Prevents you from mocking or stubbing a method that does not exist on 35 | # a real object. This is generally recommended, and will default to 36 | # `true` in RSpec 4. 37 | mocks.verify_partial_doubles = true 38 | end 39 | 40 | # This option will default to `:apply_to_host_groups` in RSpec 4 (and will 41 | # have no way to turn it off -- the option exists only for backwards 42 | # compatibility in RSpec 3). It causes shared context metadata to be 43 | # inherited by the metadata hash of host groups and examples, rather than 44 | # triggering implicit auto-inclusion in groups with matching metadata. 45 | config.shared_context_metadata_behavior = :apply_to_host_groups 46 | 47 | # This allows you to limit a spec run to individual examples or groups 48 | # you care about by tagging them with `:focus` metadata. When nothing 49 | # is tagged with `:focus`, all examples get run. RSpec also provides 50 | # aliases for `it`, `describe`, and `context` that include `:focus` 51 | # metadata: `fit`, `fdescribe` and `fcontext`, respectively. 52 | config.filter_run_when_matching :focus 53 | 54 | # Allows RSpec to persist some state between runs in order to support 55 | # the `--only-failures` and `--next-failure` CLI options. We recommend 56 | # you configure your source control system to ignore this file. 57 | config.example_status_persistence_file_path = ".rspec_status" 58 | 59 | # Limits the available syntax to the non-monkey patched syntax that is 60 | # recommended. For more details, see: 61 | # - http://rspec.info/blog/2012/06/rspecs-new-expectation-syntax/ 62 | # - http://www.teaisaweso.me/blog/2013/05/27/rspecs-new-message-expectation-syntax/ 63 | # - http://rspec.info/blog/2014/05/notable-changes-in-rspec-3/#zero-monkey-patching-mode 64 | config.disable_monkey_patching! 65 | 66 | # This setting enables warnings. It's recommended, but in some cases may 67 | # be too noisy due to issues in dependencies. 68 | config.warnings = true 69 | 70 | # Many RSpec users commonly either run the entire suite or an individual 71 | # file, and it's useful to allow more verbose output when running an 72 | # individual spec file. 73 | # if config.files_to_run.one? 74 | # Use the documentation formatter for detailed output, 75 | # unless a formatter has already been configured 76 | # (e.g. via a command-line flag). 77 | # config.default_formatter = "doc" 78 | # end 79 | 80 | # Print the 10 slowest examples and example groups at the 81 | # end of the spec run, to help surface which specs are running 82 | # particularly slow. 
83 | # config.profile_examples = 10 84 | 85 | # Run specs in random order to surface order dependencies. If you find an 86 | # order dependency and want to debug it, you can fix the order by providing 87 | # the seed, which is printed after each run. 88 | # --seed 1234 89 | config.order = :random 90 | 91 | # Seed global randomization in this process using the `--seed` CLI option. 92 | # Setting this allows you to use `--seed` to deterministically reproduce 93 | # test failures related to randomization by passing the same `--seed` value 94 | # as the one that triggered the failure. 95 | Kernel.srand config.seed 96 | end 97 | 98 | require 'event_sourcery/postgres' 99 | require 'event_sourcery/rspec/event_store_shared_examples' 100 | 101 | Dir.glob(File.join(__dir__, 'support/**/*.rb')) { |f| require f } 102 | -------------------------------------------------------------------------------- /spec/event_sourcery/postgres/tracker_spec.rb: -------------------------------------------------------------------------------- 1 | RSpec.describe EventSourcery::Postgres::Tracker do 2 | subject(:postgres_tracker) { described_class.new(db_connection, table_name: table_name) } 3 | let(:table_name) { :tracker } 4 | let(:processor_name) { 'blah' } 5 | let(:table) { db_connection[table_name] } 6 | let(:track_entry) { table.where(name: processor_name).first } 7 | 8 | after do 9 | release_advisory_locks 10 | end 11 | 12 | def last_processed_event_id 13 | postgres_tracker.last_processed_event_id(processor_name) 14 | end 15 | 16 | def setup_table 17 | db_connection.execute "drop table if exists #{table_name}" 18 | postgres_tracker.setup(processor_name) 19 | end 20 | 21 | describe '#setup' do 22 | before do 23 | db_connection.execute "drop table if exists #{table_name}" 24 | end 25 | 26 | context 'auto create projector tracker enabled' do 27 | it 'creates the table' do 28 | postgres_tracker.setup(processor_name) 29 | expect(db_connection.table_exists?(table_name)).to be_truthy 30 | end 31 | 32 | it "creates an entry for the projector if it doesn't exist" do 33 | postgres_tracker.setup(processor_name) 34 | expect(last_processed_event_id).to eq 0 35 | end 36 | end 37 | 38 | context 'when table_name is a string' do 39 | let(:table_name) { 'tracker' } 40 | 41 | it 'does not raise an error' do 42 | expect { postgres_tracker.setup(processor_name) }.not_to raise_error 43 | end 44 | 45 | it 'creates the table' do 46 | postgres_tracker.setup(processor_name) 47 | expect(db_connection.table_exists?(table_name)).to be_truthy 48 | end 49 | end 50 | 51 | context 'auto create projector tracker disabled' do 52 | before do 53 | allow(EventSourcery::Postgres.config).to receive(:auto_create_projector_tracker).and_return(false) 54 | end 55 | 56 | it 'raises error' do 57 | expect { postgres_tracker.setup(processor_name) }.to raise_error EventSourcery::UnableToLockProcessorError 58 | end 59 | end 60 | end 61 | 62 | describe '#processed_event' do 63 | before { setup_table } 64 | 65 | it 'updates the tracker entry to the given ID' do 66 | postgres_tracker.processed_event(processor_name, 1) 67 | expect(last_processed_event_id).to eq 1 68 | end 69 | end 70 | 71 | describe '#processing_event' do 72 | before { setup_table } 73 | 74 | context 'when the block succeeds' do 75 | it 'marks the event as processed' do 76 | postgres_tracker.processing_event(processor_name, 1) do 77 | end 78 | expect(last_processed_event_id).to eq 1 79 | end 80 | end 81 | 82 | context 'when the block raises' do 83 | it "doesn't mark the event as processed and raises an 
error" do 84 | expect(last_processed_event_id).to eq 0 85 | expect do 86 | postgres_tracker.processing_event(processor_name, 1) do 87 | raise 'boo' 88 | end 89 | end.to raise_error(RuntimeError) 90 | expect(last_processed_event_id).to eq 0 91 | end 92 | end 93 | 94 | context 'unable to lock tracker row' do 95 | let(:another_database_connection) { new_db_connection } 96 | 97 | it 'raises an error' do 98 | expect do 99 | tracker = described_class.new(another_database_connection, table_name: table_name) 100 | tracker.setup(processor_name) 101 | end.to raise_error(EventSourcery::UnableToLockProcessorError) 102 | end 103 | 104 | context 'with obtain_processor_lock: false' do 105 | it "doesn't raises an error" do 106 | expect do 107 | tracker = described_class.new(another_database_connection, table_name: table_name, obtain_processor_lock: false) 108 | tracker.setup(processor_name) 109 | end.to_not raise_error 110 | end 111 | end 112 | 113 | after { release_advisory_locks(another_database_connection) } 114 | end 115 | end 116 | 117 | describe '#last_processed_event_id' do 118 | before { setup_table } 119 | 120 | it 'starts at 0' do 121 | expect(last_processed_event_id).to eq 0 122 | end 123 | 124 | it 'updates as events are processed' do 125 | postgres_tracker.processed_event(processor_name, 1) 126 | expect(last_processed_event_id).to eq 1 127 | end 128 | end 129 | 130 | describe '#reset_last_processed_event_id' do 131 | before { setup_table } 132 | 133 | it 'resets the last processed event back to 0' do 134 | postgres_tracker.processed_event(processor_name, 1) 135 | postgres_tracker.reset_last_processed_event_id(processor_name) 136 | expect(last_processed_event_id).to eq 0 137 | end 138 | end 139 | 140 | describe '#tracked_processors' do 141 | before do 142 | db_connection.execute "drop table if exists #{table_name}" 143 | postgres_tracker.setup 144 | end 145 | 146 | context 'with two tracked processors' do 147 | before do 148 | postgres_tracker.setup(:one) 149 | postgres_tracker.setup(:two) 150 | end 151 | 152 | it 'returns an array of tracked processors' do 153 | expect(postgres_tracker.tracked_processors).to eq ['one', 'two'] 154 | end 155 | end 156 | 157 | context 'with no tracked processors' do 158 | it 'returns an empty array' do 159 | expect(postgres_tracker.tracked_processors).to eq [] 160 | end 161 | end 162 | end 163 | end 164 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Change Log 2 | 3 | All notable changes to this project will be documented in this file. 4 | 5 | The format is based on [Keep a Changelog](http://keepachangelog.com/) 6 | and this project adheres to [Semantic Versioning](http://semver.org/). 7 | 8 | ## [Unreleased] 9 | ## [0.9.1] - 2022-01-20 10 | 11 | ### Changed 12 | 13 | - Removed the use of `**` for the private `EventStore#build_event` 14 | method signature and places that call it. 15 | 16 | ## [0.9.0] - 2021-11-18 17 | 18 | ### Added 19 | 20 | - Test against Ruby 3.0 in the CI build ([#67]). 21 | 22 | ### Changed 23 | 24 | - Use GitHub Actions for the CI build instead of Travis CI ([#66]). 25 | - This project now uses `main` as its default branch ([#68]). 26 | - Documentation updated to refer to `main` and links updated accordingly. 27 | 28 | ### Removed 29 | - Remove Ruby 2.3, 2.4 and 2.5 from the CI test matrix ([#70]). 
30 | 31 | [#66]: https://github.com/envato/event_sourcery-postgres/pull/66 32 | [#67]: https://github.com/envato/event_sourcery-postgres/pull/67 33 | [#68]: https://github.com/envato/event_sourcery-postgres/pull/68 34 | [#70]: https://github.com/envato/event_sourcery-postgres/pull/70 35 | 36 | ## [0.8.1] - 2020-10-02 37 | ### Added 38 | - Add Ruby 2.6 and 2.7 to the CI test matrix. 39 | 40 | ### Removed 41 | - Remove Ruby 2.2 from the CI test matrix. 42 | - Support for Boxen. 43 | 44 | ### Fixed 45 | - Upgrade development dependency Rake to version 13. This resolves 46 | [CVE-2020-8130](https://github.com/advisories/GHSA-jppv-gw3r-w3q8). 47 | 48 | - Resolve warnings raised when running on Ruby 2.7. 49 | 50 | ## [0.8.0] - 2018-08-06 51 | ### Added 52 | - Add an `on_events_recorded` config option, which defaults to a no-op proc, 53 | to handle any app-specific logic after the events are recorded by `EventStore#sink`. 54 | 55 | ## [0.7.0] - 2018-05-23 56 | ### Added 57 | - Add a `projector_transaction_size` config option to control how many events 58 | are processed before the transaction is committed. The default value is 1 to 59 | match the existing behaviour. 60 | 61 | We suggest setting this to match the number of events returned from the event 62 | store subscription. This is [now configurable](https://github.com/envato/event_sourcery/pull/197) 63 | in event_sourcery by configuring `subscription_batch_size`. 64 | 65 | ### Removed 66 | - Remove upper bound version restriction on `sequel` gem. Now accepts versions 67 | 5 and higher. 68 | 69 | ## [0.6.0] - 2018-01-02 70 | ### Changed 71 | 72 | - Only send info log after processing a group of events 73 | 74 | ### Removed 75 | - Remove `processes_events` and `projects_events` as these have been [removed 76 | in event_sourcery](https://github.com/envato/event_sourcery/pull/161). 77 | 78 | ## [0.5.0] - 2017-07-27 79 | - First version of YARD documentation. 80 | - Fix Sequel deprecation by globally loading pg extensions 81 | 82 | ## [0.4.0] - 2017-06-21 83 | ### Changed 84 | - Reactors store the UUID of the event being processed in the `causation_id` 85 | of any emitted events. This replaces the old behaviour of storing the id of the 86 | event being processed in a `_driven_by_event_id` attribute in the emitted 87 | event's body. 88 | 89 | ### Added 90 | - Reactors store the correlation id of the event being processed in the 91 | `correlation_id` of any emitted events. 92 | - Added index on the `events` table for `correlation_id` and `causation_id` 93 | columns. 94 | 95 | ## [0.3.0] - 2017-06-16 96 | ### Changed 97 | - The event store persists the event `correlation_id` and `causation_id`. 98 | To facilitate this `correlation_id` and `causation_id` columns have been 99 | added to the `events` table and the `write_events` function has been 100 | altered. Event Sourcery apps will need to ensure these DB changes have 101 | been applied to use this version of Event Sourcery. 102 | - The emit_events method now accepts typed events instead of symbols 103 | - Remove dynamic emit events methods from reactors (e.g.
emit_item_added) 104 | 105 | ## [0.2.0] - 2017-06-01 106 | ### Changed 107 | - Make `EventSourcery::Postgres::OptimisedEventPollWaiter#shutdown` private 108 | - Updated `EventSourcery::Postgres::OptimisedEventPollWaiter#poll` to ensure that `#shutdown!` is run when an error is raised 109 | or when the loop stops 110 | 111 | ### Added 112 | - Configure projector tracker table name via `EventSourcery::Postgres.configure` 113 | 114 | ## 0.1.0 - 2017-05-26 115 | ### Changed 116 | - Imported code from the [event_sourcery](https://github.com/envato/event_sourcery). 117 | - Postgres related configuration is through `EventSourcery::Postgres.configure` 118 | instead of `EventSourcery.configure`. 119 | 120 | [Unreleased]: https://github.com/envato/event_sourcery-postgres/compare/v0.9.1...HEAD 121 | [0.9.1]: https://github.com/envato/event_sourcery-postgres/compare/v0.9.0...v0.9.1 122 | [0.9.0]: https://github.com/envato/event_sourcery-postgres/compare/v0.8.1...v0.9.0 123 | [0.8.1]: https://github.com/envato/event_sourcery-postgres/compare/v0.8.0...v0.8.1 124 | [0.8.0]: https://github.com/envato/event_sourcery-postgres/compare/v0.7.0...v0.8.0 125 | [0.7.0]: https://github.com/envato/event_sourcery-postgres/compare/v0.6.0...v0.7.0 126 | [0.6.0]: https://github.com/envato/event_sourcery-postgres/compare/v0.5.0...v0.6.0 127 | [0.5.0]: https://github.com/envato/event_sourcery-postgres/compare/v0.4.0...v0.5.0 128 | [0.4.0]: https://github.com/envato/event_sourcery-postgres/compare/v0.3.0...v0.4.0 129 | [0.3.0]: https://github.com/envato/event_sourcery-postgres/compare/v0.2.0...v0.3.0 130 | [0.2.0]: https://github.com/envato/event_sourcery-postgres/compare/v0.1.0...v0.2.0 131 | -------------------------------------------------------------------------------- /spec/event_sourcery/postgres/table_owner_spec.rb: -------------------------------------------------------------------------------- 1 | RSpec.describe EventSourcery::Postgres::TableOwner do 2 | let(:table_owner_class) do 3 | Class.new do 4 | prepend EventSourcery::Postgres::TableOwner 5 | 6 | def initialize(db_connection) 7 | @db_connection = db_connection 8 | end 9 | 10 | table :sales do 11 | column :uuid, 'UUID' 12 | end 13 | end 14 | end 15 | 16 | subject(:table_owner) { table_owner_class.new(db_connection) } 17 | 18 | after { release_advisory_locks } 19 | 20 | describe '#setup' do 21 | before do 22 | db_connection.execute('DROP TABLE IF EXISTS sales') 23 | end 24 | 25 | it 'creates the defined table' do 26 | table_owner.setup 27 | expect(db_connection[:sales].count).to eq 0 28 | end 29 | 30 | context 'with table_prefix set' do 31 | before do 32 | db_connection.execute('DROP TABLE IF EXISTS my_prefix_sales') 33 | table_owner.send(:table_prefix=, :my_prefix) 34 | end 35 | 36 | it 'creates the defined table' do 37 | table_owner.setup 38 | expect(db_connection[:my_prefix_sales].count).to eq 0 39 | end 40 | end 41 | end 42 | 43 | describe '#reset' do 44 | context 'without dependent tables defined' do 45 | before do 46 | db_connection.execute('DROP TABLE IF EXISTS sales') 47 | table_owner.setup 48 | db_connection[:sales].insert(uuid: SecureRandom.uuid) 49 | end 50 | 51 | it 'recreates tables' do 52 | expect(db_connection[:sales].count).to eq 1 53 | table_owner.reset 54 | expect(db_connection[:sales].count).to eq 0 55 | end 56 | end 57 | 58 | context 'with dependent tables defined' do 59 | let(:table_owner_class) do 60 | Class.new do 61 | prepend EventSourcery::Postgres::TableOwner 62 | 63 | def initialize(db_connection) 64 | @db_connection = db_connection 
65 | end 66 | 67 | table :authors do 68 | primary_key :id, type: :Integer 69 | column :uuid, 'UUID' 70 | end 71 | 72 | table :items do 73 | foreign_key :authors_id, :authors 74 | column :created_at, 'timestamp without time zone' 75 | end 76 | end 77 | end 78 | 79 | it 'recreates tables' do 80 | db_connection.execute('DROP TABLE IF EXISTS items') 81 | db_connection.execute('DROP TABLE IF EXISTS authors') 82 | table_owner.setup 83 | db_connection[:authors].insert(id: 1, uuid: SecureRandom.uuid) 84 | db_connection[:items].insert(authors_id: 1, created_at: Time.now) 85 | expect(db_connection[:authors].count).to eq 1 86 | expect(db_connection[:items].count).to eq 1 87 | table_owner.reset 88 | expect(db_connection[:authors].count).to eq 0 89 | expect(db_connection[:items].count).to eq 0 90 | end 91 | end 92 | 93 | context 'with table_prefix set' do 94 | before do 95 | db_connection.execute('DROP TABLE IF EXISTS my_prefix_sales') 96 | table_owner.send(:table_prefix=, :my_prefix) 97 | table_owner.setup 98 | db_connection[:my_prefix_sales].insert(uuid: SecureRandom.uuid) 99 | end 100 | 101 | it 'recreates tables' do 102 | expect(db_connection[:my_prefix_sales].count).to eq 1 103 | table_owner.reset 104 | expect(db_connection[:my_prefix_sales].count).to eq 0 105 | end 106 | end 107 | end 108 | 109 | describe '#table' do 110 | context 'when one table is defined' do 111 | context 'with no arguments' do 112 | it 'returns a dataset' do 113 | expect(table_owner.send(:table)).to be_a Sequel::Postgres::Dataset 114 | end 115 | end 116 | 117 | context 'with the defined table as argument' do 118 | it 'returns a dataset' do 119 | expect(table_owner.send(:table, :sales)).to be_a Sequel::Postgres::Dataset 120 | end 121 | end 122 | 123 | context 'with the wrong name as argument' do 124 | it 'raises an error' do 125 | expect { table_owner.send(:table, :some_non_existent_table) }.to raise_error(EventSourcery::Postgres::TableOwner::NoSuchTableError) 126 | end 127 | end 128 | 129 | context 'when table_prefix is set' do 130 | before do 131 | table_owner.send(:table_prefix=, :my_prefix) 132 | end 133 | 134 | it 'returns a dataset with the prefixed name' do 135 | expect(table_owner.send(:table, :sales).opts[:from]).to eq([:my_prefix_sales]) 136 | end 137 | end 138 | end 139 | 140 | context 'when multiple tables are defined' do 141 | let(:table_owner_class) do 142 | Class.new do 143 | prepend EventSourcery::Postgres::TableOwner 144 | 145 | def initialize(db_connection) 146 | @db_connection = db_connection 147 | end 148 | 149 | table :sales do 150 | column :uuid, 'UUID' 151 | end 152 | 153 | table :invoices do 154 | column :uuid, 'UUID' 155 | end 156 | end 157 | end 158 | 159 | context 'with no arguments' do 160 | it 'raises an error' do 161 | expect { table_owner.send(:table) }.to raise_error(EventSourcery::Postgres::TableOwner::DefaultTableError) 162 | end 163 | end 164 | 165 | context 'with one of the defined tables as argument' do 166 | it 'returns a dataset' do 167 | expect(table_owner.send(:table, :invoices)).to be_a Sequel::Postgres::Dataset 168 | end 169 | end 170 | 171 | context 'with the wrong name as argument' do 172 | it 'raises an error' do 173 | expect { table_owner.send(:table, :some_non_existent_table) }.to raise_error(EventSourcery::Postgres::TableOwner::NoSuchTableError) 174 | end 175 | end 176 | end 177 | end 178 | end 179 | -------------------------------------------------------------------------------- /spec/event_sourcery/postgres/projector_spec.rb:
-------------------------------------------------------------------------------- 1 | RSpec.describe EventSourcery::Postgres::Projector do 2 | let(:projector_class) do 3 | Class.new do 4 | include EventSourcery::Postgres::Projector 5 | processor_name 'test_processor' 6 | 7 | table :profiles do 8 | column :user_uuid, 'UUID NOT NULL' 9 | column :terms_accepted, 'BOOLEAN DEFAULT FALSE' 10 | end 11 | 12 | process TermsAccepted do |event| 13 | @processed_event = event 14 | table.insert(user_uuid: event.aggregate_id, 15 | terms_accepted: true) 16 | end 17 | 18 | attr_reader :processed_event 19 | end 20 | end 21 | let(:projector_name) { 'my_projector' } 22 | let(:tracker) { EventSourcery::Postgres::Tracker.new(db_connection) } 23 | let(:events) { [] } 24 | def new_projector(&block) 25 | Class.new do 26 | include EventSourcery::Postgres::Projector 27 | processor_name 'test_processor' 28 | 29 | table :profiles do 30 | column :user_uuid, 'UUID NOT NULL' 31 | column :terms_accepted, 'BOOLEAN DEFAULT FALSE' 32 | end 33 | 34 | class_eval(&block) if block_given? 35 | 36 | attr_reader :processed_event 37 | end.new(tracker: tracker, db_connection: db_connection) 38 | end 39 | 40 | let(:projector_transaction_size) { 1 } 41 | subject(:projector) do 42 | projector_class.new( 43 | tracker: tracker, 44 | db_connection: db_connection, 45 | transaction_size: projector_transaction_size, 46 | ) 47 | end 48 | let(:aggregate_id) { SecureRandom.uuid } 49 | 50 | after { release_advisory_locks } 51 | 52 | describe '.new' do 53 | let(:projections_database) { double } 54 | let(:event_tracker) { double } 55 | 56 | before do 57 | allow(EventSourcery::Postgres::Tracker).to receive(:new).with(projections_database).and_return(event_tracker) 58 | allow(projections_database).to receive(:extension).with(:pg_json) 59 | 60 | EventSourcery::Postgres.configure do |config| 61 | config.projections_database = projections_database 62 | end 63 | end 64 | 65 | subject(:projector) { projector_class.new } 66 | 67 | it 'uses the configured projections database by default' do 68 | expect(projector.instance_variable_get('@db_connection')).to eq projections_database 69 | end 70 | 71 | it 'uses the inferred event tracker database by default' do 72 | expect(projector.instance_variable_get('@tracker')).to eq event_tracker 73 | end 74 | end 75 | 76 | describe '.projector_name' do 77 | it 'delegates to processor_name' do 78 | expect(projector_class.projector_name).to eq 'test_processor' 79 | end 80 | end 81 | 82 | describe '#project' do 83 | let(:event) { ItemAdded.new } 84 | 85 | it 'processes events with custom classes' do 86 | projector = new_projector do 87 | project ItemAdded do |event| 88 | @processed_event = event 89 | end 90 | end 91 | 92 | projector.project(event) 93 | 94 | expect(projector.processed_event).to eq(event) 95 | end 96 | end 97 | 98 | describe '#process' do 99 | before { projector.reset } 100 | 101 | let(:event) { EventSourcery::Event.new(body: {}, aggregate_id: aggregate_id, type: :terms_accepted, id: 1) } 102 | 103 | it "processes events it's interested in" do 104 | projector.process(event) 105 | expect(projector.processed_event).to eq(event) 106 | end 107 | end 108 | 109 | describe '#subscribe_to' do 110 | let(:event_store) { double(:event_store) } 111 | let(:events) { [new_event(id: 1), new_event(id: 2)] } 112 | let(:subscription_master) { spy(EventSourcery::EventStore::SignalHandlingSubscriptionMaster) } 113 | let(:projector_class) do 114 | Class.new do 115 | include EventSourcery::Postgres::Projector 116 | processor_name 
'test_processor' 117 | 118 | table :profiles do 119 | column :user_uuid, 'UUID NOT NULL' 120 | column :terms_accepted, 'BOOLEAN DEFAULT FALSE' 121 | end 122 | 123 | attr_accessor :raise_error, :raise_error_on_event_id 124 | 125 | process do |event| 126 | table.insert(user_uuid: event.aggregate_id, 127 | terms_accepted: true) 128 | raise 'boo' if raise_error || raise_error_on_event_id == event.id 129 | end 130 | end 131 | end 132 | 133 | before do 134 | allow(event_store).to receive(:subscribe).and_yield(events).once 135 | projector.reset 136 | end 137 | 138 | it 'marks the safe shutdown points' do 139 | projector.subscribe_to(event_store, subscription_master: subscription_master) 140 | expect(subscription_master).to have_received(:shutdown_if_requested).twice 141 | end 142 | 143 | context 'when an error occurs processing the event' do 144 | it 'rolls back the projected changes' do 145 | projector.raise_error = true 146 | projector.subscribe_to(event_store, subscription_master: subscription_master) rescue nil 147 | expect(db_connection[:profiles].count).to eq 0 148 | end 149 | end 150 | 151 | context 'with a transaction size of 1' do 152 | it 'rolls back the projected changes for the single event' do 153 | projector.raise_error_on_event_id = 2 154 | projector.subscribe_to(event_store, subscription_master: subscription_master) rescue nil 155 | expect(db_connection[:profiles].count).to eq 1 156 | end 157 | end 158 | 159 | context 'with a transaction size of 2' do 160 | let(:projector_transaction_size) { 2 } 161 | 162 | it 'rolls back the projected changes for both events' do 163 | projector.raise_error_on_event_id = 2 164 | projector.subscribe_to(event_store, subscription_master: subscription_master) rescue nil 165 | expect(db_connection[:profiles].count).to eq 0 166 | end 167 | end 168 | 169 | context 'when an error occurs tracking the position' do 170 | before do 171 | projector.raise_error = false 172 | allow(tracker).to receive(:processed_event).and_raise(StandardError) 173 | end 174 | 175 | it 'rolls back the projected changes' do 176 | projector.subscribe_to(event_store, subscription_master: subscription_master) rescue nil 177 | expect(db_connection[:profiles].count).to eq 0 178 | end 179 | end 180 | 181 | describe 'logging' do 182 | it 'logs debug messages for each processed event' do 183 | expect(EventSourcery.logger).to receive(:debug) do |&block| 184 | expect(block.call).to eq "[test_processor] Processed event: #{events[0].inspect}" 185 | end 186 | expect(EventSourcery.logger).to receive(:debug) do |&block| 187 | expect(block.call).to eq "[test_processor] Processed event: #{events[1].inspect}" 188 | end 189 | 190 | projector.subscribe_to(event_store, subscription_master: subscription_master) 191 | end 192 | 193 | it 'logs an info message with the id of the last processed event' do 194 | expect(EventSourcery.logger).to receive(:info) do |&block| 195 | expect(block.call).to eq "[test_processor] Processed up to event id: 2" 196 | end 197 | 198 | projector.subscribe_to(event_store, subscription_master: subscription_master) 199 | end 200 | end 201 | end 202 | end 203 | -------------------------------------------------------------------------------- /lib/event_sourcery/postgres/event_store.rb: -------------------------------------------------------------------------------- 1 | module EventSourcery 2 | module Postgres 3 | class EventStore 4 | include EventSourcery::EventStore::EachByRange 5 | 6 | def initialize(db_connection, 7 | events_table_name: 
EventSourcery::Postgres.config.events_table_name, 8 | lock_table: EventSourcery::Postgres.config.lock_table_to_guarantee_linear_sequence_id_growth, 9 | write_events_function_name: EventSourcery::Postgres.config.write_events_function_name, 10 | event_builder: EventSourcery.config.event_builder, 11 | on_events_recorded: EventSourcery::Postgres.config.on_events_recorded) 12 | @db_connection = db_connection 13 | @events_table_name = events_table_name 14 | @write_events_function_name = write_events_function_name 15 | @lock_table = lock_table 16 | @event_builder = event_builder 17 | @on_events_recorded = on_events_recorded 18 | end 19 | 20 | # Like water flowing into a sink, eventually it will go down the drain 21 | # into the goodness of the plumbing system. 22 | # So too will the given events you put in this 'sink', except the plumbing 23 | # system is the database events table. 24 | # This can raise db connection errors. 25 | # 26 | # @param event_or_events the event or events to save 27 | # @param expected_version the version to save with the event, default nil 28 | # 29 | # @raise [DatabaseError] if something goes wrong with the database 30 | # @raise [ConcurrencyError] if there was a concurrency conflict 31 | def sink(event_or_events, expected_version: nil) 32 | events = Array(event_or_events) 33 | aggregate_ids = events.map(&:aggregate_id).uniq 34 | raise AtomicWriteToMultipleAggregatesNotSupported unless aggregate_ids.count == 1 35 | sql = write_events_sql(aggregate_ids.first, events, expected_version) 36 | @db_connection.run(sql) 37 | log_events_saved(events) 38 | on_events_recorded.call(events) 39 | true 40 | rescue Sequel::DatabaseError => e 41 | if e.message =~ /Concurrency conflict/ 42 | raise ConcurrencyError, "expected version was not #{expected_version}. Error: #{e.message}" 43 | else 44 | raise 45 | end 46 | end 47 | 48 | # Get the next set of events from the given event id. You can 49 | # specify event types and a limit. 50 | # Default limit is 1000 and the default event types will be all. 51 | # 52 | # @param id the event id to get next events from 53 | # @param event_types the event types to filter, default nil = all 54 | # @param limit the limit to the results, default 1000 55 | # 56 | # @return [Array] array of found events 57 | def get_next_from(id, event_types: nil, limit: 1000) 58 | query = events_table. 59 | order(:id). 60 | where(Sequel.lit('id >= ?', id)). 61 | limit(limit) 62 | query = query.where(type: event_types) if event_types 63 | query.map { |event_row| build_event(event_row) } 64 | end 65 | 66 | # Get the last event id for the given event types. 67 | # 68 | # @param event_types the type of event(s) to filter 69 | # 70 | # @return the latest event id 71 | def latest_event_id(event_types: nil) 72 | latest_event = events_table 73 | latest_event = latest_event.where(type: event_types) if event_types 74 | latest_event = latest_event.order(:id).last 75 | if latest_event 76 | latest_event[:id] 77 | else 78 | 0 79 | end 80 | end 81 | 82 | # Get the events for a given aggregate id. 83 | # 84 | # @param aggregate_id the aggregate id to filter for 85 | # 86 | # @return [Array] of found events 87 | def get_events_for_aggregate_id(aggregate_id) 88 | events_table.where(aggregate_id: aggregate_id.to_str).order(:version).map do |event_hash| 89 | build_event(event_hash) 90 | end 91 | end 92 | 93 | # Subscribe to events. 94 | # 95 | # @param from_id subscribe from a starting event id. default will be from the start.
96 | # @param event_types the event_types to subscribe to, default all. 97 | # @param after_listen the after listen callback block. default nil. 98 | # @param subscription_master the subscription master block 99 | def subscribe(from_id:, event_types: nil, after_listen: nil, subscription_master:, &block) 100 | poll_waiter = OptimisedEventPollWaiter.new(db_connection: @db_connection, after_listen: after_listen) 101 | args = { 102 | poll_waiter: poll_waiter, 103 | event_store: self, 104 | from_event_id: from_id, 105 | event_types: event_types, 106 | events_table_name: @events_table_name, 107 | subscription_master: subscription_master, 108 | on_new_events: block 109 | } 110 | EventSourcery::EventStore::Subscription.new(**args).tap(&:start) 111 | end 112 | 113 | private 114 | 115 | attr_reader :on_events_recorded 116 | 117 | def events_table 118 | @db_connection[@events_table_name] 119 | end 120 | 121 | def build_event(data) 122 | @event_builder.build(**data) 123 | end 124 | 125 | def write_events_sql(aggregate_id, events, expected_version) 126 | bodies = sql_literal_array(events, 'json', &:body) 127 | types = sql_literal_array(events, 'varchar', &:type) 128 | created_ats = sql_literal_array(events, 'timestamp without time zone', &:created_at) 129 | event_uuids = sql_literal_array(events, 'uuid', &:uuid) 130 | correlation_ids = sql_literal_array(events, 'uuid', &:correlation_id) 131 | causation_ids = sql_literal_array(events, 'uuid', &:causation_id) 132 | <<-SQL 133 | select #{@write_events_function_name}( 134 | #{sql_literal(aggregate_id.to_str, 'uuid')}, 135 | #{types}, 136 | #{sql_literal(expected_version, 'int')}, 137 | #{bodies}, 138 | #{created_ats}, 139 | #{event_uuids}, 140 | #{correlation_ids}, 141 | #{causation_ids}, 142 | #{sql_literal(@lock_table, 'boolean')} 143 | ); 144 | SQL 145 | end 146 | 147 | def sql_literal_array(events, type, &block) 148 | sql_array = events.map do |event| 149 | to_sql_literal(block.call(event)) 150 | end.join(', ') 151 | "array[#{sql_array}]::#{type}[]" 152 | end 153 | 154 | def sql_literal(value, type) 155 | "#{to_sql_literal(value)}::#{type}" 156 | end 157 | 158 | def to_sql_literal(value) 159 | return 'null' unless value 160 | wrapped_value = if Time === value 161 | value.iso8601(6) 162 | elsif Hash === value 163 | Sequel.pg_json(value) 164 | else 165 | value 166 | end 167 | @db_connection.literal(wrapped_value) 168 | end 169 | 170 | def log_events_saved(events) 171 | events.each do |event| 172 | EventSourcery.logger.debug { "Saved event: #{event.inspect}" } 173 | end 174 | end 175 | end 176 | end 177 | end 178 | -------------------------------------------------------------------------------- /script/demonstrate_event_sequence_id_gaps.rb: -------------------------------------------------------------------------------- 1 | # Demonstrates that sequence IDs may not be inserted linearly with concurrent 2 | # writers. 3 | # 4 | # This script writes events in parallel from a number of forked processes, 5 | # writing events in a continuous loop until the program is interrupted. 6 | # The parent process detects gaps in sequence IDs by selecting the last 2 7 | # events based on sequence ID. A gap is detected when the 2 IDs returned from 8 | # that query aren't sequential. The script will proceed to execute 2 subsequent 9 | # queries to see if they show up in the time it takes to complete those before 10 | # moving on.
11 | # 12 | # An easier way to demonstrate this is by using 2 psql consoles: 13 | # 14 | # - Simulate a transaction taking a long time to commit: 15 | # ``` 16 | # begin; 17 | # insert into events (..) values (..); 18 | # ``` 19 | # - Then, in another console: 20 | # ``` 21 | # insert into events (..) values (..); 22 | # select * from events; 23 | # ``` 24 | # 25 | # The result is that event sequence ID 2 is visible, but only when the first 26 | # transaction commits is event sequence ID 1 visible. 27 | # 28 | # Why does this happen? 29 | # 30 | # Sequences in Postgres (and most other DBs) are not transactional; changes 31 | # to the sequence are visible to other transactions immediately. Also, inserts 32 | # from the forked writers may be executed in parallel by postgres. 33 | # 34 | # The process of inserting into a table that has a sequence or serial column is 35 | # to first get the next sequence ID (changing global state), then perform the 36 | # insert statement and later commit. In between these 2 steps the sequence ID 37 | # is taken but not visible in the table until the insert statement is 38 | # committed. Gaps in sequence IDs occur when a process takes a sequence ID and 39 | # commits it while another process is in between those 2 steps. 40 | # 41 | # This means another transaction could have taken the next sequence 42 | # ID and committed before that one commits, resulting in a gap in sequence IDs 43 | # when reading. 44 | # 45 | # Why is this a problem? 46 | # 47 | # Event stream processors use the sequence ID to keep track of where they're up to 48 | # in the events table. If a projector processes an event with sequence ID n, it 49 | # assumes that the next event it needs to process will have a sequence ID > n. 50 | # This approach isn't reliable when sequence IDs appear non-linearly, making it 51 | # possible for event stream processors to skip over events. 52 | # 53 | # How does EventSourcery deal with this? 54 | # 55 | # EventSourcery uses a transaction level advisory lock to synchronise inserts 56 | # to the events table within the writeEvents function. Alternatives: 57 | # 58 | # - Write events from 1 process only (serialize at the application level) 59 | # - Detect gaps when reading events and allow time for in-flight transactions 60 | # (the gaps) to commit. 61 | # - Built-in eventual consistency. Selects would be restricted to events older 62 | # than 500ms-1s or the transaction timeout to give enough time for in-flight 63 | # transactions to commit. 64 | # - Only query events when catching up, after that rely on events to be 65 | # delivered through the pub/sub mechanism. Given events would be received out 66 | # of order under concurrent writes, there's potential for processors to process 67 | # a given event twice if they shut down after processing a sequence that was 68 | # part of a gap. 69 | # 70 | # Usage: 71 | # 72 | # ❯ bundle exec ruby script/demonstrate_event_sequence_id_gaps.rb 73 | # 89847: starting to write events89846: starting to write events 74 
 75 | # 89848: starting to write events 76 | # 89849: starting to write events 77 | # 89850: starting to write events 78 | # GAP: 1 missing sequence IDs. 78 != 76 + 1. Missing events showed up after 1 subsequent query. IDs: [77] 79 | # GAP: 1 missing sequence IDs. 168 != 166 + 1. Missing events showed up after 1 subsequent query. IDs: [167] 80 | # GAP: 1 missing sequence IDs. 274 != 272 + 1. Missing events showed up after 1 subsequent query. IDs: [273] 81 | # GAP: 1 missing sequence IDs. 341 != 339 + 1.
Missing events showed up after 1 subsequent query. IDs: [340] 82 | # GAP: 1 missing sequence IDs. 461 != 459 + 1. Missing events showed up after 1 subsequent query. IDs: [460] 83 | # GAP: 1 missing sequence IDs. 493 != 491 + 1. Missing events showed up after 1 subsequent query. IDs: [492] 84 | # GAP: 2 missing sequence IDs. 621 != 618 + 1. Missing events showed up after 1 subsequent query. IDs: [619, 620] 85 | 86 | require 'sequel' 87 | require 'securerandom' 88 | require 'event_sourcery/postgres' 89 | 90 | def connect 91 | pg_uri = ENV.fetch('POSTGRESQL_URL', 'postgres://127.0.0.1:5432/').dup 92 | pg_uri << 'event_sourcery_test' 93 | Sequel.connect(pg_uri) 94 | end 95 | 96 | EventSourcery.logger.level = :info 97 | 98 | def new_event 99 | EventSourcery::Event.new(type: :item_added, 100 | aggregate_id: SecureRandom.uuid, 101 | body: { 'something' => 'simple' }) 102 | end 103 | 104 | def create_events_schema(db) 105 | db.execute 'drop table if exists events' 106 | db.execute 'drop table if exists aggregates' 107 | EventSourcery::Postgres::Schema.create_event_store(db: db) 108 | end 109 | 110 | db = connect 111 | create_events_schema(db) 112 | db.disconnect 113 | sleep 0.3 114 | 115 | NUM_WRITER_PROCESSES = 5 116 | NUM_WRITER_PROCESSES.times do 117 | fork do |pid| 118 | stop = false 119 | Signal.trap(:INT) { stop = true } 120 | db = connect 121 | # when lock_table is set to true an advisory lock is used to synchronise 122 | # inserts and no gaps are detected 123 | event_store = EventSourcery::Postgres::EventStore.new(db, lock_table: false) 124 | puts "#{Process.pid}: starting to write events" 125 | event_store.sink(new_event) until stop 126 | end 127 | end 128 | 129 | stop = false 130 | Signal.trap(:INT) { stop = true } 131 | 132 | def wait_for_missing_ids(db, first_sequence, last_sequence, attempt: 1) 133 | missing_ids = db[:events].where(Sequel.lit('id > ? AND id < ?', first_sequence, last_sequence)).order(:id).map {|e| e[:id] } 134 | expected_missing_ids = (first_sequence+1)..(last_sequence-1) 135 | if missing_ids == expected_missing_ids.to_a 136 | print "Missing events showed up after #{attempt} subsequent query. IDs: #{missing_ids}" 137 | else 138 | if attempt < 2 139 | wait_for_missing_ids(db, first_sequence, last_sequence, attempt: attempt + 1) 140 | else 141 | print "Missing events didn't show up after #{attempt} subsequent queries" 142 | end 143 | end 144 | end 145 | 146 | until stop 147 | 148 | # query for the last 2 sequences in the events table 149 | first_sequence, last_sequence = *db[:events]. 150 | order(Sequel.desc(:id)). 151 | select(:id). 152 | limit(2). 153 | map { |e| e[:id] }. 154 | reverse 155 | 156 | next if first_sequence.nil? || last_sequence.nil? 157 | 158 | if last_sequence != first_sequence + 1 159 | num_missing = last_sequence - first_sequence - 1 160 | print "GAP: #{num_missing} missing sequence IDs. #{last_sequence} != #{first_sequence} + 1. " 161 | wait_for_missing_ids(db, first_sequence, last_sequence) 162 | puts 163 | end 164 | end 165 | 166 | Process.waitall 167 | 168 | puts 169 | puts 'Looking for gaps in sequence IDs in events table:' 170 | ids = db[:events].select(:id).order(:id).all.map { |e| e[:id] } 171 | expected_ids = (ids.min..ids.max).to_a 172 | missing_ids = (expected_ids - ids) 173 | if missing_ids.empty? 
174 | puts 'No remaining gaps' 175 | else 176 | missing_ids.each do |id| 177 | puts "Unable to find row with sequence ID #{id}" 178 | end 179 | end 180 | -------------------------------------------------------------------------------- /lib/event_sourcery/postgres/schema.rb: -------------------------------------------------------------------------------- 1 | module EventSourcery 2 | module Postgres 3 | module Schema 4 | module_function 5 | 6 | # This will create the event store tables and functions 7 | # (events, aggregates and the create or update functions) 8 | # for the given Postgres database. 9 | # The default will be the one specified in the config. 10 | # 11 | # @param db the Postgres database to use 12 | def create_event_store(db: EventSourcery::Postgres.config.event_store_database, 13 | events_table_name: EventSourcery::Postgres.config.events_table_name, 14 | aggregates_table_name: EventSourcery::Postgres.config.aggregates_table_name, 15 | write_events_function_name: EventSourcery::Postgres.config.write_events_function_name) 16 | create_events(db: db, table_name: events_table_name) 17 | create_aggregates(db: db, table_name: aggregates_table_name) 18 | create_or_update_functions(db: db, events_table_name: events_table_name, function_name: write_events_function_name, aggregates_table_name: aggregates_table_name) 19 | end 20 | 21 | # Create the events table. Needs the database and the table name. 22 | # The defaults will be what's specified in config. 23 | # 24 | # @param db the Postgres database to use 25 | # @param table_name the name of the events table 26 | def create_events(db: EventSourcery::Postgres.config.event_store_database, 27 | table_name: EventSourcery::Postgres.config.events_table_name) 28 | db.run 'CREATE EXTENSION IF NOT EXISTS "uuid-ossp"' 29 | db.create_table(table_name) do 30 | primary_key :id, type: :Bignum 31 | column :uuid, :uuid, null: false, default: Sequel.lit('uuid_generate_v4()') 32 | column :aggregate_id, :uuid, null: false 33 | column :type, :varchar, null: false, size: 255 34 | column :body, :json, null: false 35 | column :version, :bigint, null: false 36 | column :correlation_id, :uuid 37 | column :causation_id, :uuid 38 | column :created_at, :'timestamp without time zone', null: false, default: Sequel.lit("(now() at time zone 'utc')") 39 | index [:aggregate_id, :version], unique: true 40 | index :uuid, unique: true 41 | index :type 42 | index :correlation_id 43 | index :causation_id 44 | index :created_at 45 | end 46 | end 47 | 48 | # Create the aggregates table. Needs the database and the table name. 49 | # The defaults will be what's specified in config. 50 | # 51 | # @param db the Postgres database to use 52 | # @param table_name the name of the aggregates table 53 | def create_aggregates(db: EventSourcery::Postgres.config.event_store_database, 54 | table_name: EventSourcery::Postgres.config.aggregates_table_name) 55 | db.create_table(table_name) do 56 | uuid :aggregate_id, primary_key: true 57 | column :version, :bigint, default: 1 58 | end 59 | end 60 | 61 | # Create the 'create or update' functions. 62 | # Needs the database, table name, function name and aggregates table name. 63 | # The defaults will be what's specified in config.
64 | # 65 | # @param db the Postgres database to use 66 | # @param function_name the name of the write events function 67 | # @param events_table_name the name of the events table 68 | # @param aggregates_table_name the name of the aggregates table 69 | def create_or_update_functions(db: EventSourcery::Postgres.config.event_store_database, 70 | function_name: EventSourcery::Postgres.config.write_events_function_name, 71 | events_table_name: EventSourcery::Postgres.config.events_table_name, 72 | aggregates_table_name: EventSourcery::Postgres.config.aggregates_table_name) 73 | db.run <<-SQL 74 | create or replace function #{function_name}(_aggregateId uuid, 75 | _eventTypes varchar[], 76 | _expectedVersion int, 77 | _bodies json[], 78 | _createdAtTimes timestamp without time zone[], 79 | _eventUUIDs uuid[], 80 | _correlationIds uuid[], 81 | _causationIds uuid[], 82 | _lockTable boolean) returns void as $$ 83 | declare 84 | currentVersion int; 85 | body json; 86 | eventVersion int; 87 | eventId text; 88 | index int; 89 | newVersion int; 90 | numEvents int; 91 | createdAt timestamp without time zone; 92 | begin 93 | numEvents := array_length(_bodies, 1); 94 | select version into currentVersion from #{aggregates_table_name} where aggregate_id = _aggregateId; 95 | if not found then 96 | -- when we have no existing version for this aggregate 97 | if _expectedVersion = 0 or _expectedVersion is null then 98 | -- set the version to 1 if expected version is null or 0 99 | insert into #{aggregates_table_name}(aggregate_id, version) values(_aggregateId, numEvents); 100 | currentVersion := 0; 101 | else 102 | raise 'Concurrency conflict. Current version: 0, expected version: %', _expectedVersion; 103 | end if; 104 | else 105 | if _expectedVersion is null then 106 | -- automatically increment the version 107 | update #{aggregates_table_name} set version = version + numEvents where aggregate_id = _aggregateId returning version into newVersion; 108 | currentVersion := newVersion - numEvents; 109 | else 110 | -- increment the version if it's at our expected version 111 | update #{aggregates_table_name} set version = version + numEvents where aggregate_id = _aggregateId and version = _expectedVersion; 112 | if not found then 113 | -- version was not at expected_version, raise an error. 114 | -- currentVersion may not equal what it did in the database when the 115 | -- above update statement is executed (it may have been incremented by another 116 | -- process) 117 | raise 'Concurrency conflict. Last known current version: %, expected version: %', currentVersion, _expectedVersion; 118 | end if; 119 | end if; 120 | end if; 121 | index := 1; 122 | eventVersion := currentVersion + 1; 123 | if _lockTable then 124 | -- Ensure this transaction is the only one writing events to guarantee 125 | -- linear growth of sequence IDs. 126 | -- Any value that won't conflict with other advisory locks will work. 127 | -- The Postgres tracker currently obtains an advisory lock using its 128 | -- integer row ID, so values 1 to the number of ESPs in the system would 129 | -- be taken if the tracker is running in the same database as your 130 | -- projections.
131 | perform pg_advisory_xact_lock(-1); 132 | end if; 133 | foreach body IN ARRAY(_bodies) 134 | loop 135 | if _createdAtTimes[index] is not null then 136 | createdAt := _createdAtTimes[index]; 137 | else 138 | createdAt := now() at time zone 'utc'; 139 | end if; 140 | 141 | insert into #{events_table_name} 142 | (uuid, aggregate_id, type, body, version, correlation_id, causation_id, created_at) 143 | values 144 | ( 145 | _eventUUIDs[index], 146 | _aggregateId, 147 | _eventTypes[index], 148 | body, 149 | eventVersion, 150 | _correlationIds[index], 151 | _causationIds[index], 152 | createdAt 153 | ) 154 | returning id into eventId; 155 | 156 | eventVersion := eventVersion + 1; 157 | index := index + 1; 158 | end loop; 159 | perform pg_notify('new_event', eventId); 160 | end; 161 | $$ language plpgsql; 162 | SQL 163 | end 164 | 165 | # Create the projector tracker table. Needs the database and the table name. 166 | # The defaults will be what's specified in config. 167 | # 168 | # @param db the Postgres database to use 169 | # @param table_name the name of the tracker table 170 | def create_projector_tracker(db: EventSourcery::Postgres.config.projections_database, 171 | table_name: EventSourcery::Postgres.config.tracker_table_name) 172 | db.create_table(table_name) do 173 | primary_key :id, type: :Bignum 174 | column :name, 'varchar(255) not null' 175 | column :last_processed_event_id, 'bigint not null default 0' 176 | index :name, unique: true 177 | end 178 | end 179 | end 180 | end 181 | end 182 | -------------------------------------------------------------------------------- /spec/event_sourcery/postgres/reactor_spec.rb: -------------------------------------------------------------------------------- 1 | RSpec.describe EventSourcery::Postgres::Reactor do 2 | TermsConfirmationEmailSent = Class.new(EventSourcery::Event) 3 | ItemViewed = Class.new(EventSourcery::Event) 4 | EchoEvent = Class.new(EventSourcery::Event) 5 | 6 | let(:reactor_class) do 7 | Class.new do 8 | include EventSourcery::Postgres::Reactor 9 | 10 | process TermsAccepted do |event| 11 | @processed_event = event 12 | end 13 | 14 | attr_reader :processed_event 15 | end 16 | end 17 | let(:reactor_class_with_emit) do 18 | Class.new do 19 | include EventSourcery::Postgres::Reactor 20 | 21 | emits_events TermsConfirmationEmailSent 22 | 23 | process TermsAccepted do |event| 24 | end 25 | end 26 | end 27 | 28 | let(:tracker) { EventSourcery::Memory::Tracker.new } 29 | let(:reactor_name) { 'my_reactor' } 30 | let(:event_store) { EventSourcery::Memory::EventStore.new(events) } 31 | let(:event_source) { EventSourcery::EventStore::EventSource.new(event_store) } 32 | 33 | let(:event_sink) { EventSourcery::EventStore::EventSink.new(event_store) } 34 | let(:aggregate_id) { SecureRandom.uuid } 35 | let(:events) { [] } 36 | subject(:reactor) { reactor_class.new(tracker: tracker, event_source: event_source, event_sink: event_sink) } 37 | 38 | describe '.new' do 39 | let(:event_source) { double } 40 | let(:event_sink) { double } 41 | let(:projections_database) { double } 42 | let(:event_tracker) { double } 43 | 44 | before do 45 | allow(EventSourcery::Postgres::Tracker).to receive(:new).with(projections_database).and_return(event_tracker) 46 | allow(projections_database).to receive(:extension).with(:pg_json) 47 | 48 | EventSourcery::Postgres.configure do |config| 49 | config.event_source = event_source 50 | config.event_sink = event_sink 51 | config.projections_database = projections_database 52 | end 53 | end 54 | 55 | subject(:reactor) {
reactor_class.new } 56 | 57 | it 'uses the configured projections database by default' do 58 | expect(reactor.instance_variable_get('@db_connection')).to eq projections_database 59 | end 60 | 61 | it 'uses the inferred event tracker database by default' do 62 | expect(reactor.instance_variable_get('@tracker')).to eq event_tracker 63 | end 64 | 65 | it 'uses the configured event source by default' do 66 | expect(reactor.instance_variable_get('@event_source')).to eq event_source 67 | end 68 | 69 | it 'uses the configured event sink by default' do 70 | expect(reactor.instance_variable_get('@event_sink')).to eq event_sink 71 | end 72 | end 73 | 74 | context "a processor that doesn't emit events" do 75 | it "doesn't require an event sink" do 76 | expect { 77 | reactor_class.new(tracker: tracker, event_source: event_source) 78 | }.to_not raise_error 79 | end 80 | 81 | it "doesn't require an event source" do 82 | expect { 83 | reactor_class.new(tracker: tracker, event_sink: event_sink) 84 | }.to_not raise_error 85 | expect { reactor.setup }.to_not raise_error 86 | end 87 | end 88 | 89 | context 'a processor that does emit events' do 90 | it 'requires an event sink' do 91 | expect { 92 | reactor_class_with_emit.new(tracker, event_source, nil) 93 | }.to raise_error(ArgumentError) 94 | end 95 | 96 | it 'requires an event source' do 97 | expect { 98 | reactor_class_with_emit.new(tracker, nil, event_sink) 99 | }.to raise_error(ArgumentError) 100 | end 101 | end 102 | 103 | describe '#setup' do 104 | it 'sets up the tracker to ensure we have a track entry' do 105 | expect(tracker).to receive(:setup).with(reactor_class.processor_name) 106 | reactor.setup 107 | end 108 | end 109 | 110 | describe '#reset' do 111 | it 'resets last processed event ID' do 112 | reactor.process(TermsAccepted.new(id: 1)) 113 | reactor.reset 114 | expect(tracker.last_processed_event_id(:test_processor)).to eq 0 115 | end 116 | end 117 | 118 | describe '.processes?' do 119 | it 'returns true if the event has been defined' do 120 | expect(reactor_class.processes?('terms_accepted')).to eq true 121 | expect(reactor_class.processes?(:terms_accepted)).to eq true 122 | end 123 | 124 | it "returns false if the event hasn't been defined" do 125 | expect(reactor_class.processes?('item_viewed')).to eq false 126 | expect(reactor_class.processes?(:item_viewed)).to eq false 127 | end 128 | end 129 | 130 | describe '.emits_event?' 
do 131 | it 'returns true if the event has been defined' do 132 | expect(reactor_class_with_emit.emits_event?(TermsConfirmationEmailSent)).to eq true 133 | end 134 | 135 | it "returns false if the event hasn't been defined" do 136 | expect(reactor_class_with_emit.emits_event?(ItemViewed)).to eq false 137 | end 138 | 139 | it "returns false if the reactor doesn't emit events" do 140 | expect(reactor_class.emits_event?(TermsConfirmationEmailSent)).to eq false 141 | end 142 | end 143 | 144 | describe '#process' do 145 | let(:event) { TermsAccepted.new(id: 1) } 146 | 147 | it "projects events it's interested in" do 148 | reactor.process(event) 149 | expect(reactor.processed_event).to eq(event) 150 | end 151 | 152 | context 'with a reactor that emits events' do 153 | let(:event_1) do 154 | TermsAccepted.new( 155 | id: 1, 156 | aggregate_id: aggregate_id, 157 | body: { time: Time.now }, 158 | correlation_id: SecureRandom.uuid, 159 | ) 160 | end 161 | let(:event_2) do 162 | EchoEvent.new( 163 | id: 2, 164 | aggregate_id: aggregate_id, 165 | body: event_1.body, 166 | correlation_id: event_1.correlation_id, 167 | causation_id: event_1.uuid, 168 | ) 169 | end 170 | let(:event_3) { TermsAccepted.new(id: 3, aggregate_id: aggregate_id, body: { time: Time.now }) } 171 | let(:event_4) { TermsAccepted.new(id: 4, aggregate_id: aggregate_id, body: { time: Time.now }) } 172 | let(:event_5) { TermsAccepted.new(id: 5, aggregate_id: aggregate_id, body: { time: Time.now }) } 173 | let(:event_6) { EchoEvent.new(id: 6, aggregate_id: aggregate_id, body: event_3.body, causation_id: event_3.uuid) } 174 | let(:events) { [event_1, event_2, event_3, event_4] } 175 | let(:action_stub_class) do 176 | Class.new do 177 | def self.action(id) 178 | actioned << id 179 | end 180 | 181 | def self.actioned 182 | @actions ||= [] 183 | end 184 | end 185 | end 186 | let(:reactor_class) do 187 | Class.new do 188 | include EventSourcery::Postgres::Reactor 189 | 190 | emits_events EchoEvent 191 | 192 | process TermsAccepted do |event| 193 | @event = event 194 | emit_event(EchoEvent.new(aggregate_id: event.aggregate_id, body: event.body)) do 195 | TestActioner.action(event.id) 196 | end 197 | end 198 | 199 | attr_reader :event 200 | end 201 | end 202 | 203 | before do 204 | reactor.setup 205 | stub_const('TestActioner', action_stub_class) 206 | end 207 | 208 | def event_count 209 | event_source.get_next_from(0, limit: 100).count 210 | end 211 | 212 | def latest_events(n = 1) 213 | event_source.get_next_from(0, limit: 100)[-n..-1] 214 | end 215 | 216 | context "when the event emitted doesn't take actions" do 217 | let(:reactor_class) do 218 | Class.new do 219 | include EventSourcery::Postgres::Reactor 220 | 221 | emits_events EchoEvent 222 | 223 | process TermsAccepted do |event| 224 | emit_event(EchoEvent.new(aggregate_id: event.aggregate_id, body: event.body)) 225 | end 226 | end 227 | end 228 | 229 | it 'processes the events as usual' do 230 | [event_1, event_2, event_3, event_4, event_5].each do |event| 231 | reactor.process(event) 232 | end 233 | expect(event_count).to eq 8 234 | end 235 | 236 | it 'stores the event causation id' do 237 | reactor.process(event_1) 238 | expect(latest_events(1).first.causation_id).to eq event_1.uuid 239 | end 240 | 241 | it 'stores the event correlation id' do 242 | reactor.process(event_1) 243 | expect(latest_events(1).first.correlation_id).to eq event_1.correlation_id 244 | end 245 | end 246 | 247 | context "when the event emitted hasn't been defined in emit_events" do 248 | let(:reactor_class) do 
249 | Class.new do 250 | include EventSourcery::Postgres::Reactor 251 | 252 | emits_events EchoEvent 253 | 254 | process TermsAccepted do |event| 255 | emit_event(ItemViewed.new(aggregate_id: event.aggregate_id, body: event.body)) 256 | end 257 | end 258 | end 259 | 260 | it 'raises an error' do 261 | expect { 262 | reactor.process(event_1) 263 | }.to raise_error(EventSourcery::EventProcessingError) 264 | end 265 | end 266 | 267 | context 'when body is yielded to the emit block' do 268 | let(:events) { [] } 269 | let(:reactor_class) do 270 | Class.new do 271 | include EventSourcery::Postgres::Reactor 272 | 273 | emits_events EchoEvent 274 | 275 | process TermsAccepted do |event| 276 | emit_event(EchoEvent.new(aggregate_id: event.aggregate_id)) do |body| 277 | body[:token] = 'secret-identifier' 278 | end 279 | end 280 | end 281 | end 282 | 283 | it 'can manipulate the event body as part of the action' do 284 | reactor.process(event_1) 285 | expect(latest_events(1).first.body['token']).to eq 'secret-identifier' 286 | end 287 | 288 | it 'stores the event causation id' do 289 | reactor.process(event_1) 290 | expect(latest_events(1).first.causation_id).to eq event_1.uuid 291 | end 292 | 293 | it 'stores the event correlation id' do 294 | reactor.process(event_1) 295 | expect(latest_events(1).first.correlation_id).to eq event_1.correlation_id 296 | end 297 | end 298 | end 299 | end 300 | end 301 | --------------------------------------------------------------------------------