├── .gitignore
├── .scalafmt.conf
├── .travis.yml
├── LICENSE
├── README.adoc
├── build.sbt
├── docker
│   ├── docker-compose.yml
│   └── postgres-init.sql
├── modules
│   ├── akka-persistence-pg
│   │   ├── build.sbt
│   │   └── src
│   │       ├── it
│   │       │   ├── resources
│   │       │   │   ├── logback.xml
│   │       │   │   └── pg-perf-spec.conf
│   │       │   └── scala
│   │       │       └── testkit
│   │       │           ├── PgAsyncJournalPerfSpec.scala
│   │       │           └── PgAsyncJournalPerfSuite.scala
│   │       ├── main
│   │       │   ├── resources
│   │       │   │   ├── application.conf
│   │       │   │   └── reference.conf
│   │       │   └── scala
│   │       │       └── akka
│   │       │           └── persistence
│   │       │               └── pg
│   │       │                   ├── AkkaPgJdbcTypes.scala
│   │       │                   ├── JsonString.scala
│   │       │                   ├── PgConfig.scala
│   │       │                   ├── PgExtension.scala
│   │       │                   ├── PgPostgresProfile.scala
│   │       │                   ├── PluginConfig.scala
│   │       │                   ├── event
│   │       │                   │   ├── Created.scala
│   │       │                   │   ├── EventStore.scala
│   │       │                   │   ├── EventTagger.scala
│   │       │                   │   ├── EventWrapper.scala
│   │       │                   │   ├── ExtraDBIOSupport.scala
│   │       │                   │   ├── JsonEncoder.scala
│   │       │                   │   └── Tagged.scala
│   │       │                   ├── journal
│   │       │                   │   ├── JournalStore.scala
│   │       │                   │   ├── JournalTable.scala
│   │       │                   │   ├── Notifier.scala
│   │       │                   │   ├── PgAsyncWriteJournal.scala
│   │       │                   │   ├── ReadJournalStore.scala
│   │       │                   │   ├── RowIdUpdater.scala
│   │       │                   │   ├── StoreActor.scala
│   │       │                   │   ├── TestPgAsyncWriteJournal.scala
│   │       │                   │   └── WriteStrategy.scala
│   │       │                   ├── package.scala
│   │       │                   ├── query
│   │       │                   │   ├── PostgresReadJournalProvider.scala
│   │       │                   │   ├── javadsl
│   │       │                   │   │   ├── AllEvents.scala
│   │       │                   │   │   ├── CurrentAllEvents.scala
│   │       │                   │   │   ├── CurrentEventsByTags.scala
│   │       │                   │   │   ├── EventsByTags.scala
│   │       │                   │   │   └── PostgresReadJournal.scala
│   │       │                   │   └── scaladsl
│   │       │                   │       ├── AllEvents.scala
│   │       │                   │       ├── CurrentAllEvents.scala
│   │       │                   │       ├── CurrentEventsByTags.scala
│   │       │                   │       ├── EventsByTags.scala
│   │       │                   │       └── PostgresReadJournal.scala
│   │       │                   ├── snapshot
│   │       │                   │   ├── PgAsyncSnapshotStore.scala
│   │       │                   │   ├── PgSnapshotStore.scala
│   │       │                   │   ├── SnapshotTable.scala
│   │       │                   │   └── TestPgAsyncSnapshotStore.scala
│   │       │                   ├── streams
│   │       │                   │   ├── AllEventsPublisherStageLogic.scala
│   │       │                   │   ├── EventsByPersistenceIdPublisherStageLogic.scala
│   │       │                   │   ├── EventsByTagsPublisherStageLogic.scala
│   │       │                   │   ├── EventsPublisherGraphStage.scala
│   │       │                   │   └── EventsPublisherStageLogic.scala
│   │       │                   └── util
│   │       │                       ├── DummyPersistentActor.scala
│   │       │                       ├── PgPluginTestUtil.scala
│   │       │                       └── PluginTestConfig.scala
│   │       └── test
│   │           ├── resources
│   │           │   ├── example-actor-serialization-test.conf
│   │           │   ├── example-actor-test.conf
│   │           │   ├── logback.xml
│   │           │   ├── pg-application.conf
│   │           │   ├── pg-eventstore-locking.conf
│   │           │   ├── pg-eventstore-rowid.conf
│   │           │   ├── pg-eventstore-snapshotencoder.conf
│   │           │   ├── pg-eventstore.conf
│   │           │   ├── pg-persist-jndi.conf
│   │           │   ├── pg-persistall.conf
│   │           │   ├── pg-readmodelupdate.conf
│   │           │   ├── pg-writestrategy-base.conf
│   │           │   ├── pg-writestrategy-locking.conf
│   │           │   ├── pg-writestrategy-rowid.conf
│   │           │   ├── pg-writestrategy-st.conf
│   │           │   ├── pg-writestrategy-tx.conf
│   │           │   ├── postgres.conf
│   │           │   └── simplelogger.properties
│   │           └── scala
│   │               └── akka
│   │                   └── persistence
│   │                       └── pg
│   │                           ├── AbstractEventStoreTest.scala
│   │                           ├── CustomSerializationTest.scala
│   │                           ├── EventStoreTest.scala
│   │                           ├── EventStoreWithSnapshotEncoderTest.scala
│   │                           ├── ExamplePersistentActorTest.scala
│   │                           ├── ExtraDBIOSupportTest.scala
│   │                           ├── PersistAllTest.scala
│   │                           ├── PersistUsingJndiTest.scala
│   │                           ├── ReadModelUpdateActorTest.scala
│   │                           ├── TestActor.scala
│   │                           ├── TestEventEncoder.scala
│   │                           ├── TestEventStore.scala
│   │                           ├── TestEventTagger.scala
│   │                           ├── TestSnapshotEncoder.scala
│   │                           ├── WaitForEvents.scala
│   │                           ├── perf
│   │                           │   ├── Messages.scala
│   │                           │   ├── PerfActor.scala
│   │                           │   ├── PerfEventEncoder.scala
│   │                           │   ├── PersistAllActor.scala
│   │                           │   ├── PersistAsyncActor.scala
│   │                           │   ├── RandomDelayPerfActor.scala
│   │                           │   └── ReadModelUpdateActor.scala
│   │                           ├── query
│   │                           │   ├── EventStoreQueryNotificationTest.scala
│   │                           │   ├── EventStoreQueryOnIdNotificationTest.scala
│   │                           │   ├── EventStoreQueryOnIdTest.scala
│   │                           │   └── EventStoreQueryTest.scala
│   │                           ├── testkit
│   │                           │   ├── PgAsyncJournalSpec.scala
│   │                           │   └── PgSnapshotStoreSpec.scala
│   │                           ├── util
│   │                           │   ├── CreateTables.scala
│   │                           │   ├── PersistentActorTest.scala
│   │                           │   └── RecreateSchema.scala
│   │                           └── writestrategy
│   │                               ├── MissingWriteStrategySuite.scala
│   │                               ├── NonMissingWriteStrategySuite.scala
│   │                               ├── RowIdUpdatingWriteStrategySuite.scala
│   │                               ├── SingleThreadedWriteStrategySuite.scala
│   │                               ├── TableLockingWriteStrategySuite.scala
│   │                               ├── TransactionalWriteStrategySuite.scala
│   │                               └── WriteStrategySuite.scala
│   └── benchmark
│       └── src
│           └── it
│               ├── resources
│               │   ├── logback.xml
│               │   ├── pg-perf-base.conf
│               │   ├── pg-perf-locking.conf
│               │   ├── pg-perf-rowid.conf
│               │   ├── pg-perf-st.conf
│               │   └── pg-perf-tx.conf
│               └── scala
│                   └── gatlin
│                       ├── AbstractPersistenceSimulation.scala
│                       ├── AskAction.scala
│                       ├── AskActionBuilder.scala
│                       ├── AskMessage.scala
│                       ├── MultiActorPerfSimulation.scala
│                       ├── Predef.scala
│                       ├── SingleActorPerfSimulation.scala
│                       └── simulations
│                           └── Simulations.scala
├── project
│   ├── BuildSettings.scala
│   ├── Dependencies.scala
│   ├── build.properties
│   └── plugins.sbt
├── release.md
├── sonatype.sbt
└── version.sbt
/.gitignore:
--------------------------------------------------------------------------------
1 | # Created by .ignore support plugin (hsz.mobi)
2 | ### Scala template
3 | *.class
4 | *.log
5 |
6 | # sbt specific
7 | .cache
8 | .history
9 | .lib/
10 | dist/*
11 | target/
12 | lib_managed/
13 | src_managed/
14 | project/boot/
15 | project/plugins/project/
16 |
17 | # Scala-IDE specific
18 | .scala_dependencies
19 | .worksheet
20 | .idea
21 |
22 |
23 | # scala metals specific
24 | .metals
25 | .bloop
26 |
--------------------------------------------------------------------------------
/.scalafmt.conf:
--------------------------------------------------------------------------------
1 | version = 2.0.0-RC8
2 |
3 | style = defaultWithAlign
4 |
5 | maxColumn = 120
6 | rewrite.rules = [RedundantBraces, RedundantParens, SortImports]
7 | optIn.breakChainOnFirstMethodDot = true
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | language: scala
2 |
3 | scala:
4 | - 2.12.16
5 | - 2.13.8
6 | script:
7 | - sbt "++ $TRAVIS_SCALA_VERSION test"
8 |
9 | sudo: false
10 |
11 | jdk: openjdk8
12 |
13 | services:
14 | - postgresql
15 |
16 | addons:
17 | postgresql: '9.4'
18 |
19 | before_script:
20 | - sbt scalafmtCheckAll || { echo "[error] Unformatted code found. Please run 'scalafmtAll' and commit the reformatted code."; false; }
21 | - sbt scalafmtSbtCheck || { echo "[error] Unformatted sbt code found. Please run 'scalafmtSbt' and commit the reformatted code."; false; }
22 | - psql -c 'create database akkapg;' -U postgres
23 | - psql akkapg -c "create extension hstore;" -U postgres
24 | - psql -c "create user akkapg with password 'akkapg';" -U postgres
25 | - psql -c "grant all privileges on database akkapg to akkapg;" -U postgres
26 |
27 | cache:
28 | directories:
29 | - "$HOME/.ivy2"
30 | - "$HOME/.sbt"
31 |
32 | notifications:
33 | email:
34 | recipients:
35 | - peter.mortier@gmail.com
36 | - karel@geovise.com
37 |
38 | #after_success:
39 | #- '[[ $TRAVIS_BRANCH == "develop" ]] && { sbt "++ $TRAVIS_SCALA_VERSION publish"; };'
40 |
41 | env:
42 | global:
43 | - SONATYPE_USERNAME=kmaesen
44 | - secure: ZU+VxelX+z1nvPOJ+62hl4e6W8k3i2zjq/A1Sz1BSfgbjcTF94az+86YbiyCrpKYc6seUWGswneUOBSX6ib1PONFeLMlKwIUyRPXJIYuJxgYeF4uuJnGtSrzrpVxuFAh0e7Ju/p1A/EleKlfdtjdH4Lr9jFoHGKItcxMxVOQWk6NMvopevpD3+2asPY5dYv5rR5sCHT9fpCh0pvYVN9iQRl+0kzYBYUcC8En307USfx8rl7qHFaAiHe69g4JMBoBR2GmSbFfcSaq7/NCSz3qu/wpk7pNA9ieyChQaotrZxdlQLmO75Jo6mqw/8sCZIqw4OrWS7oNmMzZ1MPNsWEJ820vr56+S/ij0Vz8uns3xakxKky3bJ6Oeg8dFQe0Bj76MCN6y/V+aGQcqMcQmIovbskUyPC0KK62K9gMvwOm7zrrvT6xDOpsTSAVW4yZPGDGY6Jc5DJWT1833qgYJMYcAd+pZSnaE4gvINsjqSHieCIGjxtR+3xhqZ+xqyoDFQl3GejKpmVXbiGR+W2WE/GJRHNWQaC6nhg/Cy+zwObDl1B3TwBBwC0xi8FYfi8pUeNgqH5N35e3XWB9JiBRHWbfwnlcJOmquaduKhA6DMntB15EB/9mM2oLSRazvnkCtof45O9Zm2OTkpqbJ3I/jbaoOMKMWXmhLHsBBmvkxicPsao=
45 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | The MIT License (MIT)
2 |
3 | Copyright (c) 2016 WegenenVerkeer
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
--------------------------------------------------------------------------------
/build.sbt:
--------------------------------------------------------------------------------
1 | import BuildSettings._
2 | import Dependencies._
3 |
4 | organization := "be.wegenenverkeer"
5 |
6 | Global / concurrentRestrictions += Tags.limit(Tags.Test, 1)
7 |
8 | lazy val scala212 = "2.12.16"
9 | lazy val scala213 = "2.13.8"
10 | ThisBuild / crossScalaVersions := Seq(scala212, scala213)
11 |
12 | ThisBuild / scalacOptions := {
13 | val commonOptions = Seq(
14 | "-target:jvm-1.8",
15 | "-encoding",
16 | "UTF-8",
17 | "-deprecation", // warning and location for usages of deprecated APIs
18 | "-feature", // warning and location for usages of features that should be imported explicitly
19 | "-unchecked", // additional warnings where generated code depends on assumptions
20 | "-Xlint:-infer-any", // recommended additional warnings
21 | "-Ywarn-value-discard", // Warn when non-Unit expression results are unused
22 | // "-Ywarn-dead-code",
23 | // "-Xfatal-warnings",
24 | "-language:reflectiveCalls",
25 | "-Ydelambdafy:method",
26 | //"-Wconf:cat=lint-multiarg-infix:s" // mute warning on Slick <> operator: https://contributors.scala-lang.org/t/multiarg-infix-application-considered-warty/4490
27 | )
28 |
29 | val scalaVersionSpecificOptions = scalaVersion.value match {
30 | case v: String if v startsWith "2.13" => Seq()
31 | case v: String if v startsWith "2.12" =>
32 | Seq(
33 | "-Ywarn-adapted-args", // Warn if an argument list is modified to match the receiver,
34 | "-Ywarn-inaccessible"
35 | )
36 | }
37 |
38 | commonOptions ++ scalaVersionSpecificOptions
39 | }
40 |
41 | lazy val akkaPersistencePgModule = {
42 |
43 | val mainDeps = Seq(
44 | slick,
45 | slickHikariCp,
46 | postgres,
47 | akkaPersistence,
48 | akkaPersistenceQuery,
49 | akkaActor,
50 | akkaStreams,
51 | akkaTest,
52 | akkaPersistenceTestkit,
53 | slf4jSimple
54 | )
55 |
56 | val It = config("it") extend Test
57 |
58 | Project(
59 | id = "akka-persistence-pg",
60 | base = file("modules/akka-persistence-pg")
61 | ).configs(It)
62 | .settings(Defaults.coreDefaultSettings ++ commonSettings ++ publishSettings)
63 | .settings(Defaults.itSettings: _*)
64 | .settings(crossScalaVersions := (ThisBuild / crossScalaVersions).value)
65 | .settings(libraryDependencies ++= mainDeps ++ mainTestDependencies)
66 | .settings(libraryDependencies ++= {
67 | CrossVersion.partialVersion(scalaVersion.value) match {
68 | case Some((2,n)) if n <= 12 => Seq("org.scala-lang.modules" %% "scala-java8-compat" % "0.9.0")
69 | case _ => Seq("org.scala-lang.modules" %% "scala-java8-compat" % "1.0.0")
70 | }
71 | })
72 |
73 | }
74 |
75 | lazy val benchmarkModule = {
76 |
77 | //val mainDeps = Seq(scalaJava8Compat, gatling % "it", gatlinHighcharts % "it")
78 |
79 | import _root_.io.gatling.sbt.GatlingPlugin
80 |
81 | Project(
82 | id = "benchmark",
83 | base = file("modules/benchmark")
84 | ).dependsOn(akkaPersistencePgModule % "it->test;test->test;compile->compile")
85 | .enablePlugins(GatlingPlugin)
86 | .configs(GatlingIt)
87 | .settings(Defaults.coreDefaultSettings ++ commonSettings ++ Seq(publish / skip := true))
88 | .settings(crossScalaVersions := (ThisBuild / crossScalaVersions).value.filter(_ startsWith "2.13"))
89 | .settings(scalaVersion := crossScalaVersions.value.last)
90 |
91 | }
92 |
93 | val main = Project(
94 | id = "akka-persistence-postgresql",
95 | base = file(".")
96 | ).settings(
97 | Defaults.coreDefaultSettings ++ commonSettings ++
98 | Seq(publishLocal := {}, publish := {}, packagedArtifacts := Map.empty, crossScalaVersions := Seq.empty)
99 | )
100 | .aggregate(akkaPersistencePgModule, benchmarkModule)
101 |
--------------------------------------------------------------------------------
/docker/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '2.1'
2 | services:
3 | db:
4 | image: postgres:10
5 | ports:
6 | - "5432:5432"
7 | environment:
8 | - POSTGRES_USER=akkapg
9 | - POSTGRES_PASSWORD=akkapg
10 | volumes:
11 | - ./postgres-init.sql:/docker-entrypoint-initdb.d/init.sql
12 |
--------------------------------------------------------------------------------
/docker/postgres-init.sql:
--------------------------------------------------------------------------------
1 | CREATE EXTENSION IF NOT EXISTS HSTORE;
--------------------------------------------------------------------------------
/modules/akka-persistence-pg/build.sbt:
--------------------------------------------------------------------------------
1 | parallelExecution in Test := false
2 |
--------------------------------------------------------------------------------
/modules/akka-persistence-pg/src/it/resources/logback.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <configuration>
3 |
4 |     <appender name="CONSOLE" class="ch.qos.logback.core.ConsoleAppender">
5 |         <encoder>
6 |             <pattern>%d{HH:mm:ss.SSS} [%-5level] %logger{15} - %msg%n%rEx</pattern>
7 |             <immediateFlush>false</immediateFlush>
8 |         </encoder>
9 |     </appender>
10 |
11 |     <root level="INFO">
12 |         <appender-ref ref="CONSOLE"/>
13 |     </root>
14 |
15 | </configuration>
--------------------------------------------------------------------------------
/modules/akka-persistence-pg/src/it/resources/pg-perf-spec.conf:
--------------------------------------------------------------------------------
1 | include "postgres.conf"
2 |
3 | pg-persistence {
4 | db {
5 | user = ${postgres.user}
6 | password = ${postgres.password}
7 | url = "jdbc:postgresql://"${postgres.host}":"${postgres.port}"/"${postgres.name}
8 | connectionPool = "HikariCP"
9 | numThreads = 4
10 | queueSize = 10000
11 | }
12 | schemaName = ${postgres.schema}
13 | pgjson = "jsonb"
14 | writestrategy = "akka.persistence.pg.journal.RowIdUpdatingStrategy"
15 | eventstore {
16 | encoder: "akka.persistence.pg.perf.PerfEventEncoder"
17 | schemaName: ${postgres.schema}
18 | tableName: "journal"
19 | }
20 |
21 | throttler-mailbox = {
22 | mailbox-capacity = 20000
23 | }
24 |
25 | }
26 |
27 | pg-journal {
28 | circuit-breaker {
29 | max-failures = 10
30 | call-timeout = 20s # use a higher circuit-breaker call timeout for the perf runs
31 | reset-timeout = 30s
32 | }
33 | }
34 |
35 |
36 | akka {
37 | loggers = ["akka.event.slf4j.Slf4jLogger"]
38 | loglevel = debug
39 | log-config-on-start = off
40 | stdout-loglevel = warning
41 | test.single-expect-default = 10000
42 |
43 | persistence {
44 | journal.plugin = "pg-journal"
45 | snapshot-store.plugin = "pg-snapshot"
46 |
47 | # we need event publishing for tests
48 | publish-confirmations = on
49 | publish-plugin-commands = on
50 | }
51 |
52 | log-dead-letters = 10
53 | log-dead-letters-during-shutdown = on
54 | }
55 |
56 |
57 |
58 |
--------------------------------------------------------------------------------
/modules/akka-persistence-pg/src/it/scala/testkit/PgAsyncJournalPerfSpec.scala:
--------------------------------------------------------------------------------
1 | package testkit
2 |
3 | import akka.persistence.CapabilityFlag
4 | import akka.persistence.journal.JournalPerfSpec
5 | import akka.persistence.pg.util.{CreateTables, RecreateSchema}
6 | import akka.persistence.pg.{PgConfig, PgExtension}
7 | import com.typesafe.config.ConfigFactory
8 | import org.scalatest.concurrent.ScalaFutures
9 | import org.scalatest.time.{Milliseconds, Second, Span}
10 | import org.slf4j.LoggerFactory
11 |
12 | import scala.concurrent.Future
13 | import scala.concurrent.duration._
14 | import scala.language.postfixOps
15 |
16 | class PgAsyncJournalPerfSpec extends JournalPerfSpec(ConfigFactory.load("pg-perf-spec.conf"))
17 | with RecreateSchema
18 | with ScalaFutures
19 | with CreateTables
20 | with PgConfig {
21 |
22 | val logger = LoggerFactory.getLogger(getClass)
23 |
24 | override implicit val patienceConfig = PatienceConfig(timeout = Span(1, Second), interval = Span(100, Milliseconds))
25 | private val pgExtension: PgExtension = PgExtension(system)
26 | override lazy val pluginConfig = pgExtension.pluginConfig
27 |
28 | override def eventsCount = 5000
29 | override def awaitDurationMillis: Long = 30.seconds toMillis
30 |
31 | override def beforeAll(): Unit = {
32 | database.run(recreateSchema
33 | .andThen(createTables)).futureValue
34 | super.beforeAll()
35 | }
36 |
37 | override def afterEach() = {
38 | pgExtension.whenDone(Future.successful(())).futureValue
39 | ()
40 | }
41 |
42 | override def afterAll() = {
43 | pgExtension.terminateWhenReady().futureValue
44 | ()
45 | }
46 |
47 | override protected def supportsRejectingNonSerializableObjects: CapabilityFlag = false
48 | }
49 |
--------------------------------------------------------------------------------
/modules/akka-persistence-pg/src/it/scala/testkit/PgAsyncJournalPerfSuite.scala:
--------------------------------------------------------------------------------
1 | package testkit
2 |
3 | import java.util.UUID
4 |
5 | import akka.actor.{ActorRef, ActorSystem, Props}
6 | import akka.persistence.journal.JournalPerfSpec.{BenchActor, Cmd}
7 | import akka.persistence.pg.util.{CreateTables, RecreateSchema}
8 | import akka.persistence.pg.{PgConfig, PgExtension}
9 | import akka.testkit.TestProbe
10 | import com.typesafe.config.ConfigFactory
11 | import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach, FunSuite}
12 | import org.scalatest.concurrent.ScalaFutures
13 | import org.scalatest.time.{Milliseconds, Second, Span}
14 | import org.slf4j.LoggerFactory
15 |
16 | import scala.collection.immutable
17 | import scala.concurrent.Future
18 | import scala.concurrent.duration._
19 | import scala.language.postfixOps
20 |
21 | class PgAsyncJournalPerfSuite extends FunSuite
22 | with RecreateSchema
23 | with ScalaFutures
24 | with CreateTables
25 | with BeforeAndAfterEach
26 | with BeforeAndAfterAll
27 | with PgConfig {
28 |
29 | val logger = LoggerFactory.getLogger(getClass)
30 |
31 | lazy val config = ConfigFactory.load("pg-perf-spec.conf")
32 | implicit val system = ActorSystem("PgAsyncJournalPerfSuite", config)
33 |
34 | override implicit val patienceConfig = PatienceConfig(timeout = Span(1, Second), interval = Span(100, Milliseconds))
35 | private val pgExtension: PgExtension = PgExtension(system)
36 | override lazy val pluginConfig = pgExtension.pluginConfig
37 |
38 | override def beforeAll(): Unit = {
39 | database.run(recreateSchema
40 | .andThen(createTables)).futureValue
41 | super.beforeAll()
42 | }
43 |
44 | override def afterEach() = {
45 | logger.info("wait for RowIdUpdater to finish")
46 | pgExtension.whenDone(Future.successful(())).futureValue
47 | logger.info("RowIdUpdater is finished")
48 | }
49 |
50 | override def afterAll() = {
51 | pgExtension.terminateWhenReady().futureValue
52 | ()
53 | }
54 |
55 | private val testProbe = TestProbe()
56 |
57 | def benchActor(pid: String, replyAfter: Int): ActorRef =
58 | system.actorOf(Props(classOf[BenchActor], pid, testProbe.ref, replyAfter))
59 |
60 | def feedAndExpectLast(actor: ActorRef, mode: String, cmnds: immutable.Seq[Int]): Unit = {
61 | cmnds foreach { c ⇒ actor ! Cmd(mode, c) }
62 | testProbe.expectMsg(awaitDuration, cmnds.last)
63 | }
64 |
65 | /** Executes a block of code multiple times (no warm-up) */
66 | def measure(msg: Duration ⇒ String)(block: ⇒ Unit): Unit = {
67 | val measurements = Array.ofDim[Duration](measurementIterations)
68 | var i = 0
69 | while (i < measurementIterations) {
70 | val start = System.nanoTime()
71 |
72 | block
73 |
74 | val stop = System.nanoTime()
75 | val d = (stop - start).nanos
76 | measurements(i) = d
77 | info(msg(d))
78 |
79 | i += 1
80 | }
81 | info(s"Average time: ${(measurements.map(_.toNanos).sum / measurementIterations).nanos.toMillis} ms")
82 | }
83 |
84 | /** Override in order to customize timeouts used for expectMsg, in order to tune the awaits to your journal's perf */
85 | def awaitDurationMillis: Long = 100.seconds.toMillis
86 |
87 | /** Override in order to customize timeouts used for expectMsg, in order to tune the awaits to your journal's perf */
88 | private def awaitDuration: FiniteDuration = awaitDurationMillis.millis
89 |
90 | /** Number of messages sent to the PersistentActor under test for each test iteration */
91 | def eventsCount: Int = 10 * 2000
92 |
93 | /** Number of measurement iterations each test will be run. */
94 | def measurementIterations: Int = 50
95 |
96 | private val commands = Vector(1 to eventsCount: _*)
97 |
98 | test(s"recovering $eventsCount events") {
99 | val pid: String = UUID.randomUUID().toString
100 | val p1 = benchActor(pid, eventsCount)
101 | feedAndExpectLast(p1, "p", commands)
102 |
103 | measure(d ⇒ s"Recovering $eventsCount took ${d.toMillis} ms") {
104 | benchActor(pid, eventsCount)
105 | testProbe.expectMsg(max = awaitDuration, commands.last)
106 | }
107 | }
108 |
109 | }
110 |
--------------------------------------------------------------------------------
/modules/akka-persistence-pg/src/main/resources/application.conf:
--------------------------------------------------------------------------------
1 | akka.stream.materializer {
2 | initial-input-buffer-size = 16
3 | max-input-buffer-size = 1024
4 | }
5 |
--------------------------------------------------------------------------------
/modules/akka-persistence-pg/src/main/resources/reference.conf:
--------------------------------------------------------------------------------
1 | pg-journal {
2 | # class name of the jdbc journal plugin
3 | class = "akka.persistence.pg.journal.PgAsyncWriteJournal"
4 | plugin-dispatcher = "akka.persistence.pg.default-dispatcher"
5 | }
6 |
7 | pg-snapshot {
8 | # class name of the jdbc snapshot store
9 | class = "akka.persistence.pg.snapshot.PgAsyncSnapshotStore"
10 | plugin-dispatcher = "akka.persistence.pg.default-dispatcher"
11 | }
12 |
13 | akka.persistence.pg {
14 |
15 | journal.query {
16 | class = "akka.persistence.pg.query.PostgresReadJournalProvider"
17 | refresh-interval = 3s
18 | max-buffer-size = 100
19 | write-plugin = "pg-journal"
20 | }
21 |
22 | default-dispatcher {
23 | # Dispatcher is the name of the event-based dispatcher
24 | type = Dispatcher
25 | # What kind of ExecutionService to use
26 | executor = "thread-pool-executor"
27 | # Configuration for the thread pool
28 | thread-pool-executor {
29 | core-pool-size-min = 4
30 | core-pool-size-max = 8
31 | }
32 | shutdown-timeout = 10
33 | throughput = 1
34 | }
35 | }
36 |
37 | pg-persistence {
38 | db {
39 | password = "akkapg"
40 | user = "akkapg"
41 | url = "jdbc:postgresql://localhost:5432/akka"
42 | jndiName = "" # if you already have a DB configured somewhere else, you can share it through JNDI,
43 | # when doing so, user, password and url are not needed but maxConnections is required
44 | maxConnections = 4 # maximum number of JDBC connections of the underlying pool
45 | numThreads = 4
46 | queueSize = 1000
47 | connectionTimeout = 1000
48 | validationTimeout = 1000
49 | connectionPool = "HikariCP" # set to "disabled" to disable connection pooling, useful for tests
50 | dataSourceClass = "org.postgresql.ds.PGSimpleDataSource"
51 | #for other optional HikariCP-related properties, check the JdbcBackend.forConfig scaladoc
52 | properties = {
53 | prepareThreshold = 1 #enable prepared statement caching on the server side, required because HikariCP does not do prepared statement caching
54 | }
55 | }
56 | pgjson = "json" //allowed values are 'json' and 'jsonb'
57 | schemaName = ""
58 | journalTableName = "journal"
59 | snapshotTableName = "snapshot"
60 | snapshotEncoder: "" #default is no json snapshot encoding, binary serialization
61 | ignoreSnapshotDecodingFailure: false
62 | writestrategy = "akka.persistence.pg.journal.TableLockingWriteStrategy"
63 | eventstore {
64 | encoder: "" #default is no json event encoding, binary serialization
65 | tagger: "default" #default event tagger or 'none' for no tagging or a full classname of a custom EventTagger implementation
66 | useView: false # if false events will be read directly from the journal, if true you need to create a view
67 | #on the journal table that only shows the events
68 | idColumnName: "id"
69 | eventViewSchema: ""
70 | eventViewName: "events"
71 | }
72 |
73 | }
74 |
75 |
76 |
--------------------------------------------------------------------------------
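The settings above are usually overridden from an application's own config and can be handed straight to the plugin's PluginConfig (defined in PluginConfig.scala below). A minimal sketch, assuming the plugin jar (and thus this reference.conf) is on the classpath; the connection URL is a placeholder:

import akka.persistence.pg.PluginConfig
import com.typesafe.config.ConfigFactory

object ConfigExample extends App {
  // override a few pg-persistence settings on top of the reference.conf defaults
  val config = ConfigFactory
    .parseString(
      """
        |pg-persistence {
        |  db.url = "jdbc:postgresql://localhost:5432/akkapg"
        |  pgjson = "jsonb"
        |}
      """.stripMargin
    )
    .withFallback(ConfigFactory.load())

  val pluginConfig = PluginConfig(config)
  println(pluginConfig.journalTableName) // "journal", from the defaults above
}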
/modules/akka-persistence-pg/src/main/scala/akka/persistence/pg/AkkaPgJdbcTypes.scala:
--------------------------------------------------------------------------------
1 | package akka.persistence.pg
2 |
3 | import java.sql._
4 | import java.time.OffsetDateTime
5 | import java.time.format.{DateTimeFormatter, DateTimeFormatterBuilder}
6 | import java.time.temporal.ChronoField
7 |
8 | import org.postgresql.util.HStoreConverter
9 | import slick.ast.Library.SqlOperator
10 | import slick.ast.{FieldSymbol, TypedType}
11 | import slick.jdbc.{JdbcType, JdbcTypesComponent, PostgresProfile}
12 | import slick.lifted.ExtensionMethods
13 |
14 | import scala.collection.JavaConverters._
15 | import scala.language.implicitConversions
16 | import scala.reflect.ClassTag
17 |
18 | trait AkkaPgJdbcTypes extends JdbcTypesComponent { driver: PostgresProfile =>
19 |
20 | def pgjson: String
21 |
22 | import driver.api._
23 |
24 | private[this] class GenericJdbcType[T](
25 | val sqlTypeName: String,
26 | fnFromString: String => T,
27 | fnToString: T => String = (r: T) => r.toString,
28 | val sqlType: Int = java.sql.Types.OTHER,
29 | zero: T = null.asInstanceOf[T],
30 | override val hasLiteralForm: Boolean = false
31 | )(implicit override val classTag: ClassTag[T])
32 | extends DriverJdbcType[T] {
33 |
34 | override def sqlTypeName(sym: Option[FieldSymbol]): String = sqlTypeName
35 |
36 | override def getValue(r: ResultSet, idx: Int): T = {
37 | val value = r.getString(idx)
38 | if (r.wasNull) zero else fnFromString(value)
39 | }
40 |
41 | override def setValue(v: T, p: PreparedStatement, idx: Int): Unit = p.setObject(idx, toStr(v), java.sql.Types.OTHER)
42 |
43 | override def updateValue(v: T, r: ResultSet, idx: Int): Unit = r.updateObject(idx, toStr(v), java.sql.Types.OTHER)
44 |
45 | override def valueToSQLLiteral(v: T): String = if (v == null) "NULL" else s"'${fnToString(v)}'"
46 |
47 | private def toStr(v: T) = if (v == null) null else fnToString(v)
48 | }
49 |
50 | protected def fromInfinitable[T](max: T, min: T, parse: String => T): String => T = {
51 | case "infinity" => max
52 | case "-infinity" => min
53 | case finite => parse(finite)
54 | }
55 |
56 | protected def toInfinitable[T](max: T, min: T, format: T => String): T => String = {
57 | case `max` => "infinity"
58 | case `min` => "-infinity"
59 | case finite => format(finite)
60 | }
61 |
62 | private val date2TzDateTimeFormatter =
63 | new DateTimeFormatterBuilder()
64 | .append(DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss"))
65 | .optionalStart()
66 | .appendFraction(ChronoField.NANO_OF_SECOND, 0, 6, true)
67 | .optionalEnd()
68 | .appendOffset("+HH:mm", "+00")
69 | .toFormatter()
70 |
71 | protected val fromOffsetDateTimeOrInfinity: String => OffsetDateTime =
72 | fromInfinitable(OffsetDateTime.MAX, OffsetDateTime.MIN, OffsetDateTime.parse(_, date2TzDateTimeFormatter))
73 | protected val toOffsetDateTimeOrInfinity: OffsetDateTime => String =
74 | toInfinitable[OffsetDateTime](OffsetDateTime.MAX, OffsetDateTime.MIN, _.format(date2TzDateTimeFormatter))
75 |
76 | trait AkkaPgImplicits {
77 |
78 | implicit val date2TzTimestampTypeMapper: JdbcType[OffsetDateTime] = new GenericJdbcType[OffsetDateTime](
79 | "timestamptz",
80 | fromOffsetDateTimeOrInfinity,
81 | toOffsetDateTimeOrInfinity,
82 | hasLiteralForm = false
83 | )
84 |
85 | implicit val simpleHStoreTypeMapper: JdbcType[Map[String, String]] =
86 | new GenericJdbcType[Map[String, String]](
87 | "hstore",
88 | v => HStoreConverter.fromString(v).asScala.toMap,
89 | v => HStoreConverter.toString(v.asJava),
90 | hasLiteralForm = false
91 | )
92 |
93 | implicit def simpleHStoreColumnExtensionMethods(
94 | c: Rep[Map[String, String]]
95 | ): HStoreColumnExtensionMethods[Map[String, String]] =
96 | new HStoreColumnExtensionMethods[Map[String, String]](c)
97 |
98 | implicit val jsonStringTypeMapper: JdbcType[JsonString] =
99 | new GenericJdbcType[JsonString](
100 | pgjson,
101 | v => JsonString(v),
102 | v => v.value,
103 | hasLiteralForm = false
104 | )
105 |
106 | }
107 |
108 | /** Extension methods for hstore Columns */
109 | class HStoreColumnExtensionMethods[P1](val c: Rep[P1])(implicit tm: JdbcType[Map[String, String]])
110 | extends ExtensionMethods[Map[String, String], P1] {
111 |
112 | val Contains = new SqlOperator("@>")
113 |
114 | protected implicit def b1Type: TypedType[Map[String, String]] = implicitly[TypedType[Map[String, String]]]
115 |
116 | def @>[P2, R](c2: Rep[P2])(implicit om: o#arg[Map[String, String], P2]#to[Boolean, R]): Rep[R] =
117 | om.column(Contains, n, c2.toNode)
118 | }
119 |
120 | }
121 |
--------------------------------------------------------------------------------
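The hstore mapping and the @> containment operator defined above become available in ordinary Slick queries once a profile's api is imported. A self-contained sketch; the items table is made up for illustration, and PgPostgresProfileImpl is defined in PgPostgresProfile.scala below:

import akka.persistence.pg.PgPostgresProfileImpl

object HstoreQueryExample {
  // a profile wired for jsonb columns, as PluginConfig.pgPostgresProfile does
  val profile = new PgPostgresProfileImpl("jsonb")
  import profile.api._

  // throwaway table with an hstore-mapped column
  class Items(tag: Tag) extends Table[(Long, Map[String, String])](tag, "items") {
    def id   = column[Long]("id")
    def tags = column[Map[String, String]]("tags")
    def *    = (id, tags)
  }
  val items = TableQuery[Items]

  // hstore containment: rows whose tags contain the given key/value pair
  val byTenant = items.filter(_.tags @> Map("tenant" -> "acme").bind)
}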
/modules/akka-persistence-pg/src/main/scala/akka/persistence/pg/JsonString.scala:
--------------------------------------------------------------------------------
1 | package akka.persistence.pg
2 |
3 | /**
4 | * a simple wrapper around a json string representation
5 | * @param value the wrapped json string
6 | */
7 | case class JsonString(value: String)
8 |
--------------------------------------------------------------------------------
/modules/akka-persistence-pg/src/main/scala/akka/persistence/pg/PgConfig.scala:
--------------------------------------------------------------------------------
1 | package akka.persistence.pg
2 |
3 | import slick.jdbc.JdbcBackend
4 |
5 | trait PgConfig {
6 |
7 | def pluginConfig: PluginConfig
8 | lazy val driver: PgPostgresProfile = pluginConfig.pgPostgresProfile
9 | lazy val database: JdbcBackend.DatabaseDef = pluginConfig.database
10 |
11 | }
12 |
--------------------------------------------------------------------------------
/modules/akka-persistence-pg/src/main/scala/akka/persistence/pg/PgExtension.scala:
--------------------------------------------------------------------------------
1 | package akka.persistence.pg
2 |
3 | import akka.actor._
4 | import akka.pattern.ask
5 | import akka.persistence.Persistence
6 | import akka.persistence.pg.journal.RowIdUpdater.IsBusy
7 |
8 | import scala.reflect.ClassTag
9 | import akka.util.Timeout
10 | import scala.concurrent.duration._
11 | import scala.concurrent.Future
12 | import scala.language.postfixOps
13 |
14 | object PgExtension extends ExtensionId[PgExtension] with ExtensionIdProvider {
15 |
16 | override def createExtension(system: ExtendedActorSystem): PgExtension = new PgExtension(system)
17 |
18 | override def lookup = PgExtension
19 |
20 | }
21 |
22 | class PgExtension(system: ExtendedActorSystem) extends Extension {
23 |
24 | val persistence = Persistence(system)
25 |
26 | val pluginConfig = PluginConfig(system)
27 |
28 | system.registerOnTermination {
29 | pluginConfig.shutdownDataSource()
30 | }
31 |
32 | def whenDone[T](t: => Future[T]): Future[T] = {
33 |
34 | import system.dispatcher
35 |
36 | implicit val timeout = Timeout(5 seconds)
37 |
38 | def isBusy: Future[Boolean] =
39 | system
40 | .actorSelection("/user/AkkaPgRowIdUpdater")
41 | .resolveOne()
42 | .flatMap { rowIdUpdater =>
43 | rowIdUpdater ? IsBusy
44 | }
45 | .mapTo[Boolean]
46 | .recover {
47 | case _: ActorNotFound => false
48 | }
49 |
50 | def go(): Future[T] = isBusy.flatMap {
51 | case true => Thread.sleep(100); go()
52 | case false => t
53 | }
54 |
55 | go()
56 |
57 | }
58 |
59 | def terminateWhenReady(): Future[Terminated] = whenDone(system.terminate())
60 |
61 | def getClassFor[T: ClassTag](s: String): Class[_ <: T] =
62 | system.dynamicAccess.getClassFor[T](s).getOrElse(sys.error(s"could not find class with name $s"))
63 |
64 | }
65 |
--------------------------------------------------------------------------------
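A usage sketch of the extension, mirroring how the it/ specs shut down: wait until the RowIdUpdater reports it is no longer busy, then terminate the actor system (the registered termination hook also closes the plugin's data source). Assumes an ActorSystem with the plugin configuration on its classpath:

import akka.actor.ActorSystem
import akka.persistence.pg.PgExtension

import scala.concurrent.Await
import scala.concurrent.duration._

object ShutdownExample extends App {
  val system      = ActorSystem("example")
  val pgExtension = PgExtension(system)

  // terminateWhenReady polls the /user/AkkaPgRowIdUpdater actor before terminating
  Await.result(pgExtension.terminateWhenReady(), 30.seconds)
}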
/modules/akka-persistence-pg/src/main/scala/akka/persistence/pg/PgPostgresProfile.scala:
--------------------------------------------------------------------------------
1 | package akka.persistence.pg
2 |
3 | import slick.jdbc.PostgresProfile
4 |
5 | trait PgPostgresProfile extends PostgresProfile with AkkaPgJdbcTypes {
6 |
7 | override val api = new API with AkkaPgImplicits {}
8 |
9 | }
10 |
11 | class PgPostgresProfileImpl(override val pgjson: String) extends PgPostgresProfile
12 |
--------------------------------------------------------------------------------
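PluginConfig.pgPostgresProfile (next file) instantiates PgPostgresProfileImpl from the pgjson setting; a fixed profile can equally be declared directly. A minimal sketch:

import akka.persistence.pg.PgPostgresProfile

// a singleton profile bound to plain 'json' columns; importing MyPgProfile.api
// brings the JsonString and hstore mappings from AkkaPgJdbcTypes into scope
object MyPgProfile extends PgPostgresProfile {
  override val pgjson: String = "json"
}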
/modules/akka-persistence-pg/src/main/scala/akka/persistence/pg/PluginConfig.scala:
--------------------------------------------------------------------------------
1 | package akka.persistence.pg
2 |
3 | import java.util.Properties
4 |
5 | import akka.actor.{ActorContext, ActorSystem}
6 | import akka.persistence.pg.event._
7 | import akka.persistence.pg.journal.WriteStrategy
8 | import com.typesafe.config.{Config, ConfigFactory}
9 | import org.postgresql.ds.PGSimpleDataSource
10 | import slick.jdbc.JdbcBackend
11 | import slick.util.AsyncExecutor
12 |
13 | import scala.collection.JavaConverters._
14 |
15 | object PluginConfig {
16 | def apply(system: ActorSystem) = new PluginConfig(system.settings.config)
17 |
18 | def apply(config: Config) = new PluginConfig(config)
19 |
20 | def asOption(s: String): Option[String] = if (s.isEmpty) None else Some(s)
21 |
22 | def newInstance[T](clazz: String): T =
23 | Thread
24 | .currentThread()
25 | .getContextClassLoader
26 | .loadClass(clazz)
27 | .asInstanceOf[Class[_ <: T]]
28 | .getDeclaredConstructor()
29 | .newInstance()
30 |
31 | }
32 |
33 | class PluginConfig(systemConfig: Config) {
34 | private val config = systemConfig.getConfig("pg-persistence")
35 |
36 | val schema: Option[String] = PluginConfig.asOption(config.getString("schemaName"))
37 | val schemaName: String = schema.fold("")(n => s""""$n"""")
38 |
39 | def getFullName(partialName: String): String = schema.fold(partialName)(s => s""""$s".$partialName""")
40 |
41 | val journalTableName: String = config.getString("journalTableName")
42 | val fullJournalTableName: String = getFullName(journalTableName)
43 |
44 | val snapshotTableName: String = config.getString("snapshotTableName")
45 | val fullSnapshotTableName: String = getFullName(snapshotTableName)
46 |
47 | val snapshotEncoder: JsonEncoder = PluginConfig
48 | .asOption(config.getString("snapshotEncoder"))
49 | .fold(NoneJsonEncoder: JsonEncoder)(PluginConfig.newInstance[JsonEncoder])
50 |
51 | def shutdownDataSource(): Unit = database.close()
52 |
53 | val jsonType: String = config.getString("pgjson")
54 |
55 | val pgPostgresProfile = new PgPostgresProfileImpl(jsonType match {
56 | case "jsonb" => "jsonb"
57 | case "json" => "json"
58 | case a: String => sys.error(s"unsupported value for pgjson '$a'. Only 'json' or 'jsonb' supported")
59 | })
60 |
61 | lazy val database: JdbcBackend.DatabaseDef = createDatabase
62 |
63 | lazy val dbConfig: Config = config.getConfig("db")
64 |
65 | lazy val numThreads: Int = dbConfig.getInt("numThreads")
66 | lazy val maxConnections: Int =
67 | if (dbConfig.hasPath("maxConnections")) dbConfig.getInt("maxConnections") else numThreads
68 |
69 | def createDatabase: JdbcBackend.DatabaseDef = {
70 | def asyncExecutor(name: String): AsyncExecutor =
71 | AsyncExecutor(s"$name", numThreads, numThreads, dbConfig.getInt("queueSize"), maxConnections)
72 |
73 | val db = PluginConfig.asOption(dbConfig.getString("jndiName")) match {
74 | case Some(jndiName) =>
75 | JdbcBackend.Database.forName(jndiName, Some(maxConnections), asyncExecutor(jndiName))
76 |
77 | case None =>
78 | dbConfig.getString("connectionPool") match {
79 |
80 | case "disabled" =>
81 | val simpleDataSource = new PGSimpleDataSource()
82 | simpleDataSource.setUrl(dbConfig.getString("url"))
83 | simpleDataSource.setUser(dbConfig.getString("user"))
84 | simpleDataSource.setPassword(dbConfig.getString("password"))
85 | simpleDataSource.setPrepareThreshold(1)
86 | JdbcBackend.Database.forDataSource(simpleDataSource, None, asyncExecutor("akkapg-unpooled"))
87 |
88 | case _ =>
89 | //Slick's Database.forConfig does NOT use the 'url' when also configuring using a JDBC DataSource instead of a JDBC Driver class
90 | val props = new Properties()
91 | org.postgresql.Driver.parseURL(dbConfig.getString("url"), new Properties()).asScala foreach {
92 | case ("PGDBNAME", v) => props.put("databaseName", v)
93 | case ("PGHOST", v) => props.put("serverName", v)
94 | case ("PGPORT", v) => props.put("portNumber", v)
95 | case (k, v) => props.put(k, v)
96 | }
97 | val urlConfig = ConfigFactory.parseProperties(props).atPath("properties")
98 | val sourceConfig = dbConfig.withFallback(urlConfig).withoutPath("url").atPath("akkapg-pooled")
99 | JdbcBackend.Database.forConfig("akkapg-pooled", sourceConfig)
100 | }
101 |
102 | }
103 | db
104 | }
105 |
106 | lazy val eventStoreConfig: EventStoreConfig =
107 | EventStoreConfig(config.getConfig("eventstore"), schema, journalTableName)
108 |
109 | lazy val eventStore: Option[EventStore] = {
110 | PluginConfig.asOption(eventStoreConfig.cfg.getString("class")) map { storeName =>
111 | val storeClazz =
112 | Thread.currentThread.getContextClassLoader.loadClass(storeName).asInstanceOf[Class[_ <: EventStore]]
113 | storeClazz.getConstructor(classOf[PluginConfig]).newInstance(this)
114 | }
115 | }
116 |
117 | def writeStrategy(context: ActorContext): WriteStrategy = {
118 | val clazz = config.getString("writestrategy")
119 | val writeStrategyClazz =
120 | Thread.currentThread().getContextClassLoader.loadClass(clazz).asInstanceOf[Class[_ <: WriteStrategy]]
121 | writeStrategyClazz.getConstructor(classOf[PluginConfig], classOf[ActorSystem]).newInstance(this, context.system)
122 | }
123 |
124 | lazy val idForQuery: String =
125 | if (config.getString("writestrategy") == "akka.persistence.pg.journal.RowIdUpdatingStrategy") "rowid"
126 | else "id"
127 |
128 | lazy val ignoreSnapshotDecodingFailure: Boolean =
129 | config.getBoolean("ignoreSnapshotDecodingFailure")
130 |
131 | }
132 |
133 | case class EventStoreConfig(cfg: Config, schema: Option[String], journalTableName: String) {
134 | val idColumnName: String = cfg.getString("idColumnName")
135 | val useView: Boolean = cfg.getBoolean("useView")
136 |
137 | val schemaName: Option[String] = if (useView) PluginConfig.asOption(cfg.getString("schemaName")) else schema
138 |
139 | val tableName: String = if (useView) cfg.getString("tableName") else journalTableName
140 |
141 | val eventEncoder: JsonEncoder = PluginConfig
142 | .asOption(cfg.getString("encoder"))
143 | .fold(NoneJsonEncoder: JsonEncoder)(PluginConfig.newInstance[JsonEncoder])
144 |
145 | val eventTagger: EventTagger = PluginConfig.asOption(cfg.getString("tagger")) match {
146 | case None => NotTagged
147 | case Some("default") => DefaultTagger
148 | case Some(clazz) => PluginConfig.newInstance[EventTagger](clazz)
149 | }
150 |
151 | }
152 |
--------------------------------------------------------------------------------
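The encoder, tagger and snapshotEncoder settings above all go through PluginConfig.newInstance, so any configured class needs a public no-argument constructor. A usage sketch of the helper itself; com.example.MyJsonEncoder is a hypothetical encoder implementation:

import akka.persistence.pg.PluginConfig
import akka.persistence.pg.event.JsonEncoder

object EncoderLoading {
  // loads the class via the context classloader and calls its no-arg constructor
  val encoder: JsonEncoder =
    PluginConfig.newInstance[JsonEncoder]("com.example.MyJsonEncoder")
}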
/modules/akka-persistence-pg/src/main/scala/akka/persistence/pg/event/Created.scala:
--------------------------------------------------------------------------------
1 | package akka.persistence.pg.event
2 |
3 | import java.time.OffsetDateTime
4 |
5 | trait Created {
6 |
7 | def created: OffsetDateTime
8 |
9 | }
10 |
--------------------------------------------------------------------------------
/modules/akka-persistence-pg/src/main/scala/akka/persistence/pg/event/EventStore.scala:
--------------------------------------------------------------------------------
1 | package akka.persistence.pg.event
2 |
3 | import java.time.OffsetDateTime
4 |
5 | import akka.persistence.pg.{EventStoreConfig, JsonString, PgConfig}
6 | import slick.dbio.Effect.{Read, Transactional}
7 | import slick.jdbc.{ResultSetConcurrency, ResultSetType}
8 |
9 | case class Event(
10 | id: Long,
11 | persistenceId: String,
12 | sequenceNr: Long,
13 | uuid: String,
14 | created: OffsetDateTime,
15 | tags: Map[String, String],
16 | className: String,
17 | event: JsonString
18 | )
19 |
20 | case class StoredEvent(persistenceId: String, event: Any)
21 |
22 | trait EventStore {
23 | self: PgConfig =>
24 |
25 | def eventStoreConfig: EventStoreConfig = pluginConfig.eventStoreConfig
26 |
27 | import driver.api._
28 |
29 | //This is basically just another mapping on the same journal table; ideally you would create a DB view
30 | class EventsTable(tag: Tag)
31 | extends Table[Event](tag, pluginConfig.eventStoreConfig.schemaName, pluginConfig.eventStoreConfig.tableName) {
32 |
33 | def id = column[Long](pluginConfig.idForQuery)
34 | def persistenceId = column[String]("persistenceid")
35 | def sequenceNr = column[Long]("sequencenr")
36 | def uuid = column[String]("uuid")
37 | def created =
38 | column[OffsetDateTime]("created")(date2TzTimestampTypeMapper) // use explicit type because slick 3.3.x introduced its own which would use varchar for the db column
39 | def tags = column[Map[String, String]]("tags")
40 | def className = column[String]("manifest")
41 | def event = column[JsonString]("event")
42 |
43 | def * = (id, persistenceId, sequenceNr, uuid, created, tags, className, event) <> (Event.tupled, Event.unapply)
44 |
45 | }
46 |
47 | val events: TableQuery[EventsTable] = TableQuery[EventsTable]
48 |
49 | /**
50 | * find all events for a specific persistenceId
51 | * @param persistenceId the persistenceId
52 | */
53 | def findEvents(persistenceId: String): Query[EventsTable, Event, Seq] =
54 | events
55 | .filter(_.persistenceId === persistenceId)
56 | .filter(_.event.?.isDefined)
57 |
58 | /**
59 | * find all events starting from a specific id with specific tags
60 | * @param fromId the id to start from
61 | * @param tags the tags that must be present
62 | * @param max maximum number of events to return
63 | * @return the list of corresponding events
64 | */
65 | def findEvents(
66 | fromId: Long,
67 | tags: Map[String, String] = Map.empty,
68 | max: Long = Long.MaxValue
69 | ): Query[EventsTable, Event, Seq] =
70 | events
71 | .filter(_.id >= fromId)
72 | .filter(_.event.?.isDefined)
73 | .filter(_.tags @> tags.bind)
74 | .sortBy(_.id)
75 | .take(max)
76 |
77 | def allEvents(): DBIOAction[Seq[Event], Streaming[Event], Read with Transactional] =
78 | findEvents(0).result
79 | .withStatementParameters(
80 | rsType = ResultSetType.ForwardOnly,
81 | rsConcurrency = ResultSetConcurrency.ReadOnly,
82 | fetchSize = 1000
83 | )
84 | .transactionally
85 |
86 | def toDomainEvent[T](e: Event): T = {
87 | val clazz = Class.forName(e.className)
88 | eventStoreConfig.eventEncoder.fromJson((e.event, clazz)).asInstanceOf[T]
89 | }
90 |
91 | }
92 |
--------------------------------------------------------------------------------
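PluginConfig.eventStore instantiates a configured store reflectively via getConstructor(classOf[PluginConfig]), so a concrete EventStore needs exactly that constructor shape. A minimal sketch that also runs one of the queries defined above:

import akka.persistence.pg.{PgConfig, PluginConfig}
import akka.persistence.pg.event.{Event, EventStore}

import scala.concurrent.Future

// constructor shape required by PluginConfig.eventStore's reflective lookup
class MyEventStore(override val pluginConfig: PluginConfig) extends EventStore with PgConfig {

  import driver.api._

  // all events of a given persistenceId, using the query defined above
  def eventsFor(persistenceId: String): Future[Seq[Event]] =
    database.run(findEvents(persistenceId).result)
}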
/modules/akka-persistence-pg/src/main/scala/akka/persistence/pg/event/EventTagger.scala:
--------------------------------------------------------------------------------
1 | package akka.persistence.pg.event
2 |
3 | trait EventTagger {
4 |
5 | /**
6 | * @param event the event/message (argument of persist call)
7 | * @return the tags to persist.
8 | */
9 | def tags(event: Any): Map[String, String]
10 |
11 | }
12 |
13 | object NotTagged extends EventTagger {
14 |
15 | override def tags(event: Any) = Map.empty
16 |
17 | }
18 |
19 | object DefaultTagger extends EventTagger {
20 |
21 | override def tags(event: Any) = event match {
22 | case t: Tagged => t.tags
23 | case _ => Map.empty
24 | }
25 |
26 | }
27 |
--------------------------------------------------------------------------------
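A custom tagger is plugged in by putting its fully-qualified class name in the eventstore.tagger setting; since it is instantiated via PluginConfig.newInstance, it must be a class with a no-arg constructor rather than an object. An illustrative implementation:

import akka.persistence.pg.event.EventTagger

// tags every event with its simple class name; purely illustrative
class ClassNameTagger extends EventTagger {
  override def tags(event: Any): Map[String, String] =
    Map("type" -> event.getClass.getSimpleName)
}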
/modules/akka-persistence-pg/src/main/scala/akka/persistence/pg/event/EventWrapper.scala:
--------------------------------------------------------------------------------
1 | package akka.persistence.pg.event
2 |
3 | trait EventWrapper[E] {
4 |
5 | def event: E
6 |
7 | }
8 |
--------------------------------------------------------------------------------
/modules/akka-persistence-pg/src/main/scala/akka/persistence/pg/event/ExtraDBIOSupport.scala:
--------------------------------------------------------------------------------
1 | package akka.persistence.pg.event
2 |
3 | import slick.dbio.DBIO
4 |
5 | trait ExtraDBIOSupport {
6 |
7 | def extraDBIO: DBIO[_]
8 | def failureHandler: PartialFunction[Throwable, Unit]
9 |
10 | }
11 |
--------------------------------------------------------------------------------
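Events mixing in this trait get their extraDBIO action picked up by JournalStore.toJournalEntries (later in this dump) so it can be executed together with the journal insert; this is the hook the read-model tests use. A hypothetical sketch; the counters table is an assumption for illustration:

import akka.persistence.pg.event.ExtraDBIOSupport
import slick.jdbc.PostgresProfile.api._

// alongside being journaled, bump a read-model counter
case class CounterIncremented(name: String) extends ExtraDBIOSupport {

  override def extraDBIO: DBIO[_] =
    sqlu"update counters set value = value + 1 where name = $name"

  // no failures are swallowed here; a real handler could translate DB errors
  override def failureHandler: PartialFunction[Throwable, Unit] =
    PartialFunction.empty
}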
/modules/akka-persistence-pg/src/main/scala/akka/persistence/pg/event/JsonEncoder.scala:
--------------------------------------------------------------------------------
1 | package akka.persistence.pg.event
2 |
3 | import akka.persistence.pg.JsonString
4 |
5 | trait JsonEncoder {
6 |
7 | /**
8 | * A partial function that serializes an event to a json representation
9 | * @return the json representation
10 | */
11 | def toJson: PartialFunction[Any, JsonString]
12 |
13 | /**
14 | * A partial function that deserializes an event from some json representation
15 | * @return the event
16 | */
17 | def fromJson: PartialFunction[(JsonString, Class[_]), AnyRef]
18 |
19 | }
20 |
21 | object NoneJsonEncoder extends JsonEncoder {
22 |
23 | override def toJson: PartialFunction[Any, JsonString] = PartialFunction.empty[Any, JsonString]
24 |
25 | override def fromJson: PartialFunction[(JsonString, Class[_]), AnyRef] =
26 | PartialFunction.empty[(JsonString, Class[_]), AnyRef]
27 |
28 | }
29 |
--------------------------------------------------------------------------------
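An illustrative encoder for a single event type, with hand-rolled json to stay dependency-free; a real encoder would delegate to a json library. Note that JournalStore.serializePayload (later in this dump) requires fromJson to be able to decode whatever toJson encodes:

import akka.persistence.pg.JsonString
import akka.persistence.pg.event.JsonEncoder

case class Incremented(count: Int)

class IncrementedEncoder extends JsonEncoder {

  override def toJson: PartialFunction[Any, JsonString] = {
    case Incremented(count) => JsonString(s"""{"count":$count}""")
  }

  override def fromJson: PartialFunction[(JsonString, Class[_]), AnyRef] = {
    case (json, clazz) if clazz == classOf[Incremented] =>
      // crude extraction of the single numeric field, for illustration only
      Incremented("""-?\d+""".r.findFirstIn(json.value).map(_.toInt).getOrElse(0))
  }
}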
/modules/akka-persistence-pg/src/main/scala/akka/persistence/pg/event/Tagged.scala:
--------------------------------------------------------------------------------
1 | package akka.persistence.pg.event
2 |
3 | trait Tagged {
4 |
5 | def tags: Map[String, String]
6 |
7 | }
8 |
--------------------------------------------------------------------------------
/modules/akka-persistence-pg/src/main/scala/akka/persistence/pg/journal/JournalStore.scala:
--------------------------------------------------------------------------------
1 | package akka.persistence.pg.journal
2 |
3 | import java.time.OffsetDateTime
4 | import java.util.UUID
5 |
6 | import akka.persistence.PersistentRepr
7 | import akka.persistence.pg.event._
8 | import akka.persistence.pg.{EventTag, JsonString, PgConfig, PgExtension}
9 | import akka.serialization.{Serialization, Serializers}
10 |
11 | import scala.util.Try
12 |
13 | /**
14 | * The journal/event store: it stores persistent messages.
15 | * Either payload or event must be NOT NULL
16 | */
17 | trait JournalStore extends JournalTable {
18 | self: PgConfig =>
19 |
20 | def serialization: Serialization
21 | def pgExtension: PgExtension
22 | def eventEncoder: JsonEncoder = pluginConfig.eventStoreConfig.eventEncoder
23 | def eventTagger: EventTagger = pluginConfig.eventStoreConfig.eventTagger
24 |
25 | import driver.api._
26 |
27 | case class ExtraDBIOInfo(action: DBIO[_], failureHandler: PartialFunction[Throwable, Unit])
28 | case class JournalEntryInfo(entry: JournalEntry, payload: Any, extraDBIOInfo: Option[ExtraDBIOInfo])
29 |
30 | private[this] def serializePayload(payload: Any): (Option[JsonString], Option[Array[Byte]]) =
31 | if (eventEncoder.toJson.isDefinedAt(payload)) {
32 | val json = eventEncoder.toJson(payload)
33 | require(
34 | eventEncoder.fromJson.isDefinedAt((json, payload.getClass)),
35 | s"You MUST always be able to decode what you encoded, fromJson method is incomplete for ${payload.getClass}"
36 | )
37 | (Some(json), None)
38 | } else {
39 | val o: AnyRef = payload.asInstanceOf[AnyRef]
40 | (None, Some(serialization.findSerializerFor(o).toBinary(o)))
41 | }
42 |
43 | /**
44 | * Returns the timestamp an event was created.
45 | * By default this will return the created timestamp if your event extends Created, otherwise it returns the current time.
46 | * @param event any event
47 | * @return the timestamp this event was created
48 | */
49 | def getCreated(event: Any): OffsetDateTime = event match {
50 | case e: Created => e.created
51 | case _ => OffsetDateTime.now()
52 | }
53 |
54 | /**
55 | * Returns a unique id for this event. By default this just generates a new UUID.
56 | * @param event any event
57 | * @return the unique id of the event
58 | */
59 | def getUuid(event: Any): String =
60 | UUID.randomUUID.toString
61 |
62 | def toJournalEntries(messages: Seq[PersistentRepr]): Try[Seq[JournalEntryInfo]] =
63 | Try {
64 | messages map { message =>
65 | val event = message.payload match {
66 | case w: EventWrapper[_] => w.event
67 | case _ => message.payload
68 | }
69 | val tags: Map[String, String] = eventTagger.tags(message.payload)
70 | val update: Option[ExtraDBIOInfo] = message.payload match {
71 | case r: ExtraDBIOSupport => Some(ExtraDBIOInfo(r.extraDBIO, r.failureHandler))
72 | case _ => None
73 | }
74 |
75 | val (payloadAsJson, payloadAsBytes) = serializePayload(event)
76 | JournalEntryInfo(
77 | JournalEntry(
78 | None,
79 | None,
80 | message.persistenceId,
81 | message.sequenceNr,
82 | deleted = false,
83 | payloadAsBytes,
84 | if (payloadAsJson.nonEmpty) {
85 | event.getClass.getName
86 | } else {
87 | event match {
88 | case ref: AnyRef =>
89 | val s = serialization.findSerializerFor(ref)
90 | s"${s.identifier}:${Serializers.manifestFor(s, ref)}"
91 | case _ => event.getClass.getName
92 | }
93 | },
94 | getUuid(event),
95 | message.writerUuid,
96 | getCreated(event),
97 | tags,
98 | payloadAsJson
99 | ),
100 | event,
101 | update
102 | )
103 | }
104 | }
105 |
106 | def toPersistentRepr(entry: JournalEntry): PersistentRepr = {
107 | def toRepr(a: Any) =
108 | PersistentRepr(
109 | payload = a,
110 | sequenceNr = entry.sequenceNr,
111 | persistenceId = entry.persistenceId,
112 | manifest = entry.manifest,
113 | deleted = entry.deleted,
114 | sender = null, //sender ActorRef
115 | writerUuid = entry.writerUuid
116 | )
117 |
118 | (entry.payload, entry.json) match {
119 | case (Some(payload), _) =>
120 | toRepr(entry.serializerId match {
121 | case None => serialization.deserialize(payload, pgExtension.getClassFor[AnyRef](entry.manifest)).get
122 | case Some(id) => serialization.deserialize(payload, id, entry.manifest).get
123 | })
124 | case (_, Some(event)) => toRepr(eventEncoder.fromJson((event, pgExtension.getClassFor[Any](entry.manifest))))
125 | case (None, None) => sys.error(s"""both payload and event are null for journal table entry
126 | with id=${entry.id}, (persistenceid='${entry.persistenceId}' and sequencenr='${entry.sequenceNr}')
127 | This should NEVER happen!""")
128 | }
129 | }
130 |
131 | /**
132 | * build a 'or' filter for tags
133 | * will select Events containing at least one of the EventTags
134 | */
135 | protected def tagsFilter(tags: Set[EventTag]): JournalTable => Rep[Boolean] = { table: JournalTable =>
136 | {
137 | tags
138 | .map { case (tagKey, tagValue) => table.tags @> Map(tagKey -> tagValue.value).bind }
139 | .reduceLeftOption(_ || _)
140 | .getOrElse(false: Rep[Boolean])
141 | }
142 | }
143 |
144 | }
145 |
--------------------------------------------------------------------------------
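toJournalEntries above unwraps EventWrapper payloads before serialization but passes the full payload to the event tagger, so a wrapper can carry tags without them ending up inside the stored event. A sketch of that pattern:

import akka.persistence.pg.event.{EventWrapper, Tagged}

// persist(TaggedEvent(domainEvent, Map("tenant" -> "acme"))): the tags are seen
// by the DefaultTagger, while only the wrapped event is serialized and stored
case class TaggedEvent[E](event: E, tags: Map[String, String]) extends EventWrapper[E] with Tagged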
/modules/akka-persistence-pg/src/main/scala/akka/persistence/pg/journal/JournalTable.scala:
--------------------------------------------------------------------------------
1 | package akka.persistence.pg.journal
2 |
3 | import java.time.OffsetDateTime
4 |
5 | import akka.persistence.pg.{JsonString, PgConfig}
6 |
7 | import scala.util.Try
8 |
9 | case class JournalEntry(
10 | id: Option[Long],
11 | rowid: Option[Long],
12 | persistenceId: String,
13 | sequenceNr: Long,
14 | deleted: Boolean,
15 | payload: Option[Array[Byte]],
16 | serializerId_manifest: String,
17 | uuid: String,
18 | writerUuid: String,
19 | created: OffsetDateTime,
20 | tags: Map[String, String],
21 | json: Option[JsonString]
22 | ) {
23 |
24 | lazy val serializerId
25 | : Option[Int] = Try(serializerId_manifest.substring(0, serializerId_manifest.indexOf(':')).toInt).toOption
26 | lazy val manifest = serializerId_manifest.substring(serializerId_manifest.indexOf(':') + 1)
27 |
28 | }
29 |
30 | /**
31 | * The journal/event store: it stores persistent messages.
32 | * Either payload or event must be NOT NULL
33 | */
34 | trait JournalTable {
35 | self: PgConfig =>
36 |
37 | import driver.api._
38 |
39 | case class JournalEntryWithExtraDBIO(entry: JournalEntry, extraDBIO: Seq[DBIO[_]])
40 |
41 | class JournalTable(tag: Tag) extends Table[JournalEntry](tag, pluginConfig.schema, pluginConfig.journalTableName) {
42 |
43 | def id = column[Long]("id", O.AutoInc)
44 | def rowid = column[Option[Long]]("rowid")
45 | def persistenceId = column[String]("persistenceid")
46 | def sequenceNr = column[Long]("sequencenr")
47 | def deleted = column[Boolean]("deleted", O.Default(false))
48 | def payload = column[Option[Array[Byte]]]("payload")
49 | def manifest = column[String]("manifest")
50 | def uuid = column[String]("uuid")
51 | def writerUuid = column[String]("writeruuid")
52 | def created = column[OffsetDateTime]("created", O.Default(OffsetDateTime.now()))(date2TzTimestampTypeMapper)
53 | def tags = column[Map[String, String]]("tags", O.Default(Map.empty))
54 | def event = column[Option[JsonString]]("event")
55 |
56 | def idForQuery =
57 | if (pluginConfig.idForQuery == "rowid") rowid
58 | else id.?
59 |
60 | def pk = primaryKey(s"${pluginConfig.journalTableName}_pk", (persistenceId, sequenceNr))
61 |
62 | def * =
63 | (id.?, rowid, persistenceId, sequenceNr, deleted, payload, manifest, uuid, writerUuid, created, tags, event) <>
64 | (JournalEntry.tupled, JournalEntry.unapply)
65 |
66 | }
67 |
68 | val journals: TableQuery[JournalTable] = TableQuery[JournalTable]
69 |
70 | }
71 |
--------------------------------------------------------------------------------
/modules/akka-persistence-pg/src/main/scala/akka/persistence/pg/journal/Notifier.scala:
--------------------------------------------------------------------------------
1 | package akka.persistence.pg.journal
2 |
3 | class Notifier(entries: Seq[JournalEntry], writeJournal: PgAsyncWriteJournal) {
4 |
5 | def eventsAvailable(): Unit =
6 | writeJournal.notifyEventsAvailable(entries)
7 |
8 | }
9 |
--------------------------------------------------------------------------------
/modules/akka-persistence-pg/src/main/scala/akka/persistence/pg/journal/ReadJournalStore.scala:
--------------------------------------------------------------------------------
1 | package akka.persistence.pg.journal
2 |
3 | import akka.NotUsed
4 | import akka.persistence.PersistentRepr
5 | import akka.persistence.pg.{EventTag, PgConfig}
6 | import akka.stream.scaladsl.Source
7 | import slick.jdbc.{ResultSetConcurrency, ResultSetType}
8 |
9 | trait ReadJournalStore extends JournalStore { self: PgConfig =>
10 |
11 | import driver.api._
12 |
13 | def currentPersistenceIds(): Source[String, NotUsed] = {
14 | val query = journals
15 | .map(_.persistenceId)
16 | .distinct
17 | .result
18 | .withStatementParameters(
19 | rsType = ResultSetType.ForwardOnly,
20 | rsConcurrency = ResultSetConcurrency.ReadOnly,
21 | fetchSize = 1000
22 | )
23 | .transactionally
24 |
25 | val publisher = database.stream(query)
26 | Source.fromPublisher(publisher)
27 | }
28 |
29 | def currentEvents(
30 | persistenceId: String,
31 | fromSequenceNr: Long,
32 | toSequenceNr: Long
33 | ): Source[PersistentRepr, NotUsed] = {
34 | val query = journals
35 | .filter(_.persistenceId === persistenceId)
36 | .filter(_.sequenceNr >= fromSequenceNr)
37 | .filter(_.sequenceNr <= toSequenceNr)
38 | .sortBy(_.sequenceNr)
39 | .result
40 | .withStatementParameters(
41 | rsType = ResultSetType.ForwardOnly,
42 | rsConcurrency = ResultSetConcurrency.ReadOnly,
43 | fetchSize = 1000
44 | )
45 | .transactionally
46 |
47 | val publisher = database.stream(query)
48 | Source.fromPublisher(publisher).map(toPersistentRepr)
49 | }
50 |
51 | def currentEvents(
52 | fromSequenceNr: Long,
53 | toSequenceNr: Long,
54 | maybeTags: Option[Set[EventTag]]
55 | ): Source[PersistentRepr, NotUsed] = {
56 | val tagFilter = maybeTags match {
57 | case Some(tags) => tagsFilter(tags)
58 | case None =>
59 | (_: JournalTable) => true: Rep[Boolean]
60 | }
61 |
62 | val query = journals
63 | .filter(_.idForQuery >= fromSequenceNr)
64 | .filter(_.idForQuery <= toSequenceNr)
65 | .filter(tagFilter)
66 | .sortBy(_.idForQuery)
67 | .result
68 | .withStatementParameters(
69 | rsType = ResultSetType.ForwardOnly,
70 | rsConcurrency = ResultSetConcurrency.ReadOnly,
71 | fetchSize = 1000
72 | )
73 | .transactionally
74 |
75 | val publisher = database.stream(query)
76 | Source.fromPublisher(publisher).map(toPersistentRepr)
77 | }
78 |
79 | }
80 |
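The ForwardOnly/ReadOnly statement parameters with a fetch size, combined with .transactionally, are what make the Postgres JDBC driver stream rows instead of materializing the whole result set. A minimal consumption sketch, assuming an Akka 2.6-style implicit system as materializer and a readJournalStore instance mixing in ReadJournalStore with PgConfig (both illustrative):

  import akka.actor.ActorSystem
  import akka.stream.scaladsl.Sink

  implicit val system: ActorSystem = ActorSystem("example")
  import system.dispatcher

  readJournalStore
    .currentEvents("some-persistence-id", 0L, Long.MaxValue)
    .runWith(Sink.foreach(repr => println(repr.persistenceId -> repr.sequenceNr)))
    .onComplete(_ => system.terminate())
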
--------------------------------------------------------------------------------
/modules/akka-persistence-pg/src/main/scala/akka/persistence/pg/journal/RowIdUpdater.scala:
--------------------------------------------------------------------------------
1 | package akka.persistence.pg.journal
2 |
3 | import akka.actor._
4 | import akka.pattern.pipe
5 | import akka.persistence.pg.journal.RowIdUpdater.{IsBusy, UpdateRowIds}
6 | import akka.persistence.pg.PluginConfig
7 |
8 | import scala.collection.immutable.Queue
9 | import scala.concurrent.Future
10 | import scala.util.control.NonFatal
11 |
12 | object RowIdUpdater {
13 |
14 | case class UpdateRowIds(notifier: Notifier)
15 | case object IsBusy
16 |
17 | def props(pluginConfig: PluginConfig) = Props(new RowIdUpdater(pluginConfig))
18 |
19 | }
20 |
21 | class RowIdUpdater(pluginConfig: PluginConfig) extends Actor with Stash with ActorLogging {
22 |
23 | import context.dispatcher
24 |
25 | private case object Init
26 | private case object Marker
27 | private case object Done
28 | private case object Continue
29 | private case class MaxRowId(rowid: Long)
30 |
31 | //TODO make configurable
32 | val max = 20000
33 |
34 | var maxRowId: Long = _
35 | var notifiers: Queue[Notifier] = Queue.empty
36 |
37 | //start initializing => find max rowid
38 | self ! Init
39 |
40 | override def receive: Receive = initializing
41 |
42 | def initializing: Receive = {
43 | case IsBusy => sender() ! true
44 | case UpdateRowIds(_) => stash()
45 | case Init =>
46 | findMaxRowId() map { MaxRowId } pipeTo self
47 | ()
48 | case MaxRowId(rowid) =>
49 | maxRowId = rowid
50 | unstashAll()
51 | context become waitingForUpdateRequest
52 | }
53 |
54 | def waitingForUpdateRequest: Receive = {
55 | case UpdateRowIds(notifier) =>
56 | notifiers = notifiers.enqueue(notifier)
57 | self ! Marker
58 | context become ignoreUntilMarker
59 | case IsBusy => sender() ! false
60 | }
61 |
62 | def ignoreUntilMarker: Receive = { // batch further requests until our own Marker message comes back
63 | case IsBusy => sender() ! true
64 | case UpdateRowIds(notifier) =>
65 | notifiers = notifiers.enqueue(notifier)
66 | case Marker =>
67 | assignRowIds() map { updated =>
68 | if (updated == max) Continue else Done
69 | } recover {
70 | case NonFatal(t) => log.error(t, "could not update rowids"); Done
71 | } pipeTo self
72 | context become updateRunning
73 | }
74 |
75 | def updateRunning: Receive = { // an assignRowIds batch is in flight
76 | case IsBusy => sender() ! true
77 | case Done =>
78 | unstashAll()
79 | notifyEventsAvailable()
80 | context become waitingForUpdateRequest
81 | case Continue =>
82 | unstashAll()
83 | notifyEventsAvailable()
84 | self ! Marker
85 | context become ignoreUntilMarker
86 | case UpdateRowIds(_) =>
87 | stash()
88 | }
89 |
90 | def notifyEventsAvailable(): Unit = {
91 | notifiers.foreach { _.eventsAvailable() }
92 | notifiers = Queue.empty
93 | }
94 |
95 | import pluginConfig.pgPostgresProfile.api._
96 |
97 | def findMaxRowId(): Future[Long] =
98 | pluginConfig.database
99 | .run(sql"""SELECT COALESCE(MAX(rowid), 0::bigint) FROM #${pluginConfig.fullJournalTableName}""".as[Long])
100 | .map(_(0))
101 |
102 | def assignRowIds(): Future[Int] = {
103 | var updated = 0
104 | pluginConfig.database
105 | .run(
106 | sql"""SELECT id FROM #${pluginConfig.fullJournalTableName} WHERE rowid IS NULL ORDER BY id limit #$max"""
107 | .as[Long]
108 | .flatMap { ids =>
109 | updated += ids.size
110 | if (updated > 0) {
111 | val values = ids
112 | .map { id =>
113 | maxRowId += 1
114 | s"($id, $maxRowId)"
115 | }
116 | .mkString(",")
117 | sqlu"""UPDATE #${pluginConfig.fullJournalTableName} SET rowid = data_table.rowid
118 | FROM (VALUES #$values) as data_table (id, rowid)
119 | WHERE #${pluginConfig.fullJournalTableName}.id = data_table.id"""
120 | } else {
121 | DBIO.successful(())
122 | }
123 | }
124 | )
125 | .map { _ =>
126 | log.debug("updated rowid for {} rows", updated)
127 | updated
128 | }
129 | }
130 |
131 | }
132 |
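The actor above cycles through initializing, waitingForUpdateRequest, ignoreUntilMarker and updateRunning, using the self-sent Marker to batch every UpdateRowIds request that arrives while an update is running. The IsBusy message is answered in every state, so callers can poll for quiescence; a hedged sketch (the actor ref and the timeout are illustrative):

  import akka.actor.ActorRef
  import akka.pattern.ask
  import akka.util.Timeout
  import scala.concurrent.Future
  import scala.concurrent.duration._

  implicit val timeout: Timeout = 5.seconds

  // true while the updater is still initializing or an update batch is running
  def isBusy(rowIdUpdater: ActorRef): Future[Boolean] =
    (rowIdUpdater ? RowIdUpdater.IsBusy).mapTo[Boolean]
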
--------------------------------------------------------------------------------
/modules/akka-persistence-pg/src/main/scala/akka/persistence/pg/journal/StoreActor.scala:
--------------------------------------------------------------------------------
1 | package akka.persistence.pg.journal
2 |
3 | import akka.actor._
4 | import akka.pattern.pipe
5 | import akka.persistence.pg.PluginConfig
6 | import akka.persistence.pg.journal.StoreActor.{Store, StoreSuccess}
7 |
8 | import scala.util.control.NonFatal
9 | import scala.util.{Failure, Success, Try}
10 |
11 | private object StoreActor {
12 |
13 | def props(pluginConfig: PluginConfig) = Props(new StoreActor(pluginConfig))
14 |
15 | import slick.dbio.DBIO
16 |
17 | case class Store(actions: Seq[DBIO[_]])
18 | case object StoreSuccess
19 | }
20 |
21 | private class StoreActor(pluginConfig: PluginConfig) extends Actor with ActorLogging {
22 |
23 | case class Done(senders: List[ActorRef], result: Try[Unit])
24 | case object Run
25 |
26 | import context.dispatcher
27 | import pluginConfig.pgPostgresProfile.api._
28 |
29 | private var senders: List[ActorRef] = List.empty[ActorRef]
30 | private var actions: Seq[pluginConfig.pgPostgresProfile.api.DBIO[_]] = Seq.empty[DBIO[_]]
31 |
32 | override def receive: Receive = idle
33 |
34 | def idle: Receive = { // no transaction in flight: accept Store actions, flush them on Run
35 | case Store(as) =>
36 | this.actions ++= as
37 | this.senders :+= sender()
38 | self ! Run
39 | case Run =>
40 | if (senders.nonEmpty) {
41 | val _senders = senders
42 | pluginConfig.database
43 | .run(DBIO.seq(this.actions: _*).transactionally)
44 | .map { _ =>
45 | Done(_senders, Success(()))
46 | }
47 | .recover { case NonFatal(t) => Done(_senders, Failure(t)) }
48 | .pipeTo(self)
49 | this.actions = Seq.empty
50 | this.senders = List.empty
51 | context become busy
52 | }
53 | }
54 |
55 | def busy: Receive = { // a transaction is in flight: keep batching incoming Store actions
56 | case Done(ss, r) =>
57 | r match {
58 | case Success(_) => ss foreach { _ ! StoreSuccess }
59 | case Failure(t) => ss foreach { _ ! Status.Failure(t) }
60 | }
61 | context become idle
62 | self ! Run
63 | case Store(a) =>
64 | this.actions ++= a
65 | this.senders :+= sender()
66 | }
67 |
68 | }
69 |
--------------------------------------------------------------------------------
/modules/akka-persistence-pg/src/main/scala/akka/persistence/pg/journal/TestPgAsyncWriteJournal.scala:
--------------------------------------------------------------------------------
1 | package akka.persistence.pg.journal
2 |
3 | import akka.persistence.pg.util.PluginTestConfig
4 |
5 | class TestPgAsyncWriteJournal extends PgAsyncWriteJournal {
6 |
7 | override lazy val pluginConfig = new PluginTestConfig(context.system)
8 |
9 | }
10 |
--------------------------------------------------------------------------------
/modules/akka-persistence-pg/src/main/scala/akka/persistence/pg/journal/WriteStrategy.scala:
--------------------------------------------------------------------------------
1 | package akka.persistence.pg.journal
2 |
3 | import java.util.concurrent.TimeUnit
4 |
5 | import akka.actor.{ActorRef, ActorSystem, Status}
6 | import akka.persistence.pg.PluginConfig
7 | import akka.persistence.pg.journal.StoreActor.{Store, StoreSuccess}
8 | import akka.pattern.ask
9 | import akka.util.Timeout
10 |
11 | import scala.concurrent.{ExecutionContext, Future}
12 |
13 | trait WriteStrategy {
14 |
15 | def pluginConfig: PluginConfig
16 | lazy val driver = pluginConfig.pgPostgresProfile
17 |
18 | import driver.api._
19 |
20 | def store(actions: Seq[DBIO[_]], notifier: Notifier)(implicit executionContext: ExecutionContext): Future[Unit]
21 | def system: ActorSystem
22 |
23 | }
24 |
25 | class SingleThreadedBatchWriteStrategy(override val pluginConfig: PluginConfig, override val system: ActorSystem)
26 | extends WriteStrategy {
27 |
28 | import driver.api._
29 | implicit val timeout = Timeout(10, TimeUnit.SECONDS)
30 |
31 | private val eventStoreActor: ActorRef = system.actorOf(StoreActor.props(pluginConfig))
32 |
33 | override def store(actions: Seq[DBIO[_]], notifier: Notifier)(
34 | implicit executionContext: ExecutionContext
35 | ): Future[Unit] =
36 | eventStoreActor ? Store(actions) flatMap {
37 | case StoreSuccess => Future.successful(())
38 | case Status.Failure(t) => Future.failed(t)
39 | } map { _ =>
40 | notifier.eventsAvailable()
41 | }
42 |
43 | }
44 |
45 | /**
46 | * This write strategy can lead to missing events; it is only useful as a benchmarking baseline.
47 | *
48 | * @param pluginConfig
49 | * @param system
50 | */
51 | class TransactionalWriteStrategy(override val pluginConfig: PluginConfig, override val system: ActorSystem)
52 | extends WriteStrategy {
53 |
54 | system.log.warning(
55 | """
56 | |!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
57 | |! !
58 | |! TransactionalWriteStrategy is configured: !
59 | |! !
60 |       |! A possible, and indeed likely, consequence is that some events might be missed while reading             !
61 | |! This strategy is only useful for benchmarking! !
62 | |! Use with caution, YOLO !!! !
63 | |! !
64 | |!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
65 | """.stripMargin
66 | )
67 |
68 | import pluginConfig.pgPostgresProfile.api._
69 |
70 | def store(actions: Seq[DBIO[_]], notifier: Notifier)(implicit executionContext: ExecutionContext): Future[Unit] =
71 | pluginConfig.database
72 | .run {
73 | DBIO.seq(actions: _*).transactionally
74 | }
75 | .map { _ =>
76 | notifier.eventsAvailable()
77 | }
78 | }
79 |
80 | class TableLockingWriteStrategy(override val pluginConfig: PluginConfig, override val system: ActorSystem)
81 | extends WriteStrategy {
82 |
83 | import pluginConfig.pgPostgresProfile.api._
84 |
85 | def store(actions: Seq[DBIO[_]], notifier: Notifier)(implicit executionContext: ExecutionContext): Future[Unit] =
86 | pluginConfig.database
87 | .run {
88 | DBIO
89 | .seq(
90 | (sqlu"""lock table #${pluginConfig.fullJournalTableName} in share row exclusive mode"""
91 | +: actions): _*
92 | )
93 | .transactionally
94 | }
95 | .map { _ =>
96 | notifier.eventsAvailable()
97 | }
98 |
99 | }
100 |
101 | class RowIdUpdatingStrategy(override val pluginConfig: PluginConfig, override val system: ActorSystem)
102 | extends WriteStrategy {
103 |
104 | import driver.api._
105 |
106 | private val rowIdUpdater: ActorRef = system.actorOf(RowIdUpdater.props(pluginConfig), "AkkaPgRowIdUpdater")
107 |
108 | def store(actions: Seq[DBIO[_]], notifier: Notifier)(implicit executionContext: ExecutionContext): Future[Unit] =
109 | pluginConfig.database
110 | .run(DBIO.seq(actions: _*).transactionally)
111 | .map { _ =>
112 | rowIdUpdater ! RowIdUpdater.UpdateRowIds(notifier)
113 | }
114 |
115 | }
116 |
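Which of these strategies gets instantiated is driven by the pg-persistence.writestrategy setting, as the test configs later in this document (pg-eventstore-locking.conf, pg-eventstore-rowid.conf) show. A sketch with the HOCON embedded in Scala; the choice of class name here is illustrative:

  import com.typesafe.config.{Config, ConfigFactory}

  // The other strategies in this file can be selected the same way:
  // TransactionalWriteStrategy, TableLockingWriteStrategy,
  // SingleThreadedBatchWriteStrategy.
  val config: Config = ConfigFactory.parseString(
    """pg-persistence {
      |  writestrategy = "akka.persistence.pg.journal.RowIdUpdatingStrategy"
      |}""".stripMargin
  )
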
--------------------------------------------------------------------------------
/modules/akka-persistence-pg/src/main/scala/akka/persistence/pg/package.scala:
--------------------------------------------------------------------------------
1 | package akka.persistence
2 |
3 | package object pg {
4 |
5 | type EventTag = (String, String)
6 | }
7 |
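Since an EventTag is just a key/value pair, tags read naturally as tuples; the values below are illustrative:

  import akka.persistence.pg.EventTag

  val tag: EventTag  = "domain" -> "sales"
  val tags: Set[EventTag] = Set(tag, "country" -> "be")
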
--------------------------------------------------------------------------------
/modules/akka-persistence-pg/src/main/scala/akka/persistence/pg/query/PostgresReadJournalProvider.scala:
--------------------------------------------------------------------------------
1 | package akka.persistence.pg.query
2 |
3 | import akka.actor.ExtendedActorSystem
4 | import akka.persistence.query.ReadJournalProvider
5 | import com.typesafe.config.Config
6 |
7 | class PostgresReadJournalProvider(system: ExtendedActorSystem, config: Config) extends ReadJournalProvider {
8 |
9 | def scaladslReadJournal(): scaladsl.PostgresReadJournal = new scaladsl.PostgresReadJournal(system, config)
10 |
11 | def javadslReadJournal(): javadsl.PostgresReadJournal = new javadsl.PostgresReadJournal(scaladslReadJournal())
12 | }
13 |
--------------------------------------------------------------------------------
/modules/akka-persistence-pg/src/main/scala/akka/persistence/pg/query/javadsl/AllEvents.scala:
--------------------------------------------------------------------------------
1 | package akka.persistence.pg.query.javadsl
2 |
3 | import akka.NotUsed
4 | import akka.persistence.query.EventEnvelope
5 | import akka.persistence.query.javadsl.ReadJournal
6 | import akka.stream.javadsl.Source
7 |
8 | trait AllEvents extends ReadJournal {
9 |
10 | def allEvents(fromRowId: Long, toRowId: Long = Long.MaxValue): Source[EventEnvelope, NotUsed]
11 |
12 | }
13 |
--------------------------------------------------------------------------------
/modules/akka-persistence-pg/src/main/scala/akka/persistence/pg/query/javadsl/CurrentAllEvents.scala:
--------------------------------------------------------------------------------
1 | package akka.persistence.pg.query.javadsl
2 |
3 | import akka.NotUsed
4 | import akka.persistence.query.EventEnvelope
5 | import akka.persistence.query.javadsl.ReadJournal
6 | import akka.stream.javadsl.Source
7 |
8 | trait CurrentAllEvents extends ReadJournal {
9 |
10 | def currentAllEvents(fromRowId: Long, toRowId: Long = Long.MaxValue): Source[EventEnvelope, NotUsed]
11 |
12 | }
13 |
--------------------------------------------------------------------------------
/modules/akka-persistence-pg/src/main/scala/akka/persistence/pg/query/javadsl/CurrentEventsByTags.scala:
--------------------------------------------------------------------------------
1 | package akka.persistence.pg.query.javadsl
2 |
3 | import java.util.{Set => JSet}
4 |
5 | import akka.NotUsed
6 | import akka.persistence.pg.EventTag
7 | import akka.persistence.query.EventEnvelope
8 | import akka.persistence.query.javadsl.ReadJournal
9 | import akka.stream.javadsl.Source
10 |
11 | trait CurrentEventsByTags extends ReadJournal {
12 |
13 | def currentEventsByTags(tags: JSet[EventTag], fromRowId: Long, toRowId: Long): Source[EventEnvelope, NotUsed]
14 |
15 | }
16 |
--------------------------------------------------------------------------------
/modules/akka-persistence-pg/src/main/scala/akka/persistence/pg/query/javadsl/EventsByTags.scala:
--------------------------------------------------------------------------------
1 | package akka.persistence.pg.query.javadsl
2 |
3 | import java.util.{Set => JSet}
4 |
5 | import akka.NotUsed
6 | import akka.persistence.pg.EventTag
7 | import akka.persistence.query.EventEnvelope
8 | import akka.persistence.query.javadsl.ReadJournal
9 | import akka.stream.javadsl.Source
10 |
11 | trait EventsByTags extends ReadJournal {
12 |
13 | def eventsByTags(tags: JSet[EventTag], fromRowId: Long, toRowId: Long): Source[EventEnvelope, NotUsed]
14 |
15 | }
16 |
--------------------------------------------------------------------------------
/modules/akka-persistence-pg/src/main/scala/akka/persistence/pg/query/javadsl/PostgresReadJournal.scala:
--------------------------------------------------------------------------------
1 | package akka.persistence.pg.query.javadsl
2 |
3 | import java.util.{Set => JSet}
4 |
5 | import akka.NotUsed
6 | import akka.persistence.pg.EventTag
7 | import akka.persistence.pg.query.scaladsl.{PostgresReadJournal => ScalaPostgresReadJournal}
8 | import akka.persistence.query.EventEnvelope
9 | import akka.persistence.query.javadsl.{
10 | CurrentEventsByPersistenceIdQuery,
11 | CurrentPersistenceIdsQuery,
12 | EventsByPersistenceIdQuery,
13 | ReadJournal
14 | }
15 | import akka.stream.javadsl.Source
16 |
17 | import scala.collection.JavaConverters._
18 |
19 | class PostgresReadJournal(journal: ScalaPostgresReadJournal)
20 | extends ReadJournal
21 | with EventsByTags
22 | with AllEvents
23 | with EventsByPersistenceIdQuery
24 | with CurrentEventsByTags
25 | with CurrentAllEvents
26 | with CurrentEventsByPersistenceIdQuery
27 | with CurrentPersistenceIdsQuery {
28 |
29 | override def eventsByTags(tags: JSet[EventTag], fromRowId: Long, toRowId: Long): Source[EventEnvelope, NotUsed] =
30 | journal.eventsByTags(tags.asScala.toSet, fromRowId, toRowId).asJava
31 |
32 | override def allEvents(fromRowId: Long, toRowId: Long): Source[EventEnvelope, NotUsed] =
33 | journal.allEvents(fromRowId, toRowId).asJava
34 |
35 | override def eventsByPersistenceId(
36 | persistenceId: String,
37 | fromSequenceNr: Long,
38 | toSequenceNr: Long
39 | ): Source[EventEnvelope, NotUsed] =
40 | journal.eventsByPersistenceId(persistenceId, fromSequenceNr, toSequenceNr).asJava
41 |
42 | override def currentEventsByTags(
43 | tags: JSet[EventTag],
44 | fromRowId: Long,
45 | toRowId: Long
46 | ): Source[EventEnvelope, NotUsed] =
47 | journal.currentEventsByTags(tags.asScala.toSet, fromRowId, toRowId).asJava
48 |
49 | override def currentAllEvents(fromRowId: Long, toRowId: Long): Source[EventEnvelope, NotUsed] =
50 | journal.currentAllEvents(fromRowId, toRowId).asJava
51 |
52 | override def currentEventsByPersistenceId(
53 | persistenceId: String,
54 | fromSequenceNr: Long,
55 | toSequenceNr: Long
56 | ): Source[EventEnvelope, NotUsed] =
57 | journal.currentEventsByPersistenceId(persistenceId, fromSequenceNr, toSequenceNr).asJava
58 |
59 | override def currentPersistenceIds(): Source[String, NotUsed] =
60 | journal.currentPersistenceIds().asJava
61 |
62 | }
63 |
64 | object PostgresReadJournal {
65 |
66 | /**
67 | * The default identifier for [[PostgresReadJournal]] to be used with
68 | * [[akka.persistence.query.PersistenceQuery#readJournalFor]].
69 | *
70 | * The value is `"akka.persistence.pg.journal.query"` and corresponds
71 | * to the absolute path to the read journal configuration entry.
72 | */
73 | final val Identifier = "akka.persistence.pg.journal.query"
74 |
75 | }
76 |
--------------------------------------------------------------------------------
/modules/akka-persistence-pg/src/main/scala/akka/persistence/pg/query/scaladsl/AllEvents.scala:
--------------------------------------------------------------------------------
1 | package akka.persistence.pg.query.scaladsl
2 |
3 | import akka.NotUsed
4 | import akka.persistence.query.EventEnvelope
5 | import akka.persistence.query.scaladsl.ReadJournal
6 | import akka.stream.scaladsl.Source
7 |
8 | trait AllEvents extends ReadJournal {
9 |
10 | def allEvents(fromRowId: Long, toRowId: Long = Long.MaxValue): Source[EventEnvelope, NotUsed]
11 |
12 | }
13 |
--------------------------------------------------------------------------------
/modules/akka-persistence-pg/src/main/scala/akka/persistence/pg/query/scaladsl/CurrentAllEvents.scala:
--------------------------------------------------------------------------------
1 | package akka.persistence.pg.query.scaladsl
2 |
3 | import akka.NotUsed
4 | import akka.persistence.query.EventEnvelope
5 | import akka.persistence.query.scaladsl.ReadJournal
6 | import akka.stream.scaladsl.Source
7 |
8 | trait CurrentAllEvents extends ReadJournal {
9 |
10 | def currentAllEvents(fromRowId: Long, toRowId: Long = Long.MaxValue): Source[EventEnvelope, NotUsed]
11 |
12 | }
13 |
--------------------------------------------------------------------------------
/modules/akka-persistence-pg/src/main/scala/akka/persistence/pg/query/scaladsl/CurrentEventsByTags.scala:
--------------------------------------------------------------------------------
1 | package akka.persistence.pg.query.scaladsl
2 |
3 | import akka.NotUsed
4 | import akka.persistence.pg.EventTag
5 | import akka.persistence.query.EventEnvelope
6 | import akka.persistence.query.scaladsl.ReadJournal
7 | import akka.stream.scaladsl.Source
8 |
9 | trait CurrentEventsByTags extends ReadJournal {
10 |
11 | def currentEventsByTags(tags: Set[EventTag], fromRowId: Long, toRowId: Long): Source[EventEnvelope, NotUsed]
12 |
13 | }
14 |
--------------------------------------------------------------------------------
/modules/akka-persistence-pg/src/main/scala/akka/persistence/pg/query/scaladsl/EventsByTags.scala:
--------------------------------------------------------------------------------
1 | package akka.persistence.pg.query.scaladsl
2 |
3 | import akka.NotUsed
4 | import akka.persistence.pg.EventTag
5 | import akka.persistence.query.EventEnvelope
6 | import akka.persistence.query.scaladsl.ReadJournal
7 | import akka.stream.scaladsl.Source
8 |
9 | trait EventsByTags extends ReadJournal {
10 |
11 | def eventsByTags(tags: Set[EventTag], fromRowId: Long, toRowId: Long): Source[EventEnvelope, NotUsed]
12 |
13 | }
14 |
--------------------------------------------------------------------------------
/modules/akka-persistence-pg/src/main/scala/akka/persistence/pg/query/scaladsl/PostgresReadJournal.scala:
--------------------------------------------------------------------------------
1 | package akka.persistence.pg.query.scaladsl
2 |
3 | import java.net.URLEncoder
4 |
5 | import akka.NotUsed
6 | import akka.actor.ExtendedActorSystem
7 | import akka.persistence.pg.journal.PgAsyncWriteJournal.CancelSubscribers
8 | import akka.persistence.pg.journal.ReadJournalStore
9 | import akka.persistence.pg.{EventTag, PgConfig, PgExtension, PluginConfig}
10 | import akka.persistence.pg.streams.EventsPublisherGraphStage
11 | import akka.persistence.query.scaladsl._
12 | import akka.persistence.query.{EventEnvelope, Offset}
13 | import akka.persistence.{Persistence, PersistentRepr}
14 | import akka.serialization.{Serialization, SerializationExtension}
15 | import akka.stream.scaladsl.Source
16 | import akka.util.ByteString
17 | import com.typesafe.config.Config
18 |
19 | class PostgresReadJournal(system: ExtendedActorSystem, config: Config)
20 | extends ReadJournal
21 | with ReadJournalStore
22 | with PgConfig
23 | with EventsByTags
24 | with AllEvents
25 | with EventsByPersistenceIdQuery
26 | with CurrentEventsByTags
27 | with CurrentAllEvents
28 | with CurrentEventsByPersistenceIdQuery
29 | with CurrentPersistenceIdsQuery {
30 |
31 | private val writeJournalPluginId: String = config.getString("write-plugin")
32 | private val maxBufSize: Int = config.getInt("max-buffer-size")
33 | private val eventAdapters = Persistence(system).adaptersFor(writeJournalPluginId)
34 |
35 | override lazy val pluginConfig: PluginConfig = pgExtension.pluginConfig
36 | override val serialization: Serialization = SerializationExtension(system)
37 | override val pgExtension: PgExtension = PgExtension(system)
38 |
39 | override def eventsByTags(
40 | tags: Set[EventTag],
41 | fromRowId: Long,
42 | toRowId: Long = Long.MaxValue
43 | ): Source[EventEnvelope, NotUsed] = {
44 | val encodedTags = URLEncoder.encode(tags.mkString("-"), ByteString.UTF_8)
45 | Source
46 | .fromMaterializer { (mat, _) =>
47 | Source.fromGraph(
48 | EventsPublisherGraphStage.byTags(fromRowId, toRowId, maxBufSize, writeJournalPluginId, tags)(mat)
49 | )
50 | }
51 | .named(s"eventsByTags-$encodedTags")
52 | .mapMaterializedValue(_ => NotUsed)
53 | }
54 |
55 | override def allEvents(fromRowId: Long, toRowId: Long = Long.MaxValue): Source[EventEnvelope, NotUsed] =
56 | Source
57 | .fromMaterializer { (mat, _) =>
58 | Source.fromGraph(
59 | EventsPublisherGraphStage.allEvents(fromRowId, toRowId, maxBufSize, writeJournalPluginId)(mat)
60 | )
61 | }
62 | .named(s"events-")
63 | .mapMaterializedValue(_ => NotUsed)
64 |
65 | override def eventsByPersistenceId(
66 | persistenceId: String,
67 | fromSequenceNr: Long,
68 | toSequenceNr: Long
69 | ): Source[EventEnvelope, NotUsed] =
70 | Source
71 | .fromMaterializer { (mat, _) =>
72 | Source.fromGraph(
73 | EventsPublisherGraphStage
74 | .byPersistenceId(fromSequenceNr, toSequenceNr, maxBufSize, writeJournalPluginId, persistenceId)(mat)
75 | )
76 | }
77 | .named(s"eventsByPersistenceId-${URLEncoder.encode(persistenceId, ByteString.UTF_8)}")
78 | .mapMaterializedValue(_ => NotUsed)
79 |
80 | override def currentEventsByTags(
81 | tags: Set[EventTag],
82 | fromRowId: Long,
83 | toRowId: Long
84 | ): Source[EventEnvelope, NotUsed] =
85 | currentEvents(fromRowId, toRowId, Some(tags))
86 | .mapConcat(adaptEvents)
87 | .map(persistentReprToEventEnvelope)
88 |
89 | override def currentAllEvents(
90 | fromRowId: Long,
91 | toRowId: Long = Long.MaxValue
92 | ): Source[EventEnvelope, NotUsed] =
93 | currentEvents(fromRowId, toRowId, None)
94 | .mapConcat(adaptEvents)
95 | .map(persistentReprToEventEnvelope)
96 |
97 | override def currentEventsByPersistenceId(
98 | persistenceId: String,
99 | fromSequenceNr: Long,
100 | toSequenceNr: Long
101 | ): Source[EventEnvelope, NotUsed] =
102 | currentEvents(persistenceId, fromSequenceNr, toSequenceNr)
103 | .mapConcat(adaptEvents)
104 | .map(persistentReprToEventEnvelope)
105 |
106 | def cancelAll(): Unit =
107 | Persistence(system).journalFor(writeJournalPluginId) ! CancelSubscribers
108 |
109 | private def adaptEvents(repr: PersistentRepr): List[PersistentRepr] = {
110 | val adapter = eventAdapters.get(repr.payload.getClass)
111 | adapter.fromJournal(repr.payload, repr.manifest).events.map(repr.withPayload).toList
112 | }
113 |
114 | private def persistentReprToEventEnvelope(persistentRepr: PersistentRepr) =
115 | new EventEnvelope(
116 | Offset.sequence(persistentRepr.sequenceNr),
117 | persistentRepr.persistenceId,
118 | persistentRepr.sequenceNr,
119 | persistentRepr.payload,
120 | 0L
121 | )
122 | }
123 |
124 | object PostgresReadJournal {
125 |
126 | /**
127 | * The default identifier for [[PostgresReadJournal]] to be used with
128 | * [[akka.persistence.query.PersistenceQuery#readJournalFor]].
129 | *
130 | * The value is `"akka.persistence.pg.journal.query"` and corresponds
131 | * to the absolute path to the read journal configuration entry.
132 | */
133 | final val Identifier = "akka.persistence.pg.journal.query"
134 |
135 | }
136 |
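A minimal usage sketch: obtain the read journal through its Identifier and stream tagged events. The system name and tag values are illustrative, and an Akka 2.6-style implicit system is assumed as materializer:

  import akka.actor.ActorSystem
  import akka.persistence.pg.query.scaladsl.PostgresReadJournal
  import akka.persistence.query.PersistenceQuery
  import akka.stream.scaladsl.Sink

  implicit val system: ActorSystem = ActorSystem("example")

  val readJournal: PostgresReadJournal = PersistenceQuery(system)
    .readJournalFor[PostgresReadJournal](PostgresReadJournal.Identifier)

  // live stream of all events carrying the given tag, from row id 0 onwards
  readJournal
    .eventsByTags(tags = Set("domain" -> "sales"), fromRowId = 0L)
    .runWith(Sink.foreach(println))
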
--------------------------------------------------------------------------------
/modules/akka-persistence-pg/src/main/scala/akka/persistence/pg/snapshot/PgAsyncSnapshotStore.scala:
--------------------------------------------------------------------------------
1 | package akka.persistence.pg.snapshot
2 |
3 | import akka.actor.{ActorLogging, ActorSystem}
4 | import akka.persistence.pg.{JsonString, PgConfig, PgExtension, PluginConfig}
5 | import akka.persistence.serialization.Snapshot
6 | import akka.persistence.{SelectedSnapshot, SnapshotMetadata, SnapshotSelectionCriteria}
7 | import akka.serialization.{Serialization, SerializationExtension}
8 |
9 | import scala.concurrent.{ExecutionContextExecutor, Future}
10 |
11 | class PgAsyncSnapshotStore
12 | extends akka.persistence.snapshot.SnapshotStore
13 | with PgSnapshotStore
14 | with ActorLogging
15 | with PgConfig {
16 |
17 | implicit val system: ActorSystem = context.system
18 | implicit val executionContext: ExecutionContextExecutor = context.system.dispatcher
19 |
20 | override val serialization: Serialization = SerializationExtension(context.system)
21 | override val pgExtension: PgExtension = PgExtension(context.system)
22 | override lazy val pluginConfig: PluginConfig = PgExtension(context.system).pluginConfig
23 |
24 | import driver.api._
25 |
26 | override def loadAsync(
27 | persistenceId: String,
28 | criteria: SnapshotSelectionCriteria
29 | ): Future[Option[SelectedSnapshot]] = {
30 | log.debug(s"loading snapshot for persistenceId: {}, criteria: {}", persistenceId, criteria)
31 | selectMostRecentSnapshotFor(persistenceId, criteria)
32 | }
33 |
34 | override def saveAsync(metadata: SnapshotMetadata, snapshot: Any): Future[Unit] = {
35 | log.debug(s"saving snapshot for metadata {}", metadata)
36 | val (payloadAsJson, payloadAsBytes) = serializeSnapshot(snapshot)
37 | val snapshotEntry = SnapshotEntry(
38 | metadata.persistenceId,
39 | metadata.sequenceNr,
40 | metadata.timestamp,
41 | Some(snapshot.getClass.getName),
42 | payloadAsBytes,
43 | payloadAsJson
44 | )
45 |
46 | //TODO use native upsert
47 | database
48 | .run(snapshotsQuery(metadata).length.result.flatMap { result: Int =>
49 | if (result > 0) {
50 | snapshotsQuery(metadata).update(snapshotEntry)
51 | } else {
52 | snapshots += snapshotEntry
53 | }
54 | })
55 | .map { _ =>
56 | ()
57 | }
58 | }
59 |
60 | override def deleteAsync(metadata: SnapshotMetadata): Future[Unit] = {
61 | log.debug(s"deleting: {}", metadata)
62 | deleteSnapshot(metadata).map { _ =>
63 | log.debug(s"deleted snapshot {}", metadata)
64 | }
65 | }
66 |
67 | override def deleteAsync(persistenceId: String, criteria: SnapshotSelectionCriteria): Future[Unit] = {
68 | log.debug(s"deleting for persistenceId: {} and criteria: {}", persistenceId, criteria)
69 | database.run(selectSnapshotsQuery(persistenceId, criteria).delete).map { deleted =>
70 | log.debug(s"deleted {} snapshots", deleted); ()
71 | }
72 | }
73 |
74 | private[this] def serializeSnapshot(snapshot: Any): (Option[JsonString], Option[Array[Byte]]) =
75 | if (snapshotEncoder.toJson.isDefinedAt(snapshot)) {
76 | val json = snapshotEncoder.toJson(snapshot)
77 | require(
78 | snapshotEncoder.fromJson.isDefinedAt((json, snapshot.getClass)),
79 | s"You MUST always be able to decode what you encoded, fromJson method is incomplete for ${snapshot.getClass}"
80 | )
81 | (Some(json), None)
82 | } else {
83 | (None, Some(serialization.serialize(Snapshot(snapshot)).get))
84 | }
85 |
86 | }
87 |
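The //TODO in saveAsync points at Postgres' native upsert. A hedged sketch of the alternative using Slick's insertOrUpdate; whether the slick-pg profile used here compiles it to INSERT ... ON CONFLICT rather than emulating it client-side is an assumption to verify:

  // Inside saveAsync, replacing the count-then-update-or-insert round trip:
  database
    .run(snapshots.insertOrUpdate(snapshotEntry))
    .map(_ => ())
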
--------------------------------------------------------------------------------
/modules/akka-persistence-pg/src/main/scala/akka/persistence/pg/snapshot/PgSnapshotStore.scala:
--------------------------------------------------------------------------------
1 | package akka.persistence.pg.snapshot
2 |
3 | import akka.event.LoggingAdapter
4 | import akka.persistence.pg.{PgConfig, PgExtension}
5 | import akka.persistence.pg.event.JsonEncoder
6 | import akka.persistence.serialization.Snapshot
7 | import akka.persistence.{SelectedSnapshot, SnapshotMetadata, SnapshotSelectionCriteria}
8 | import akka.serialization.Serialization
9 |
10 | import scala.concurrent.{ExecutionContext, Future}
11 | import scala.util.{Failure, Success, Try}
12 |
13 | trait PgSnapshotStore extends SnapshotTable {
14 | self: PgConfig =>
15 |
16 | def pgExtension: PgExtension
17 | def serialization: Serialization
18 | def snapshotEncoder: JsonEncoder = pluginConfig.snapshotEncoder
19 | def log: LoggingAdapter
20 |
21 | import driver.api._
22 |
23 | def snapshotsQuery(metadata: SnapshotMetadata): Query[SnapshotTable, SnapshotEntry, Seq] =
24 | snapshots
25 | .filter(_.persistenceId === metadata.persistenceId)
26 | .filter(_.sequenceNr === metadata.sequenceNr)
27 |
28 | def deleteSnapshot(metadata: SnapshotMetadata): Future[Int] =
29 | database.run {
30 | snapshotsQuery(metadata).delete
31 | }
32 |
33 | def selectMostRecentSnapshotFor(persistenceId: String, criteria: SnapshotSelectionCriteria)(
34 | implicit executionContext: ExecutionContext
35 | ): Future[Option[SelectedSnapshot]] =
36 | database.run {
37 | selectSnapshotsQuery(persistenceId, criteria)
38 | .sortBy(_.sequenceNr.desc)
39 | .take(1)
40 | .result
41 | .headOption
42 | } map {
43 | _ flatMap { entry: SnapshotEntry =>
44 | ((entry.payload, entry.json, entry.manifest) match {
45 | case (Some(payload), _, _) =>
46 | deserialize(persistenceId, serialization.deserialize(payload, classOf[Snapshot]).get.data)
47 | case (_, Some(event), Some(manifest)) =>
48 | deserialize(persistenceId, snapshotEncoder.fromJson((event, pgExtension.getClassFor[Any](manifest))))
49 | case _ =>
50 | sys.error(s"""both payload and event are null for snapshot table entry
51 | with persistenceid='${entry.persistenceId}' and sequencenr='${entry.sequenceNr} and timestamp='${entry.timestamp}'
52 | This should NEVER happen!""")
53 | }) map {
54 | SelectedSnapshot(SnapshotMetadata(entry.persistenceId, entry.sequenceNr, entry.timestamp), _)
55 | }
56 |
57 | }
58 | }
59 |
60 | def deserialize(persistenceId: String, snapshot: => Any): Option[Any] =
61 | Try(snapshot) match {
62 | case Success(data) => Some(data)
63 | case Failure(t) =>
64 | if (pluginConfig.ignoreSnapshotDecodingFailure) {
65 | log.warning(
66 | "problem deserializing snapshot with persistenceId '{}' from store using Akka serialization: {}",
67 | persistenceId,
68 | t.getMessage
69 | )
70 | None
71 | } else {
72 | throw t
73 | }
74 |
75 | }
76 |
77 | def selectSnapshotsQuery(
78 | persistenceId: String,
79 | criteria: SnapshotSelectionCriteria
80 | ): Query[SnapshotTable, SnapshotEntry, Seq] =
81 | snapshots
82 | .filter(_.persistenceId === persistenceId)
83 | .filter(_.sequenceNr <= criteria.maxSequenceNr)
84 | .filter(_.timestamp <= criteria.maxTimestamp)
85 |
86 | }
87 |
--------------------------------------------------------------------------------
/modules/akka-persistence-pg/src/main/scala/akka/persistence/pg/snapshot/SnapshotTable.scala:
--------------------------------------------------------------------------------
1 | package akka.persistence.pg.snapshot
2 |
3 | import akka.persistence.pg.{JsonString, PgConfig}
4 |
5 | case class SnapshotEntry(
6 | persistenceId: String,
7 | sequenceNr: Long,
8 | timestamp: Long,
9 | manifest: Option[String],
10 | payload: Option[Array[Byte]],
11 | json: Option[JsonString]
12 | )
13 |
14 | trait SnapshotTable {
15 | self: PgConfig =>
16 |
17 | import driver.api._
18 |
19 | class SnapshotTable(tag: Tag) extends Table[SnapshotEntry](tag, pluginConfig.schema, pluginConfig.snapshotTableName) {
20 |
21 | def persistenceId = column[String]("persistenceid")
22 | def sequenceNr = column[Long]("sequencenr")
23 | def timestamp = column[Long]("timestamp")
24 | def manifest = column[Option[String]]("manifest")
25 | def snapshotBinary = column[Option[Array[Byte]]]("snapshot")
26 | def snapshotJson = column[Option[JsonString]]("json")
27 |
28 | def pk = primaryKey(s"pk_${pluginConfig.snapshotTableName}", (persistenceId, sequenceNr))
29 |
30 | def * =
31 | (persistenceId, sequenceNr, timestamp, manifest, snapshotBinary, snapshotJson) <> (SnapshotEntry.tupled, SnapshotEntry.unapply)
32 | }
33 |
34 | val snapshots: TableQuery[SnapshotTable] = TableQuery[SnapshotTable]
35 |
36 | }
37 |
--------------------------------------------------------------------------------
/modules/akka-persistence-pg/src/main/scala/akka/persistence/pg/snapshot/TestPgAsyncSnapshotStore.scala:
--------------------------------------------------------------------------------
1 | package akka.persistence.pg.snapshot
2 |
3 | import akka.persistence.pg.util.PluginTestConfig
4 |
5 | class TestPgAsyncSnapshotStore extends PgAsyncSnapshotStore {
6 |
7 | override lazy val pluginConfig = new PluginTestConfig(context.system)
8 |
9 | }
10 |
--------------------------------------------------------------------------------
/modules/akka-persistence-pg/src/main/scala/akka/persistence/pg/streams/AllEventsPublisherStageLogic.scala:
--------------------------------------------------------------------------------
1 | package akka.persistence.pg.streams
2 |
3 | import akka.persistence.pg.journal.PgAsyncWriteJournal.{ReplayMessages, ReplayedEventMessage, SubscribeAllEvents}
4 | import akka.persistence.query.{EventEnvelope, Offset}
5 | import akka.stream.{Materializer, SourceShape}
6 |
7 | private[pg] class AllEventsPublisherStageLogic(
8 | shape: SourceShape[EventEnvelope],
9 | fromOffset: Long,
10 | toOffset: Long,
11 | maxBufferSize: Int,
12 | writeJournalPluginId: String
13 | )(implicit materializer: Materializer)
14 | extends EventsPublisherStageLogic(shape, writeJournalPluginId, maxBufferSize, fromOffset, toOffset) {
15 |
16 | override def subscribe(): Unit =
17 | journal ! SubscribeAllEvents
18 |
19 | override def requestReplay(limit: Int): Unit =
20 | journal ! ReplayMessages(currentOffset, toOffset, limit, sender)
21 |
22 | override def replaying: PartialFunction[Any, EventEnvelope] = {
23 |
24 | case ReplayedEventMessage(persistentRepr, offset) =>
25 | currentOffset = offset + 1
26 | new EventEnvelope(
27 | offset = Offset.sequence(offset),
28 | persistenceId = persistentRepr.persistenceId,
29 | sequenceNr = persistentRepr.sequenceNr,
30 | event = persistentRepr.payload,
31 | timestamp = 0L
32 | )
33 |
34 | }
35 | }
36 |
--------------------------------------------------------------------------------
/modules/akka-persistence-pg/src/main/scala/akka/persistence/pg/streams/EventsByPersistenceIdPublisherStageLogic.scala:
--------------------------------------------------------------------------------
1 | package akka.persistence.pg.streams
2 |
3 | import akka.persistence.JournalProtocol.{ReplayMessages, ReplayedMessage}
4 | import akka.persistence.pg.journal.PgAsyncWriteJournal.SubscribePersistenceId
5 | import akka.persistence.query.{EventEnvelope, Offset}
6 | import akka.stream.{Materializer, SourceShape}
7 |
8 | private[pg] class EventsByPersistenceIdPublisherStageLogic(
9 | shape: SourceShape[EventEnvelope],
10 | persistenceId: String,
11 | writeJournalPluginId: String,
12 | maxBufferSize: Int,
13 | fromOffset: Long,
14 | toOffset: Long
15 | )(implicit materializer: Materializer)
16 | extends EventsPublisherStageLogic(shape, writeJournalPluginId, maxBufferSize, fromOffset, toOffset) {
17 |
18 | override def subscribe(): Unit =
19 | journal ! SubscribePersistenceId(persistenceId)
20 |
21 | override def requestReplay(limit: Int): Unit =
22 | journal ! ReplayMessages(currentOffset, toOffset, limit, persistenceId, sender)
23 |
24 | override def replaying: PartialFunction[Any, EventEnvelope] = {
25 | case ReplayedMessage(p) =>
26 | currentOffset = p.sequenceNr + 1
27 | new EventEnvelope(
28 | offset = Offset.sequence(p.sequenceNr),
29 | persistenceId = persistenceId,
30 | sequenceNr = p.sequenceNr,
31 | event = p.payload,
32 | timestamp = 0L
33 | )
34 | }
35 |
36 | }
37 |
--------------------------------------------------------------------------------
/modules/akka-persistence-pg/src/main/scala/akka/persistence/pg/streams/EventsByTagsPublisherStageLogic.scala:
--------------------------------------------------------------------------------
1 | package akka.persistence.pg.streams
2 |
3 | import akka.persistence.pg.EventTag
4 | import akka.persistence.pg.journal.PgAsyncWriteJournal.{ReplayTaggedMessages, ReplayedTaggedMessage, SubscribeTags}
5 | import akka.persistence.query.{EventEnvelope, Offset}
6 | import akka.stream.{Materializer, SourceShape}
7 |
8 | private[pg] class EventsByTagsPublisherStageLogic(
9 | shape: SourceShape[EventEnvelope],
10 | tags: Set[EventTag],
11 | fromOffset: Long,
12 | toOffset: Long,
13 | maxBufferSize: Int,
14 | writeJournalPluginId: String
15 | )(implicit materializer: Materializer)
16 | extends EventsPublisherStageLogic(shape, writeJournalPluginId, maxBufferSize, fromOffset, toOffset) {
17 |
18 | override def subscribe(): Unit =
19 | journal ! SubscribeTags(tags)
20 |
21 | override def requestReplay(limit: Int): Unit =
22 | journal ! ReplayTaggedMessages(currentOffset, toOffset, limit, tags, sender)
23 |
24 | override def replaying: PartialFunction[Any, EventEnvelope] = {
25 |
26 | case ReplayedTaggedMessage(persistentRepr, _, offset) =>
27 | currentOffset = offset + 1
28 | new EventEnvelope(
29 | offset = Offset.sequence(offset),
30 | persistenceId = persistentRepr.persistenceId,
31 | sequenceNr = persistentRepr.sequenceNr,
32 | event = persistentRepr.payload,
33 | timestamp = 0L
34 | )
35 |
36 | }
37 | }
38 |
--------------------------------------------------------------------------------
/modules/akka-persistence-pg/src/main/scala/akka/persistence/pg/streams/EventsPublisherGraphStage.scala:
--------------------------------------------------------------------------------
1 | package akka.persistence.pg.streams
2 |
3 | import akka.persistence.pg.EventTag
4 | import akka.persistence.query.EventEnvelope
5 | import akka.stream.{Attributes, Materializer, Outlet, SourceShape}
6 | import akka.stream.stage.GraphStage
7 |
8 | object EventsPublisherGraphStage {
9 |
10 | def byPersistenceId(
11 | fromOffset: Long,
12 | toOffset: Long,
13 | maxBufferSize: Int,
14 | writeJournalPluginId: String,
15 | persistenceId: String
16 | )(implicit materializer: Materializer): GraphStage[SourceShape[EventEnvelope]] =
17 | new EventsPublisherGraphStage() {
18 | override def createLogic(inheritedAttributes: Attributes): EventsPublisherStageLogic =
19 | new EventsByPersistenceIdPublisherStageLogic(
20 | shape,
21 | persistenceId,
22 | writeJournalPluginId,
23 | maxBufferSize,
24 | fromOffset,
25 | toOffset
26 | )
27 | }
28 |
29 | def byTags(
30 | fromOffset: Long,
31 | toOffset: Long,
32 | maxBufferSize: Int,
33 | writeJournalPluginId: String,
34 | tags: Set[EventTag]
35 | )(implicit materializer: Materializer): GraphStage[SourceShape[EventEnvelope]] =
36 | new EventsPublisherGraphStage() {
37 | override def createLogic(inheritedAttributes: Attributes): EventsPublisherStageLogic =
38 | new EventsByTagsPublisherStageLogic(
39 | shape,
40 | tags,
41 | fromOffset,
42 | toOffset,
43 | maxBufferSize,
44 | writeJournalPluginId
45 | )
46 | }
47 |
48 | def allEvents(
49 | fromOffset: Long,
50 | toOffset: Long,
51 | maxBufferSize: Int,
52 | writeJournalPluginId: String
53 | )(implicit materializer: Materializer): GraphStage[SourceShape[EventEnvelope]] =
54 | new EventsPublisherGraphStage {
55 | override def createLogic(inheritedAttributes: Attributes): EventsPublisherStageLogic =
56 | new AllEventsPublisherStageLogic(
57 | shape,
58 | fromOffset,
59 | toOffset,
60 | maxBufferSize,
61 | writeJournalPluginId
62 | )
63 | }
64 |
65 | }
66 |
67 | private[pg] abstract class EventsPublisherGraphStage() extends GraphStage[SourceShape[EventEnvelope]] {
68 |
69 | val out: Outlet[EventEnvelope] = Outlet("EventsPublisher.out")
70 | val shape: SourceShape[EventEnvelope] = SourceShape(out)
71 |
72 | def createLogic(inheritedAttributes: Attributes): EventsPublisherStageLogic
73 |
74 | }
75 |
--------------------------------------------------------------------------------
/modules/akka-persistence-pg/src/main/scala/akka/persistence/pg/streams/EventsPublisherStageLogic.scala:
--------------------------------------------------------------------------------
1 | package akka.persistence.pg.streams
2 |
3 | import akka.actor.Actor.Receive
4 | import akka.actor.Status.Failure
5 | import akka.actor.{ActorRef, Terminated}
6 | import akka.persistence.JournalProtocol.{RecoverySuccess, ReplayMessagesFailure}
7 | import akka.persistence.Persistence
8 | import akka.persistence.pg.journal.PgAsyncWriteJournal.NewEventAppended
9 | import akka.persistence.query.EventEnvelope
10 | import akka.stream.{Materializer, SourceShape}
11 | import akka.stream.stage.GraphStageLogic.StageActor
12 | import akka.stream.stage.{GraphStageLogic, OutHandler, StageLogging}
13 |
14 | import scala.collection.{mutable => mut}
15 |
16 | private[pg] object EventsPublisherStageLogic {
17 |
18 | class UnhandledMessageException(ref: ActorRef, msg: Any) extends RuntimeException(s"Unhandled message $msg from $ref")
19 |
20 | case object CancelEventsStage
21 |
22 | }
23 |
24 | private[pg] abstract class EventsPublisherStageLogic(
25 | shape: SourceShape[EventEnvelope],
26 | writeJournalPluginId: String,
27 | maxBufferSize: Int,
28 | fromOffset: Long,
29 | toOffset: Long
30 | )(implicit materializer: Materializer)
31 | extends GraphStageLogic(shape)
32 | with StageLogging
33 | with OutHandler {
34 | import EventsPublisherStageLogic._
35 |
36 | val journal: ActorRef = Persistence(materializer.system).journalFor(writeJournalPluginId)
37 |
38 | implicit protected lazy val sender: ActorRef = stageActor.ref
39 |
40 | protected var currentOffset: Long = fromOffset
41 | private var failure: Option[Throwable] = None
42 | private val buffer = mut.Queue[EventEnvelope]()
43 | private var newEventsCount = 0L // new-event notifications received while a replay was running
44 | private var initialized = false
45 | private var done = false // replay has caught up and no new-event notifications are pending
46 |
47 | override def preStart(): Unit =
48 | stageActorBecome(idle).watch(journal)
49 |
50 | final override def onPull(): Unit = {
51 | if (!initialized) {
52 | initialize()
53 | }
54 | tryPush()
55 | if (buffer.isEmpty) {
56 | failure.foreach(failStage)
57 | }
58 | }
59 |
60 | private val idle: Receive = PartialFunction.empty
61 |
62 | private def replayingBase: Receive = {
63 | case CancelEventsStage =>
64 | log.debug(s"Events publisher was stopped")
65 | completeStage()
66 |
67 | case RecoverySuccess(highestSequenceNr) =>
68 | log.debug(s"completed currOffset [$currentOffset]")
69 | processRecoverySuccess(highestSequenceNr)
70 |
71 | case ReplayMessagesFailure(cause) =>
72 | log.error(cause, s"replay failed due to [${cause.getMessage}]")
73 | failure = Some(cause)
74 |
75 | case NewEventAppended =>
76 | log.debug(s"Received event notification while replaying")
77 | processNewEvent()
78 | }
79 |
80 | protected def enqueue(envelope: EventEnvelope): Unit = {
81 | buffer.enqueue(envelope)
82 | tryPush()
83 | }
84 |
85 | private def tryPush(): Unit =
86 | if (isAvailable(shape.out) && buffer.nonEmpty) {
87 | push(shape.out, buffer.dequeue())
88 | }
89 |
90 | private def processRecoverySuccess(highestSequenceNr: Long): Unit =
91 | if (currentOffset > toOffset && buffer.isEmpty) {
92 | completeStage()
93 | } else {
94 | if (highestSequenceNr > currentOffset || newEventsCount > 0) {
95 | requestNewEvents()
96 | } else {
97 | done = true
98 | }
99 | }
100 |
101 | private def processNewEvent(): Unit = {
102 | newEventsCount += 1
103 | if (done) {
104 | requestNewEvents()
105 | done = false
106 | }
107 | }
108 |
109 | private def requestNewEvents(): Unit = {
110 | val requestedAmount = maxBufferSize - buffer.size
111 | requestReplay(requestedAmount)
112 | newEventsCount = math.max(0, newEventsCount - requestedAmount)
113 | }
114 |
115 | private def initialize(): Unit = {
116 | subscribe()
117 | requestReplay(maxBufferSize)
118 | stageActorBecome(replayingReceive)
119 | initialized = true
120 | }
121 |
122 | private def stageActorBecome(receive: Receive): StageActor = getStageActor {
123 | case (_, msg) if receive.isDefinedAt(msg) => receive(msg)
124 | case (_, Terminated(`journal`)) => completeStage()
125 | case (_, Failure(cause)) => failure = Some(cause)
126 | case (r, m) => failStage(new UnhandledMessageException(r, m))
127 | }
128 |
129 | def subscribe(): Unit
130 | def requestReplay(limit: Int): Unit
131 | def replaying: PartialFunction[Any, EventEnvelope]
132 |
133 | private def replayingReceive: Receive = replayingBase orElse (replaying andThen enqueue _)
134 |
135 | setHandler(shape.out, this)
136 | }
137 |
--------------------------------------------------------------------------------
/modules/akka-persistence-pg/src/main/scala/akka/persistence/pg/util/DummyPersistentActor.scala:
--------------------------------------------------------------------------------
1 | package akka.persistence.pg.util
2 |
3 | import akka.actor.ActorLogging
4 | import akka.persistence.PersistentActor
5 |
6 | case object DummyCommand
7 |
8 | class DummyPersistentActor extends PersistentActor with ActorLogging {
9 |
10 | override def receiveRecover: Receive = {
11 | case a: Any => log.debug("DummyPersistentActor receiveRecover received " + a)
12 | }
13 |
14 | override def receiveCommand: Receive = {
15 | case DummyCommand => log.debug("DummyPersistentActor received DummyCommand")
16 | }
17 |
18 | override def persistenceId: String = "pg-dummy"
19 | }
20 |
--------------------------------------------------------------------------------
/modules/akka-persistence-pg/src/main/scala/akka/persistence/pg/util/PgPluginTestUtil.scala:
--------------------------------------------------------------------------------
1 | package akka.persistence.pg.util
2 |
3 | import java.sql.{Connection, DatabaseMetaData}
4 | import java.util.concurrent.CountDownLatch
5 |
6 | import akka.actor.{ActorSystem, Props}
7 | import slick.jdbc.JdbcBackend
8 |
9 | /**
10 | * Test utility for testing PersistentActors with the akka-persistence-pg plugin:
11 | * send your actors messages in a code block wrapped in a withTransactionRollback call.
12 | * This makes sure that all persistent messages are stored within a DB transaction that is rolled back automatically.
13 | *
14 | * Important remarks:
15 | *
16 | * Make sure to configure akka-persistence to use the TestPgAsyncWriteJournal class in your akka test config:
17 | * pg-journal.class = "akka.persistence.pg.journal.TestPgAsyncWriteJournal"
18 | *
19 | * This class is not thread-safe and contains shared global state. Make sure NOT to run your tests in parallel.
20 | *
21 | */
22 | object PgPluginTestUtil {
23 |
24 | private[this] var dbLatch: CountDownLatch = new CountDownLatch(1)
25 |
26 | private[pg] var db: RollbackDatabase = _
27 |
28 | /**
29 | * Initialize the global state. This will be called when the TestPgAsyncWriteJournal is instantiated by akka-persistence.
30 | *
31 | * @param db the database
32 | */
33 | private[pg] def initialize(db: JdbcBackend.DatabaseDef, actorSystem: ActorSystem): RollbackDatabase = {
34 | if (this.db == null) {
35 | this.db = new RollbackDatabase(db)
36 | dbLatch.countDown()
37 | actorSystem.registerOnTermination(uninitialize())
38 | }
39 | this.db
40 | }
41 |
42 | /**
43 | * Uninitialize the global state. This will be called when the actor system stops.
44 | */
45 | private[pg] def uninitialize() = {
46 | db.database.close()
47 | db = null
48 | dbLatch = new CountDownLatch(1)
49 | }
50 |
51 | /**
52 | * @param f function block to be called within a tx that will be rolled back
53 | * @return the result of the function call
54 | */
55 | def withTransactionRollback[T](f: JdbcBackend.DatabaseDef => T)(implicit system: ActorSystem): T = {
56 | if (db == null) {
57 | //send a dummy message to start up akka-persistence (asynchronously) because akka-persistence only starts when
58 | //a message is sent for the first time to a persistent actor
59 | system.actorOf(Props(classOf[DummyPersistentActor])) ! DummyCommand
60 | }
61 | //wait until akka-persistence is initialized
62 | dbLatch.await()
63 | //create a new session that will eventually be rolled back
64 | db.newRollbackSession()
65 | try {
66 | f(db)
67 | } finally {
68 | db.rollbackAndClose()
69 | }
70 | }
71 |
72 | private class RollbackSession(override val database: JdbcBackend.DatabaseDef) extends JdbcBackend.Session {
73 |
74 | override val conn: Connection = database.source.createConnection()
75 | conn.setAutoCommit(false)
76 |
77 | override def capabilities: JdbcBackend.DatabaseCapabilities = new JdbcBackend.DatabaseCapabilities(this)
78 |
79 | override def metaData: DatabaseMetaData = conn.getMetaData
80 |
81 | override def close(): Unit = {}
82 |
83 | override def endInTransaction(f: => Unit): Unit = {}
84 | override def startInTransaction: Unit = {}
85 |
86 | }
87 |
88 | private[pg] class RollbackDatabase(val database: JdbcBackend.DatabaseDef)
89 | extends JdbcBackend.DatabaseDef(database.source, database.executor) {
90 |
91 | private var session: Option[RollbackSession] = None
92 |
93 | def newRollbackSession(): Unit =
94 | session = Option(new RollbackSession(database))
95 |
96 | @annotation.tailrec
97 | private def retry[T](n: Int)(fn: => T): T =
98 | util.Try { fn } match {
99 | case util.Success(x) => x
100 | case _ if n > 1 => Thread.sleep(50); retry(n - 1)(fn)
101 | case util.Failure(e) => throw e
102 | }
103 |
104 | override def createSession(): JdbcBackend.SessionDef =
105 | try {
106 | retry(5)(session.get)
107 | } catch {
108 | case e: InterruptedException =>
109 | Thread.currentThread().interrupt()
110 | session.get
111 | }
112 |
113 | def rollbackAndClose() = {
114 | session.foreach { s =>
115 | s.conn.rollback()
116 | s.conn.close()
117 | }
118 | session = None
119 | }
120 |
121 | }
122 |
123 | }
124 |
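A minimal usage sketch per the scaladoc above; the actor and message come from DummyPersistentActor.scala earlier in this document, and the system name is illustrative:

  import akka.actor.{ActorSystem, Props}
  import akka.persistence.pg.util.{DummyCommand, DummyPersistentActor, PgPluginTestUtil}

  implicit val system: ActorSystem = ActorSystem("test")

  PgPluginTestUtil.withTransactionRollback { db =>
    // everything persisted inside this block goes through `db`
    // and is rolled back when the block exits
    system.actorOf(Props(classOf[DummyPersistentActor])) ! DummyCommand
  }
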
--------------------------------------------------------------------------------
/modules/akka-persistence-pg/src/main/scala/akka/persistence/pg/util/PluginTestConfig.scala:
--------------------------------------------------------------------------------
1 | package akka.persistence.pg.util
2 |
3 | import akka.actor.ActorSystem
4 | import akka.persistence.pg.PluginConfig
5 | import slick.jdbc.JdbcBackend
6 |
7 | class PluginTestConfig(system: ActorSystem) extends PluginConfig(system.settings.config) {
8 |
9 | override lazy val database: JdbcBackend.DatabaseDef = {
10 | PgPluginTestUtil.initialize(createDatabase, system)
11 | }
12 | }
13 |
--------------------------------------------------------------------------------
/modules/akka-persistence-pg/src/test/resources/example-actor-serialization-test.conf:
--------------------------------------------------------------------------------
1 | include "postgres.conf"
2 |
3 | pg-persistence {
4 | db {
5 | user = ${postgres.user}
6 | password = ${postgres.password}
7 | url = "jdbc:postgresql://"${postgres.host}":"${postgres.port}"/"${postgres.name}
8 | connectionPool = "disabled"
9 | }
10 | schemaName = ${postgres.schema}
11 | }
12 |
13 | pg-journal.class: "akka.persistence.pg.journal.TestPgAsyncWriteJournal"
14 | pg-snapshot.class: "akka.persistence.pg.snapshot.TestPgAsyncSnapshotStore"
15 |
16 | akka {
17 | loglevel = warning
18 | log-config-on-start = off
19 | stdout-loglevel = warning
20 |
21 | actor {
22 | serializers {
23 | eventSerializer = "akka.persistence.pg.EventSerializer"
24 | snapshotSerializer = "akka.persistence.pg.SnapshotSerializer"
25 | }
26 |
27 | serialization-bindings {
28 | "akka.persistence.pg.CustomSerializationTest$StringEvent" = eventSerializer
29 | "akka.persistence.pg.CustomSerializationTest$IntEvent" = eventSerializer
30 | "akka.persistence.pg.CustomSerializationTest$MyData" = snapshotSerializer
31 | }
32 | }
33 |
34 | persistence {
35 | journal.plugin = "pg-journal"
36 | snapshot-store.plugin = "pg-snapshot"
37 | }
38 |
39 | log-dead-letters = 10
40 | log-dead-letters-during-shutdown = on
41 | }
42 |
--------------------------------------------------------------------------------
/modules/akka-persistence-pg/src/test/resources/example-actor-test.conf:
--------------------------------------------------------------------------------
1 | include "postgres.conf"
2 |
3 | pg-persistence {
4 | db {
5 | user = ${postgres.user}
6 | password = ${postgres.password}
7 | url = "jdbc:postgresql://"${postgres.host}":"${postgres.port}"/"${postgres.name}
8 | connectionPool = "disabled"
9 | }
10 | schemaName = ${postgres.schema}
11 | }
12 |
13 | pg-journal.class: "akka.persistence.pg.journal.TestPgAsyncWriteJournal"
14 | pg-snapshot.class: "akka.persistence.pg.snapshot.TestPgAsyncSnapshotStore"
15 |
16 | akka {
17 | loglevel = warning
18 | log-config-on-start = off
19 | stdout-loglevel = warning
20 |
21 | persistence {
22 | journal.plugin = "pg-journal"
23 | snapshot-store.plugin = "pg-snapshot"
24 | }
25 |
26 | log-dead-letters = 10
27 | log-dead-letters-during-shutdown = on
28 |
29 | actor {
30 | allow-java-serialization = on
31 | }
32 | }
33 |
--------------------------------------------------------------------------------
/modules/akka-persistence-pg/src/test/resources/logback.xml:
--------------------------------------------------------------------------------
1 | <configuration>
2 |   <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
3 |     <encoder>
4 |       <pattern>%d{HH:mm:ss.SSS} [%-5level] %logger{15} - %msg%n%rEx</pattern>
5 |       <immediateFlush>false</immediateFlush>
6 |     </encoder>
7 |   </appender>
8 |   <!-- logger levels and the root appender-ref could not be recovered; the original markup was stripped -->
9 | </configuration>
10 |
--------------------------------------------------------------------------------
/modules/akka-persistence-pg/src/test/resources/pg-application.conf:
--------------------------------------------------------------------------------
1 | include "postgres.conf"
2 |
3 | pg-persistence {
4 | db {
5 | user = ${postgres.user}
6 | password = ${postgres.password}
7 | url = "jdbc:postgresql://"${postgres.host}":"${postgres.port}"/"${postgres.name}
8 | connectionPool = "disabled"
9 | }
10 | schemaName = ${postgres.schema}
11 | }
12 |
13 | akka {
14 | loglevel = warning
15 | log-config-on-start = off
16 | stdout-loglevel = warning
17 |
18 | persistence {
19 | journal.plugin = "pg-journal"
20 | snapshot-store.plugin = "pg-snapshot"
21 |
22 | # we need event publishing for tests
23 | publish-confirmations = on
24 | publish-plugin-commands = on
25 | }
26 |
27 | log-dead-letters = 10
28 | log-dead-letters-during-shutdown = on
29 | }
30 |
31 |
--------------------------------------------------------------------------------
/modules/akka-persistence-pg/src/test/resources/pg-eventstore-locking.conf:
--------------------------------------------------------------------------------
1 | include "pg-eventstore.conf"
2 |
3 | pg-persistence {
4 | writestrategy = "akka.persistence.pg.journal.TableLockingWriteStrategy"
5 | }
6 |
7 |
--------------------------------------------------------------------------------
/modules/akka-persistence-pg/src/test/resources/pg-eventstore-rowid.conf:
--------------------------------------------------------------------------------
1 | include "pg-eventstore.conf"
2 |
3 | pg-persistence {
4 | writestrategy = "akka.persistence.pg.journal.RowIdUpdatingStrategy"
5 | }
6 |
7 |
--------------------------------------------------------------------------------
/modules/akka-persistence-pg/src/test/resources/pg-eventstore-snapshotencoder.conf:
--------------------------------------------------------------------------------
1 | include "pg-eventstore.conf"
2 |
3 | pg-persistence {
4 | snapshotEncoder = "akka.persistence.pg.TestSnapshotEncoder"
5 | ignoreSnapshotDecodingFailure = true
6 | }
7 |
8 |
--------------------------------------------------------------------------------
/modules/akka-persistence-pg/src/test/resources/pg-eventstore.conf:
--------------------------------------------------------------------------------
1 | include "postgres.conf"
2 |
3 | pg-persistence {
4 | db {
5 | user = ${postgres.user}
6 | password = ${postgres.password}
7 | url = "jdbc:postgresql://"${postgres.host}":"${postgres.port}"/"${postgres.name}
8 | connectionPool = "HikariCP"
9 | }
10 | schemaName = ${postgres.schema}
11 | eventstore {
12 | class: "akka.persistence.pg.TestEventStore"
13 | tagger: "akka.persistence.pg.TestEventTagger"
14 | encoder: "akka.persistence.pg.TestEventEncoder"
15 | schemaName: ${postgres.schema}
16 | tableName: "journal"
17 | }
18 | }
19 |
20 | akka {
21 | loglevel = warning
22 | log-config-on-start = off
23 | stdout-loglevel = warning
24 | test.single-expect-default = 10000
25 |
26 | persistence {
27 | journal.plugin = "pg-journal"
28 | snapshot-store.plugin = "pg-snapshot"
29 |
30 |
31 | pg.journal.query {
32 | class = "akka.persistence.pg.query.PostgresReadJournalProvider"
33 | write-plugin = "pg-journal"
34 | refresh-interval = 3s
35 | max-buffer-size = 100
36 | }
37 |
38 | # we need event publishing for tests
39 | publish-confirmations = on
40 | publish-plugin-commands = on
41 | }
42 |
43 | log-dead-letters = 10
44 | log-dead-letters-during-shutdown = on
45 |
46 | actor {
47 | allow-java-serialization = on
48 | }
49 | }
50 |
51 |
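The akka.persistence.pg.journal.query block above registers the read-journal provider under a well-known identifier. A minimal sketch (hypothetical object name) of booting a system with this config and obtaining the read journal, mirroring what AbstractEventStoreTest does further down:

import akka.actor.ActorSystem
import akka.persistence.pg.query.scaladsl.PostgresReadJournal
import akka.persistence.query.PersistenceQuery
import com.typesafe.config.ConfigFactory

object ReadJournalBoot extends App {
  // Boot an actor system with the event-store test configuration.
  val system = ActorSystem("EventStoreTest", ConfigFactory.load("pg-eventstore.conf"))
  // PostgresReadJournal.Identifier resolves to the query block configured above.
  val readJournal = PersistenceQuery(system)
    .readJournalFor[PostgresReadJournal](PostgresReadJournal.Identifier)
}
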
--------------------------------------------------------------------------------
/modules/akka-persistence-pg/src/test/resources/pg-persist-jndi.conf:
--------------------------------------------------------------------------------
1 | include "pg-writestrategy-base.conf"
2 | pg-persistence {
3 | db {
4 | jndiName = "MyDS"
5 | maxConnections = 4
6 | numThreads = 4
7 | queueSize = 1000
8 | }
9 | writestrategy = "akka.persistence.pg.journal.RowIdUpdatingStrategy"
10 | }
11 |
12 | akka {
13 | actor {
14 | allow-java-serialization = on
15 | }
16 | }
17 |
--------------------------------------------------------------------------------
/modules/akka-persistence-pg/src/test/resources/pg-persistall.conf:
--------------------------------------------------------------------------------
1 | include "pg-writestrategy-base.conf"
2 | pg-persistence {
3 | db {
4 | numThreads = 4
5 | queueSize = 1000
6 | connectionPool = "HikariCP"
7 | }
8 | writestrategy = "akka.persistence.pg.journal.RowIdUpdatingStrategy"
9 | }
10 |
11 |
12 |
--------------------------------------------------------------------------------
/modules/akka-persistence-pg/src/test/resources/pg-readmodelupdate.conf:
--------------------------------------------------------------------------------
1 | include "pg-writestrategy-base.conf"
2 | pg-persistence {
3 | db {
4 | numThreads = 2
5 | maxConnections = 2
6 | queueSize = 1000
7 | connectionPool = "disabled"
8 | }
9 | writestrategy = "akka.persistence.pg.journal.RowIdUpdatingStrategy"
10 | }
11 |
12 |
13 |
--------------------------------------------------------------------------------
/modules/akka-persistence-pg/src/test/resources/pg-writestrategy-base.conf:
--------------------------------------------------------------------------------
1 | include "postgres.conf"
2 |
3 | pg-persistence {
4 | db {
5 | user = ${postgres.user}
6 | password = ${postgres.password}
7 | url = "jdbc:postgresql://"${postgres.host}":"${postgres.port}"/"${postgres.name}
8 | connectionPool = "HikariCP"
9 | connectionTimeout = 2000
10 | }
11 | schemaName = ${postgres.schema}
12 | eventstore {
13 | class: "akka.persistence.pg.writestrategy.DefaultEventStore"
14 | encoder: "akka.persistence.pg.perf.PerfEventEncoder"
15 | schemaName: ${postgres.schema}
16 | tableName: "journal"
17 | }
18 | }
19 |
20 | akka.actor.default-dispatcher.shutdown-timeout = 5
21 | akka {
22 | loglevel = warning
23 | log-config-on-start = off
24 | stdout-loglevel = warning
25 | test.single-expect-default = 10000
26 |
27 | persistence {
28 | journal.plugin = "pg-journal"
29 | snapshot-store.plugin = "pg-snapshot"
30 |
31 | # we need event publishing for tests
32 | publish-confirmations = on
33 | publish-plugin-commands = on
34 | }
35 |
36 | log-dead-letters = 10
37 | log-dead-letters-during-shutdown = on
38 | }
39 |
40 |
--------------------------------------------------------------------------------
/modules/akka-persistence-pg/src/test/resources/pg-writestrategy-locking.conf:
--------------------------------------------------------------------------------
1 | include "pg-writestrategy-base.conf"
2 | pg-persistence {
3 | writestrategy = "akka.persistence.pg.journal.TableLockingWriteStrategy"
4 | }
5 |
6 |
7 |
--------------------------------------------------------------------------------
/modules/akka-persistence-pg/src/test/resources/pg-writestrategy-rowid.conf:
--------------------------------------------------------------------------------
1 | include "pg-writestrategy-base.conf"
2 | pg-persistence {
3 | writestrategy = "akka.persistence.pg.journal.RowIdUpdatingStrategy"
4 | }
5 |
6 |
7 |
--------------------------------------------------------------------------------
/modules/akka-persistence-pg/src/test/resources/pg-writestrategy-st.conf:
--------------------------------------------------------------------------------
1 | include "pg-writestrategy-base.conf"
2 | pg-persistence {
3 | writestrategy = "akka.persistence.pg.journal.SingleThreadedBatchWriteStrategy"
4 | }
5 |
6 |
7 |
--------------------------------------------------------------------------------
/modules/akka-persistence-pg/src/test/resources/pg-writestrategy-tx.conf:
--------------------------------------------------------------------------------
1 | include "pg-writestrategy-base.conf"
2 | pg-persistence {
3 | writestrategy = "akka.persistence.pg.journal.TransactionalWriteStrategy"
4 | }
5 |
6 |
7 |
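Each of the four write-strategy variants above only includes pg-writestrategy-base.conf and overrides the single pg-persistence.writestrategy key. A small sketch (hypothetical object name) confirming the override is visible after HOCON resolution:

import com.typesafe.config.ConfigFactory

object WriteStrategyCheck extends App {
  // Every variant layers exactly one key on top of the shared base configuration.
  val config = ConfigFactory.load("pg-writestrategy-tx.conf")
  // Prints: akka.persistence.pg.journal.TransactionalWriteStrategy
  println(config.getString("pg-persistence.writestrategy"))
}
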
--------------------------------------------------------------------------------
/modules/akka-persistence-pg/src/test/resources/postgres.conf:
--------------------------------------------------------------------------------
1 | postgres {
2 | host = "localhost"
3 | host = ${?POSTGRES_HOSTADDRESS}
4 | port = "5432"
5 | port = ${?POSTGRES_PORT}
6 | name: "akkapg"
7 | name: ${?POSTGRES_DBNAME}
8 | user: "akkapg"
9 | user: ${?POSTGRES_DBUSER}
10 | password: "akkapg"
11 | password: ${?POSTGRES_DBPASSWORD}
12 | schema: "akka-persistence-pg"
13 | schema: ${?POSTGRES_SCHEMA}
14 | }
15 |
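The paired "key = default" / "key = ${?ENV_VAR}" lines are the standard HOCON pattern for optional environment overrides: the second assignment only replaces the default when the environment variable is actually set. A quick sketch (hypothetical object name):

import com.typesafe.config.ConfigFactory

object PostgresConfCheck extends App {
  val config = ConfigFactory.load("postgres.conf")
  // "localhost" unless POSTGRES_HOSTADDRESS is set in the environment
  println(config.getString("postgres.host"))
  // "akkapg" unless POSTGRES_DBNAME is set
  println(config.getString("postgres.name"))
}
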
--------------------------------------------------------------------------------
/modules/akka-persistence-pg/src/test/resources/simplelogger.properties:
--------------------------------------------------------------------------------
1 | org.slf4j.simpleLogger.log.com.zaxxer.hikari.pool.HikariPool=DEBUG
2 | #org.slf4j.simpleLogger.log.slick.backend=DEBUG
--------------------------------------------------------------------------------
/modules/akka-persistence-pg/src/test/scala/akka/persistence/pg/AbstractEventStoreTest.scala:
--------------------------------------------------------------------------------
1 | package akka.persistence.pg
2 |
3 | import akka.NotUsed
4 | import akka.actor.ActorSystem
5 | import akka.persistence.pg.event._
6 | import akka.persistence.pg.journal.JournalTable
7 | import akka.persistence.pg.query.scaladsl.PostgresReadJournal
8 | import akka.persistence.pg.snapshot.SnapshotTable
9 | import akka.persistence.pg.util.{CreateTables, RecreateSchema}
10 | import akka.persistence.query.PersistenceQuery
11 | import akka.stream.scaladsl.Source
12 | import akka.testkit.TestProbe
13 | import com.typesafe.config.{Config, ConfigFactory}
14 | import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach}
15 | import org.scalatest.concurrent.ScalaFutures
16 | import org.scalatest.funsuite.AnyFunSuite
17 | import org.scalatest.matchers.should.Matchers
18 | import org.scalatest.time.{Milliseconds, Seconds, Span}
19 |
20 | import scala.reflect.ClassTag
21 |
22 | abstract class AbstractEventStoreTest
23 | extends AnyFunSuite
24 | with BeforeAndAfterEach
25 | with Matchers
26 | with BeforeAndAfterAll
27 | with JournalTable
28 | with SnapshotTable
29 | with EventStore
30 | with RecreateSchema
31 | with CreateTables
32 | with PgConfig
33 | with ScalaFutures {
34 |
35 | lazy val config: Config = ConfigFactory.load("pg-eventstore.conf")
36 | implicit val system = ActorSystem("EventStoreTest", config)
37 |
38 | override lazy val pluginConfig: PluginConfig = PgExtension(system).pluginConfig
39 |
40 | val testProbe = TestProbe()
41 |
42 | override implicit val patienceConfig = PatienceConfig(timeout = Span(10, Seconds), interval = Span(100, Milliseconds))
43 |
44 | import driver.api._
45 |
46 | override def beforeAll(): Unit = {
47 | database.run(recreateSchema.andThen(createTables)).futureValue
48 | ()
49 | }
50 |
51 | override protected def beforeEach(): Unit =
52 | PgExtension(system).whenDone {
53 | database.run(
54 | DBIO.seq(
55 | sqlu"""ALTER SEQUENCE #${pluginConfig.fullJournalTableName}_id_seq RESTART WITH 1""",
56 | journals.delete,
57 | snapshots.delete
58 | )
59 | )
60 | }.futureValue
61 |
62 | override protected def afterEach(): Unit =
63 | PersistenceQuery(system)
64 | .readJournalFor[PostgresReadJournal](PostgresReadJournal.Identifier)
65 | .cancelAll()
66 |
67 | override protected def afterAll(): Unit = {
68 | system.terminate()
69 | system.whenTerminated.futureValue
70 | ()
71 | }
72 |
73 | def startSource[E](tags: Set[EventTag], fromRowId: Long)(implicit tag: ClassTag[E]): Source[E, NotUsed] =
74 | PersistenceQuery(system)
75 | .readJournalFor[PostgresReadJournal](PostgresReadJournal.Identifier)
76 | .eventsByTags(tags, fromRowId)
77 | .map { env =>
78 | // this will blow up if anything other than an E comes in!!
79 | env.event match {
80 | case evt: E => evt
81 | case unexpected => sys.error(s"Oops!! That was totally unexpected: $unexpected")
82 | }
83 | }
84 |
85 | def startSource[E](fromRowId: Long)(implicit tag: ClassTag[E]): Source[E, NotUsed] =
86 | PersistenceQuery(system)
87 | .readJournalFor[PostgresReadJournal](PostgresReadJournal.Identifier)
88 | .allEvents(fromRowId)
89 | .map { env =>
90 | env.event match {
91 | case evt: E => evt
92 | case unexpected => sys.error(s"Oops!! That was totally unexpected: $unexpected")
93 | }
94 | }
95 |
96 | def startSource[E](persistenceId: String, fromRowId: Long)(implicit tag: ClassTag[E]): Source[E, NotUsed] =
97 | PersistenceQuery(system)
98 | .readJournalFor[PostgresReadJournal](PostgresReadJournal.Identifier)
99 | .eventsByPersistenceId(persistenceId, fromRowId, Long.MaxValue)
100 | .map { env =>
101 | env.event match {
102 | case evt: E => evt
103 | case unexpected => sys.error(s"Oops!! That was totally unexpected: $unexpected")
104 | }
105 | }
106 |
107 | def startCurrentSource[E](tags: Set[EventTag], fromRowId: Long)(implicit tag: ClassTag[E]): Source[E, NotUsed] =
108 | PersistenceQuery(system)
109 | .readJournalFor[PostgresReadJournal](PostgresReadJournal.Identifier)
110 | .currentEventsByTags(tags, fromRowId, Long.MaxValue)
111 | .map { env =>
112 | // this will blow up if anything other than an E comes in!!
113 | env.event match {
114 | case evt: E => evt
115 | case unexpected => sys.error(s"Oops!! That was totally unexpected: $unexpected")
116 | }
117 | }
118 |
119 | def startCurrentSource[E](fromRowId: Long)(implicit tag: ClassTag[E]): Source[E, NotUsed] =
120 | PersistenceQuery(system)
121 | .readJournalFor[PostgresReadJournal](PostgresReadJournal.Identifier)
122 | .currentAllEvents(fromRowId)
123 | .map { env =>
124 | env.event match {
125 | case evt: E => evt
126 | case unexpected => sys.error(s"Oops!! That was totally unexpected: $unexpected")
127 | }
128 | }
129 |
130 | def startCurrentSource[E](persistenceId: String, fromSeqNr: Long, toSeqNr: Long = Long.MaxValue)(
131 | implicit tag: ClassTag[E]
132 | ): Source[E, NotUsed] =
133 | PersistenceQuery(system)
134 | .readJournalFor[PostgresReadJournal](PostgresReadJournal.Identifier)
135 | .currentEventsByPersistenceId(persistenceId, fromSeqNr, toSeqNr)
136 | .map { env =>
137 | env.event match {
138 | case evt: E => evt
139 | case unexpected => sys.error(s"Oops!! That was totally unexpected: $unexpected")
140 | }
141 | }
142 |
143 | def startCurrentSource(): Source[String, NotUsed] =
144 | PersistenceQuery(system)
145 | .readJournalFor[PostgresReadJournal](PostgresReadJournal.Identifier)
146 | .currentPersistenceIds()
147 |
148 | }
149 |
--------------------------------------------------------------------------------
/modules/akka-persistence-pg/src/test/scala/akka/persistence/pg/CustomSerializationTest.scala:
--------------------------------------------------------------------------------
1 | package akka.persistence.pg
2 |
3 | import java.util.UUID
4 |
5 | import akka.actor.Props
6 | import akka.persistence.pg.CustomSerializationTest.{
7 | ExamplePA,
8 | GetData,
9 | IntCommand,
10 | IntEvent,
11 | MyData,
12 | StringCommand,
13 | StringEvent,
14 | TakeSnapshot
15 | }
16 | import akka.persistence.pg.journal.JournalTable
17 | import akka.persistence.{PersistentActor, SnapshotOffer}
18 | import akka.persistence.pg.util.{CreateTables, PersistentActorTest, RecreateSchema}
19 | import akka.serialization.SerializerWithStringManifest
20 | import com.typesafe.config.{Config, ConfigFactory}
21 | import org.scalatest.BeforeAndAfterAll
22 | import org.scalatest.concurrent.{Eventually, ScalaFutures}
23 | import org.scalatest.matchers.should.Matchers
24 | import org.scalatest.time.{Seconds, Span}
25 |
26 | class CustomSerializationTest
27 | extends PersistentActorTest
28 | with ScalaFutures
29 | with Eventually
30 | with RecreateSchema
31 | with Matchers
32 | with BeforeAndAfterAll
33 | with JournalTable
34 | with CreateTables
35 | with PgConfig {
36 |
37 | override def config: Config = ConfigFactory.load("example-actor-serialization-test.conf")
38 | override def pluginConfig = PluginConfig(config)
39 |
40 | override implicit val patienceConfig = PatienceConfig(timeout = scaled(Span(2, Seconds)))
41 |
42 | import driver.api._
43 |
44 | /**
45 | * recreate schema and tables before running the tests
46 | */
47 | override def beforeAll(): Unit = {
48 | database.run(recreateSchema.andThen(createTables)).futureValue
49 | ()
50 | }
51 |
52 | override protected def afterAll(): Unit = database.close()
53 |
54 | val id = UUID.randomUUID().toString
55 |
56 | test("check journal entries are stored with custom serialization") { db =>
57 | db.run(countEvents(id)).futureValue shouldEqual 0
58 | db.run(countSnapshots(id)).futureValue shouldEqual 0
59 |
60 | val actor = system.actorOf(Props(new ExamplePA(id)))
61 |
62 | testProbe.send(actor, StringCommand("foo"))
63 | testProbe.expectMsg[String]("foo")
64 |
65 | testProbe.send(actor, StringCommand("bar"))
66 | testProbe.expectMsg[String]("bar")
67 |
68 | testProbe.send(actor, IntCommand(564))
69 | testProbe.expectMsg[Int](564)
70 |
71 | testProbe.send(actor, GetData)
72 | testProbe.expectMsg[MyData](MyData("bar", 564))
73 |
74 | db.run(countEvents(id)).futureValue shouldEqual 3
75 | db.run(countSnapshots(id)).futureValue shouldEqual 0
76 |
77 | val events = db
78 | .run(
79 | journals
80 | .sortBy(_.sequenceNr)
81 | .map(r => (r.sequenceNr, r.manifest, r.payload.get))
82 | .result
83 | )
84 | .futureValue
85 |
86 | events(0)._2 shouldBe "666:string_event"
87 | events(0)._3 should contain theSameElementsAs "foo".getBytes("UTF-8")
88 | events(1)._2 shouldBe "666:string_event"
89 | events(1)._3 should contain theSameElementsAs "bar".getBytes("UTF-8")
90 | events(2)._2 shouldBe "666:int_event"
91 | events(2)._3 should contain theSameElementsAs BigInt(564).toByteArray
92 |
93 | }
94 |
95 | test("check recovery of events") { db =>
96 | db.run(countEvents(id)).futureValue shouldEqual 0
97 | db.run(countSnapshots(id)).futureValue shouldEqual 0
98 |
99 | val actor = system.actorOf(Props(new ExamplePA(id)))
100 |
101 | testProbe.send(actor, StringCommand("foo"))
102 | testProbe.expectMsg[String]("foo")
103 | testProbe.send(actor, IntCommand(999))
104 | testProbe.expectMsg[Int](999)
105 |
106 | db.run(countEvents(id)).futureValue shouldEqual 2
107 |
108 | //stop the actor
109 | system.stop(actor)
110 | testProbe watch actor
111 | testProbe.expectTerminated(actor)
112 |
113 | //send message again, don't expect an answer because actor is down
114 | testProbe.send(actor, GetData)
115 | testProbe.expectNoMessage()
116 |
117 | val recovered = system.actorOf(Props(new ExamplePA(id)))
118 | testProbe.send(recovered, GetData)
119 | testProbe.expectMsg[MyData](MyData("foo", 999))
120 |
121 | db.run(countEvents(id)).futureValue shouldEqual 2
122 | db.run(countSnapshots(id)).futureValue shouldEqual 0
123 | }
124 |
125 | test("check snapshot is stored using custom serialization") { db =>
126 | db.run(countEvents(id)).futureValue shouldEqual 0
127 | db.run(countSnapshots(id)).futureValue shouldEqual 0
128 |
129 | val actor = system.actorOf(Props(new ExamplePA(id)))
130 |
131 | //send single event
132 | testProbe.send(actor, StringCommand("foo"))
133 | testProbe.expectMsg[String]("foo")
134 | testProbe.send(actor, IntCommand(321))
135 | testProbe.expectMsg[Int](321)
136 |
137 | testProbe.send(actor, TakeSnapshot)
138 | testProbe.expectNoMessage()
139 |
140 | eventually {
141 | db.run(countSnapshots(id)).futureValue shouldEqual 1
142 | }
143 | db.run(countEvents(id)).futureValue shouldEqual 2
144 |
145 | //stop the actor
146 | system.stop(actor)
147 | testProbe watch actor
148 | testProbe.expectTerminated(actor)
149 |
150 | val recovered = system.actorOf(Props(new ExamplePA(id)))
151 | testProbe.send(recovered, GetData)
152 | testProbe.expectMsg[MyData](MyData("foo", 321))
153 | }
154 |
155 | }
156 |
157 | object CustomSerializationTest {
158 |
159 | case class StringCommand(message: String)
160 | case class IntCommand(message: Int)
161 | case class StringEvent(message: String)
162 | case class IntEvent(message: Int)
163 | case object GetData
164 | case object TakeSnapshot
165 |
166 | case class MyData(string: String, int: Int)
167 |
168 | class ExamplePA(override val persistenceId: String) extends PersistentActor {
169 |
170 | var data: MyData = MyData("", 0)
171 |
172 | override def receiveRecover: Receive = {
173 | case StringEvent(message) => data = data.copy(string = message)
174 | case IntEvent(message) => data = data.copy(int = message)
175 | case SnapshotOffer(metadata, snapshot) => data = snapshot.asInstanceOf[MyData]
176 | }
177 |
178 | override def receiveCommand: Receive = {
179 | case StringCommand(message) =>
180 | persist(StringEvent(message)) { e =>
181 | data = data.copy(string = message)
182 | sender() ! message
183 | }
184 | case IntCommand(message) =>
185 | persist(IntEvent(message)) { e =>
186 | data = data.copy(int = message)
187 | sender() ! message
188 | }
189 |
190 | case GetData => sender() ! data
191 | case TakeSnapshot => saveSnapshot(data)
192 | }
193 |
194 | }
195 |
196 | }
197 |
198 | class EventSerializer extends SerializerWithStringManifest {
199 |
200 | override def identifier: Int = 666
201 |
202 | override def manifest(o: AnyRef): String = o match {
203 | case StringEvent(_) => "string_event"
204 | case IntEvent(_) => "int_event"
205 | }
206 |
207 | override def toBinary(o: AnyRef): Array[Byte] = o match {
208 | case StringEvent(message) => message.getBytes("UTF-8")
209 | case IntEvent(message) => BigInt(message).toByteArray
210 | }
211 |
212 | override def fromBinary(bytes: Array[Byte], manifest: String): AnyRef = manifest match {
213 | case "string_event" => StringEvent(new String(bytes, "UTF-8"))
214 | case "int_event" => IntEvent(BigInt(bytes).toInt)
215 | }
216 | }
217 |
218 | class SnapshotSerializer extends SerializerWithStringManifest {
219 |
220 | override def identifier: Int = 667
221 |
222 | override def manifest(o: AnyRef): String = "data_snapshot"
223 |
224 | override def toBinary(o: AnyRef): Array[Byte] = o match {
225 | case MyData(s, i) => s"$i:$s".getBytes("UTF-8")
226 | }
227 |
228 | override def fromBinary(bytes: Array[Byte], manifest: String): AnyRef = {
229 | val snapshot = new String(bytes, "UTF-8")
230 | val i = snapshot.indexOf(':')
231 | MyData(snapshot.substring(i + 1), snapshot.substring(0, i).toInt)
232 | }
233 | }
234 |
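A round-trip sketch exercising the two custom serializers directly, using only the classes defined in this file (hypothetical object name, placed in the same package):

package akka.persistence.pg

import akka.persistence.pg.CustomSerializationTest.{IntEvent, MyData, StringEvent}

object SerializerRoundTrip extends App {
  val events    = new EventSerializer
  val snapshots = new SnapshotSerializer

  // toBinary and fromBinary are inverses given the manifest produced for the same object.
  val evt = StringEvent("foo")
  assert(events.fromBinary(events.toBinary(evt), events.manifest(evt)) == evt)

  val num = IntEvent(564)
  assert(events.fromBinary(events.toBinary(num), events.manifest(num)) == num)

  // The snapshot serializer packs MyData as "int:string" and splits it on the first colon.
  val data = MyData("bar", 42)
  assert(snapshots.fromBinary(snapshots.toBinary(data), snapshots.manifest(data)) == data)
}
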
--------------------------------------------------------------------------------
/modules/akka-persistence-pg/src/test/scala/akka/persistence/pg/EventStoreTest.scala:
--------------------------------------------------------------------------------
1 | package akka.persistence.pg
2 |
3 | import java.time.format.DateTimeFormatter
4 |
5 | import akka.actor.Props
6 | import akka.persistence.pg.TestActor._
7 | import akka.stream.scaladsl.{Sink, Source}
8 | import org.scalatest.concurrent.Eventually
9 |
10 | import scala.collection.mutable.ListBuffer
11 | import scala.util.Random
12 | import scala.util.parsing.json.JSON
13 |
14 | class EventStoreTest extends AbstractEventStoreTest with Eventually {
15 |
16 | import driver.api._
17 |
18 | implicit val materializer = system
19 |
20 | test("generate events") {
21 | val test = system.actorOf(Props(new TestActor(testProbe.ref)))
22 |
23 | testProbe.send(test, Alter("foo"))
24 | testProbe.expectMsg("j")
25 | testProbe.send(test, GetState)
26 | testProbe.expectMsg(TheState(id = "foo"))
27 |
28 | testProbe.send(test, Alter("bar"))
29 | testProbe.expectMsg("j")
30 | testProbe.send(test, GetState)
31 | testProbe.expectMsg(TheState(id = "bar"))
32 |
33 | database.run(journals.size.result).futureValue shouldBe 2
34 |
35 | // kill the actor
36 | system.stop(test)
37 | testProbe watch test
38 | testProbe.expectTerminated(test)
39 |
40 | // get persisted state
41 | val test2 = system.actorOf(Props(new TestActor(testProbe.ref)))
42 | testProbe.send(test2, GetState)
43 | testProbe.expectMsg(TheState(id = "bar"))
44 |
45 | system.stop(test2)
46 | testProbe watch test2
47 | testProbe.expectTerminated(test2)
48 | ()
49 | }
50 |
51 | test("events implementing Created use its timestamp as the event's creation time") {
52 | val test = system.actorOf(Props(new TestActor(testProbe.ref)))
53 |
54 | testProbe.send(test, Alter("foo"))
55 | testProbe.expectMsg("j")
56 | testProbe.send(test, GetState)
57 | testProbe.expectMsg(TheState(id = "foo"))
58 |
59 | database.run(events.size.result).futureValue shouldBe 1
60 | val storedEvent = database.run(events.result.head).futureValue
61 | getCreated(storedEvent.event) shouldBe DateTimeFormatter.ISO_DATE_TIME.format(storedEvent.created)
62 | }
63 |
64 | //ignored because the assertion cannot be guaranteed: the timestamps could very well be identical
65 | ignore("events NOT implementing Created don't use an event-supplied timestamp as the creation time") {
66 | val test = system.actorOf(Props(new TestActor(testProbe.ref)))
67 |
68 | testProbe.send(test, Increment(5))
69 | testProbe.expectMsg("j")
70 | testProbe.send(test, GetState)
71 | testProbe.expectMsg(TheState(count = 5))
72 |
73 | database.run(events.size.result).futureValue shouldBe 1
74 | val storedEvent = database.run(events.result.head).futureValue
75 | getCreated(storedEvent.event) shouldNot be(DateTimeFormatter.ISO_DATE_TIME.format(storedEvent.created))
76 | }
77 |
78 | def getCreated(jsonString: JsonString): Any =
79 | JSON.parseFull(jsonString.value).get.asInstanceOf[Map[String, Any]]("created")
80 |
81 | test("generate snapshots") {
82 | val test = system.actorOf(Props(new TestActor(testProbe.ref)))
83 |
84 | testProbe.send(test, Alter("baz"))
85 | testProbe.expectMsg("j")
86 | testProbe.send(test, GetState)
87 | testProbe.expectMsg(TheState(id = "baz"))
88 |
89 | testProbe.send(test, Snap)
90 | testProbe.expectMsg("s")
91 |
92 | database.run(events.size.result).futureValue shouldBe 1 //1 Alter event total
93 | database.run(snapshots.size.result).futureValue shouldBe 1 //1 snapshot stored
94 | database.run(journals.size.result).futureValue shouldBe 1 //1 journal message after the snapshot
95 |
96 | testProbe.send(test, Alter("foobar"))
97 | testProbe.expectMsg("j")
98 | testProbe.send(test, GetState)
99 | testProbe.expectMsg(TheState(id = "foobar"))
100 |
101 | database.run(events.size.result).futureValue shouldBe 2 //2 Alter events total
102 | database.run(snapshots.size.result).futureValue shouldBe 1 //1 snapshot stored
103 | database.run(journals.size.result).futureValue shouldBe 2 //2 journal message
104 |
105 | // kill the actor
106 | system.stop(test)
107 | testProbe watch test
108 | testProbe.expectTerminated(test)
109 |
110 | // get persisted state
111 | val test2 = system.actorOf(Props(new TestActor(testProbe.ref)))
112 | testProbe.send(test2, GetState)
113 | testProbe.expectMsg(TheState(id = "foobar"))
114 |
115 | system.stop(test2)
116 | testProbe watch test2
117 | testProbe.expectTerminated(test2)
118 | ()
119 | }
120 |
121 | test("all events") {
122 | val test = system.actorOf(Props(new TestActor(testProbe.ref)))
123 |
124 | 1 to 10 foreach { i =>
125 | val s: String = Random.nextString(5)
126 | testProbe.send(test, Alter(s))
127 | testProbe.expectMsg("j")
128 | testProbe.send(test, GetState)
129 | testProbe.expectMsg(TheState(id = s))
130 | }
131 |
132 | database.run(events.size.result).futureValue shouldBe 10
133 | database.run(journals.size.result).futureValue shouldBe 10
134 |
135 | val storedEvents = ListBuffer[TestActor.Event]()
136 | val eventStore = pluginConfig.eventStore.get
137 |
138 | Source
139 | .fromPublisher(database.stream(eventStore.allEvents()))
140 | .to(Sink.foreach[akka.persistence.pg.event.Event] { e =>
141 | storedEvents.append(eventStore.toDomainEvent[TestActor.Event](e))
142 | })
143 | .run()
144 |
145 | eventually(storedEvents.size shouldBe 10)
146 |
147 | }
148 |
149 | }
150 |
--------------------------------------------------------------------------------
/modules/akka-persistence-pg/src/test/scala/akka/persistence/pg/EventStoreWithSnapshotEncoderTest.scala:
--------------------------------------------------------------------------------
1 | package akka.persistence.pg
2 |
3 | import akka.actor.Props
4 | import akka.persistence.pg.TestActor._
5 | import akka.persistence.pg.snapshot.SnapshotEntry
6 | import com.typesafe.config.{Config, ConfigFactory}
7 |
8 | class EventStoreWithSnapshotEncoderTest extends AbstractEventStoreTest {
9 |
10 | import driver.api._
11 |
12 | override lazy val config: Config = ConfigFactory.load("pg-eventstore-snapshotencoder.conf")
13 |
14 | test("generate snapshots as json") {
15 | val test = system.actorOf(Props(new TestActor(testProbe.ref)))
16 |
17 | testProbe.send(test, Alter("baz"))
18 | testProbe.expectMsg("j")
19 | testProbe.send(test, GetState)
20 | testProbe.expectMsg(TheState(id = "baz"))
21 |
22 | testProbe.send(test, Snap)
23 | testProbe.expectMsg("s")
24 |
25 | database.run(events.size.result).futureValue shouldBe 1 //1 Alter event total
26 | database.run(snapshots.size.result).futureValue shouldBe 1 //1 snapshot stored
27 | val snapshotEntry: SnapshotEntry = database.run(snapshots.result.head).futureValue
28 | snapshotEntry.manifest shouldBe Some(classOf[TheState].getName)
29 | snapshotEntry.payload shouldBe None
30 | snapshotEntry.json.isDefined shouldBe true
31 | snapshotEntry.json.get.value shouldBe """{
32 | | "id": "baz",
33 | | "count": 0
34 | |}""".stripMargin
35 |
36 | database.run(journals.size.result).futureValue shouldBe 1 //1 journal message after the snapshot
37 |
38 | testProbe.send(test, Alter("foobar"))
39 | testProbe.expectMsg("j")
40 | testProbe.send(test, GetState)
41 | testProbe.expectMsg(TheState(id = "foobar"))
42 |
43 | database.run(events.size.result).futureValue shouldBe 2 //2 Alter events total
44 | database.run(snapshots.size.result).futureValue shouldBe 1 //1 snapshot stored
45 | database.run(journals.size.result).futureValue shouldBe 2 //2 journal message
46 |
47 | // kill the actor
48 | system.stop(test)
49 | testProbe watch test
50 | testProbe.expectTerminated(test)
51 |
52 | // get persisted state
53 | val test2 = system.actorOf(Props(new TestActor(testProbe.ref)))
54 | testProbe.send(test2, GetState)
55 | testProbe.expectMsg(TheState(id = "foobar"))
56 |
57 | system.stop(test2)
58 | testProbe watch test2
59 | testProbe.expectTerminated(test2)
60 | ()
61 | }
62 |
63 | test("snapshots deserialization fails") {
64 | val test = system.actorOf(Props(new TestActor(testProbe.ref)))
65 |
66 | testProbe.send(test, Alter("baz"))
67 | testProbe.expectMsg("j")
68 | testProbe.send(test, GetState)
69 | testProbe.expectMsg(TheState(id = "baz"))
70 |
71 | testProbe.send(test, Snap)
72 | testProbe.expectMsg("s")
73 |
74 | database.run(events.size.result).futureValue shouldBe 1 //1 Alter event total
75 | database.run(snapshots.size.result).futureValue shouldBe 1 //1 snapshot stored
76 |
77 | val snapshotEntry: SnapshotEntry = database.run(snapshots.result.head).futureValue
78 | snapshotEntry.manifest shouldBe Some(classOf[TheState].getName)
79 | snapshotEntry.payload shouldBe None
80 | snapshotEntry.json.isDefined shouldBe true
81 | snapshotEntry.json.get.value shouldBe """{
82 | | "id": "baz",
83 | | "count": 0
84 | |}""".stripMargin
85 |
86 | // break serialized snapshot
87 | database
88 | .run(
89 | snapshots
90 | .filter(_.persistenceId === snapshotEntry.persistenceId)
91 | .update(snapshotEntry.copy(json = Some(JsonString("""{
92 | | "id2": "bazz",
93 | | "count": 0
94 | |}""".stripMargin))))
95 | )
96 | .futureValue
97 |
98 | // kill the actor
99 | system.stop(test)
100 | testProbe watch test
101 | testProbe.expectTerminated(test)
102 |
103 | // get persisted state
104 | val test2 = system.actorOf(Props(new TestActor(testProbe.ref)))
105 | testProbe.send(test2, GetState)
106 | testProbe.expectMsg(TheState(id = "baz"))
107 |
108 | system.stop(test2)
109 | testProbe watch test2
110 | testProbe.expectTerminated(test2)
111 | ()
112 | }
113 |
114 | }
115 |
--------------------------------------------------------------------------------
/modules/akka-persistence-pg/src/test/scala/akka/persistence/pg/ExamplePersistentActorTest.scala:
--------------------------------------------------------------------------------
1 | package akka.persistence.pg
2 |
3 | import java.util.UUID
4 |
5 | import akka.actor.Props
6 | import akka.persistence.pg.ExamplePersistentActorTest.{Command, ExamplePA, GetMessage, TakeSnapshot}
7 | import akka.persistence.pg.util.{CreateTables, PersistentActorTest, RecreateSchema}
8 | import akka.persistence.{PersistentActor, SnapshotOffer}
9 | import com.typesafe.config.{Config, ConfigFactory}
10 | import org.scalatest.BeforeAndAfterAll
11 | import org.scalatest.concurrent.{Eventually, ScalaFutures}
12 | import org.scalatest.matchers.should.Matchers
13 | import org.scalatest.time.{Seconds, Span}
14 |
15 | class ExamplePersistentActorTest
16 | extends PersistentActorTest
17 | with ScalaFutures
18 | with Eventually
19 | with RecreateSchema
20 | with Matchers
21 | with BeforeAndAfterAll
22 | with CreateTables
23 | with PgConfig {
24 |
25 | override def config: Config = ConfigFactory.load("example-actor-test.conf")
26 | override def pluginConfig = PluginConfig(config)
27 |
28 | override implicit val patienceConfig = PatienceConfig(timeout = scaled(Span(2, Seconds)))
29 |
30 | /**
31 | * recreate schema and tables before running the tests
32 | */
33 | override def beforeAll(): Unit = {
34 | database.run(recreateSchema.andThen(createTables)).futureValue
35 | ()
36 | }
37 |
38 | override protected def afterAll(): Unit = database.close()
39 |
40 | val id = UUID.randomUUID().toString
41 |
42 | test("check journal entries are stored") { db =>
43 | db.run(countEvents(id)).futureValue shouldEqual 0
44 | db.run(countSnapshots(id)).futureValue shouldEqual 0
45 |
46 | val actor = system.actorOf(Props(new ExamplePA(id)))
47 |
48 | testProbe.send(actor, Command("foo"))
49 | testProbe.expectMsg[String]("oof")
50 |
51 | testProbe.send(actor, GetMessage)
52 | testProbe.expectMsg[String]("foo")
53 |
54 | testProbe.send(actor, Command("bar"))
55 | testProbe.expectMsg[String]("rab")
56 |
57 | testProbe.send(actor, GetMessage)
58 | testProbe.expectMsg[String]("bar")
59 |
60 | db.run(countEvents(id)).futureValue shouldEqual 2
61 | db.run(countSnapshots(id)).futureValue shouldEqual 0
62 | }
63 |
64 | test("check recovery of events") { db =>
65 | db.run(countEvents(id)).futureValue shouldEqual 0
66 | db.run(countSnapshots(id)).futureValue shouldEqual 0
67 |
68 | val actor = system.actorOf(Props(new ExamplePA(id)))
69 |
70 | testProbe.send(actor, Command("foo"))
71 | testProbe.expectMsg[String]("oof")
72 |
73 | db.run(countEvents(id)).futureValue shouldEqual 1
74 |
75 | //stop the actor
76 | system.stop(actor)
77 | testProbe watch actor
78 | testProbe.expectTerminated(actor)
79 |
80 | //send message again, don't expect an answer because actor is down
81 | testProbe.send(actor, GetMessage)
82 | testProbe.expectNoMessage()
83 |
84 | val recovered = system.actorOf(Props(new ExamplePA(id)))
85 | testProbe.send(recovered, GetMessage)
86 | testProbe.expectMsg[String]("foo")
87 |
88 | db.run(countEvents(id)).futureValue shouldEqual 1
89 | db.run(countSnapshots(id)).futureValue shouldEqual 0
90 | }
91 |
92 | test("check snapshot is stored") { db =>
93 | db.run(countEvents(id)).futureValue shouldEqual 0
94 | db.run(countSnapshots(id)).futureValue shouldEqual 0
95 |
96 | val actor = system.actorOf(Props(new ExamplePA(id)))
97 |
98 | //send single event
99 | testProbe.send(actor, Command("foo"))
100 | testProbe.expectMsg[String]("oof")
101 |
102 | testProbe.send(actor, TakeSnapshot)
103 | testProbe.expectNoMessage()
104 |
105 | eventually {
106 | db.run(countSnapshots(id)).futureValue shouldEqual 1
107 | }
108 | db.run(countEvents(id)).futureValue shouldEqual 1
109 |
110 | //stop the actor
111 | system.stop(actor)
112 | testProbe watch actor
113 | testProbe.expectTerminated(actor)
114 |
115 | val recovered = system.actorOf(Props(new ExamplePA(id)))
116 | testProbe.send(recovered, GetMessage)
117 | testProbe.expectMsg[String]("foo")
118 | }
119 |
120 | }
121 |
122 | object ExamplePersistentActorTest {
123 |
124 | case class Command(message: String)
125 | case class Event(message: String)
126 | case object GetMessage
127 | case object TakeSnapshot
128 |
129 | class ExamplePA(override val persistenceId: String) extends PersistentActor {
130 |
131 | var currentMessage: Option[String] = None
132 |
133 | override def receiveRecover: Receive = {
134 | case Event(message) => currentMessage = Option(message)
135 | case SnapshotOffer(metadata, snapshot) => currentMessage = snapshot.asInstanceOf[Option[String]]
136 | }
137 |
138 | override def receiveCommand: Receive = {
139 | case Command(message) =>
140 | persist(Event(message)) { e =>
141 | currentMessage = Some(message)
142 | sender() ! message.reverse
143 | }
144 | case GetMessage => sender() ! currentMessage.getOrElse(sys.error("message is not yet set"))
145 | case TakeSnapshot => saveSnapshot(currentMessage)
146 | }
147 |
148 | }
149 |
150 | }
151 |
--------------------------------------------------------------------------------
/modules/akka-persistence-pg/src/test/scala/akka/persistence/pg/ExtraDBIOSupportTest.scala:
--------------------------------------------------------------------------------
1 | package akka.persistence.pg
2 |
3 | import java.util.concurrent.atomic.AtomicInteger
4 | import java.util.concurrent.{ConcurrentHashMap, TimeUnit}
5 |
6 | import akka.actor._
7 | import akka.pattern.ask
8 | import akka.persistence.pg.perf.Messages.Alter
9 | import akka.persistence.pg.perf.ReadModelUpdateActor
10 | import akka.persistence.pg.util.{CreateTables, RecreateSchema}
11 | import akka.util.Timeout
12 | import com.typesafe.config.ConfigFactory
13 | import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach}
14 | import org.scalatest.concurrent.ScalaFutures
15 | import org.scalatest.funsuite.AnyFunSuite
16 | import org.scalatest.matchers.should.Matchers
17 | import org.scalatest.time.{Milliseconds, Seconds, Span}
18 |
19 | import scala.collection.JavaConverters._
20 | import scala.util.Random
21 |
22 | class ExtraDBIOSupportTest
23 | extends AnyFunSuite
24 | with BeforeAndAfterEach
25 | with Matchers
26 | with BeforeAndAfterAll
27 | with CreateTables
28 | with RecreateSchema
29 | with PgConfig
30 | with WaitForEvents
31 | with ScalaFutures {
32 |
33 | override implicit val patienceConfig = PatienceConfig(timeout = Span(3, Seconds), interval = Span(100, Milliseconds))
34 |
35 | val config = ConfigFactory.load("pg-readmodelupdate.conf")
36 | val system = ActorSystem("TestCluster", config)
37 | override lazy val pluginConfig = PgExtension(system).pluginConfig
38 |
39 | import driver.api._
40 |
41 | import scala.concurrent.ExecutionContext.Implicits.global
42 |
43 | implicit val timeOut = Timeout(1, TimeUnit.MINUTES)
44 | val numActors = 20
45 | var actors: Seq[ActorRef] = _
46 | val expected = 500
47 | val readModelTable = pluginConfig.getFullName("READMODEL")
48 |
49 | test("writing events should update readmodel and not block") {
50 | val map = writeEvents()
51 | database.run(countEvents).futureValue shouldBe expected
52 | database
53 | .run(sql"""select count(*) from #$readModelTable where txt is not NULL""".as[Long])
54 | .futureValue
55 | .head shouldBe actors.size
56 | map.asScala.foreach {
57 | case (i, s) =>
58 | database.run(sql"""select txt from #$readModelTable where id = $i""".as[String]).futureValue.head shouldEqual s
59 | }
60 | }
61 |
62 | def writeEvents() = {
63 | val received: AtomicInteger = new AtomicInteger(0)
64 | val map: ConcurrentHashMap[Int, String] = new ConcurrentHashMap()
65 |
66 | def sendMessage(i: Int) =
67 | actors(i) ? Alter(Random.alphanumeric.take(16).mkString) map {
68 | case s: String =>
69 | map.put(i + 1, s)
70 | received.incrementAndGet()
71 | }
72 |
73 | 1 to expected foreach { i =>
74 | sendMessage(Random.nextInt(actors.size))
75 | }
76 |
77 | waitUntilEventsWritten(expected, received)
78 |
79 | map
80 |
81 | }
82 |
83 | override def beforeAll(): Unit = {
84 | ReadModelUpdateActor.reset()
85 | database
86 | .run(
87 | recreateSchema.andThen(createTables).andThen(sqlu"""create table #$readModelTable (
88 | "id" BIGSERIAL NOT NULL PRIMARY KEY,
89 | "cnt" INTEGER,
90 | "txt" VARCHAR(255) DEFAULT NULL)""")
91 | )
92 | .futureValue
93 | actors = 1 to numActors map { i =>
94 | database.run(sqlu""" insert into #$readModelTable values ($i, 0, null)""").futureValue
95 | system.actorOf(ReadModelUpdateActor.props(driver, pluginConfig.getFullName("READMODEL")))
96 | }
97 |
98 | }
99 |
100 | override protected def afterAll(): Unit = {
101 | system.terminate()
102 | system.whenTerminated.futureValue
103 | ()
104 | }
105 |
106 | }
107 |
--------------------------------------------------------------------------------
/modules/akka-persistence-pg/src/test/scala/akka/persistence/pg/PersistAllTest.scala:
--------------------------------------------------------------------------------
1 | package akka.persistence.pg
2 |
3 | import java.util.concurrent.TimeUnit
4 | import java.util.concurrent.atomic.AtomicInteger
5 |
6 | import akka.actor._
7 | import akka.pattern.ask
8 | import akka.persistence.pg.perf.Messages.Alter
9 | import akka.persistence.pg.perf.PersistAllActor
10 | import akka.persistence.pg.util.{CreateTables, RecreateSchema}
11 | import akka.util.Timeout
12 | import com.typesafe.config.ConfigFactory
13 | import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach}
14 | import org.scalatest.concurrent.ScalaFutures
15 | import org.scalatest.funsuite.AnyFunSuite
16 | import org.scalatest.matchers.should.Matchers
17 | import org.scalatest.time.{Milliseconds, Seconds, Span}
18 |
19 | import scala.util.Random
20 |
21 | class PersistAllTest
22 | extends AnyFunSuite
23 | with BeforeAndAfterEach
24 | with Matchers
25 | with BeforeAndAfterAll
26 | with CreateTables
27 | with RecreateSchema
28 | with PgConfig
29 | with WaitForEvents
30 | with ScalaFutures {
31 |
32 | override implicit val patienceConfig = PatienceConfig(timeout = Span(3, Seconds), interval = Span(100, Milliseconds))
33 |
34 | val config = ConfigFactory.load("pg-persistall.conf")
35 | val system = ActorSystem("TestCluster", config)
36 | override lazy val pluginConfig = PgExtension(system).pluginConfig
37 |
38 | import driver.api._
39 |
40 | import scala.concurrent.ExecutionContext.Implicits.global
41 |
42 | implicit val timeOut = Timeout(1, TimeUnit.MINUTES)
43 | val numActors = 2
44 | var actors: Seq[ActorRef] = _
45 | val expected = 10
46 |
47 | test("writing events should respect order") {
48 | writeEvents()
49 | database.run(countEvents).futureValue shouldBe expected * 10
50 |
51 | actors.zipWithIndex.foreach {
52 | case (actor, i) =>
53 | val persistenceId = s"PersistAllActor_${i + 1}"
54 | val r: Vector[(Long, Long)] = database
55 | .run(
56 | sql"""select id, rowid from #${pluginConfig.fullJournalTableName}
57 | where persistenceid = $persistenceId order by id asc""".as[(Long, Long)]
58 | )
59 | .futureValue
60 |
61 | //check if ids are sorted => of course they are
62 | val ids = r map { case (id, rowid) => id }
63 | ids shouldEqual ids.sorted
64 |
65 | //check if rowids are sorted
66 | val rowIds = r map { case (id, rowid) => rowid }
67 | rowIds shouldEqual rowIds.sorted
68 | }
69 |
70 | }
71 |
72 | def writeEvents() = {
73 | val received: AtomicInteger = new AtomicInteger(0)
74 |
75 | def sendMessage(i: Int) =
76 | actors(i) ? Alter(Random.alphanumeric.take(16).mkString) map {
77 | case s: String =>
78 | received.incrementAndGet()
79 | }
80 |
81 | 1 to expected foreach { i =>
82 | sendMessage(Random.nextInt(actors.size))
83 | }
84 |
85 | waitUntilEventsWritten(expected, received)
86 | }
87 |
88 | override def beforeAll(): Unit = {
89 | PersistAllActor.reset()
90 | database.run(recreateSchema.andThen(createTables)).futureValue
91 | actors = 1 to numActors map { i =>
92 | system.actorOf(PersistAllActor.props)
93 | }
94 |
95 | }
96 |
97 | override protected def afterAll(): Unit = {
98 | system.terminate()
99 | system.whenTerminated.futureValue
100 | ()
101 | }
102 |
103 | }
104 |
--------------------------------------------------------------------------------
/modules/akka-persistence-pg/src/test/scala/akka/persistence/pg/PersistUsingJndiTest.scala:
--------------------------------------------------------------------------------
1 | package akka.persistence.pg
2 |
3 | import java.util.concurrent.TimeUnit
4 |
5 | import javax.naming.{Context, InitialContext}
6 | import akka.actor._
7 | import akka.persistence.pg.TestActor.{Alter, GetState, TheState}
8 | import akka.persistence.pg.journal.JournalTable
9 | import akka.persistence.pg.util.{CreateTables, RecreateSchema}
10 | import akka.testkit.TestProbe
11 | import akka.util.Timeout
12 | import com.typesafe.config.{Config, ConfigFactory}
13 | import org.postgresql.ds.PGSimpleDataSource
14 | import org.scalatest.BeforeAndAfterAll
15 | import org.scalatest.concurrent.ScalaFutures
16 | import org.scalatest.funsuite.AnyFunSuite
17 | import org.scalatest.matchers.should.Matchers
18 | import org.scalatest.time.{Milliseconds, Seconds, Span}
19 |
20 | class PersistUsingJndiTest
21 | extends AnyFunSuite
22 | with Matchers
23 | with BeforeAndAfterAll
24 | with JournalTable
25 | with CreateTables
26 | with RecreateSchema
27 | with PgConfig
28 | with WaitForEvents
29 | with ScalaFutures {
30 |
31 | override implicit val patienceConfig = PatienceConfig(timeout = Span(3, Seconds), interval = Span(100, Milliseconds))
32 |
33 | val config: Config = ConfigFactory.load("pg-persist-jndi.conf")
34 | implicit val system = ActorSystem("TestCluster", config)
35 | override lazy val pluginConfig: PluginConfig = PgExtension(system).pluginConfig
36 |
37 | import driver.api._
38 |
39 | val testProbe = TestProbe()
40 | implicit val timeOut = Timeout(1, TimeUnit.MINUTES)
41 |
42 | test("generate events") {
43 | val test = system.actorOf(Props(new TestActor(testProbe.ref)))
44 |
45 | testProbe.send(test, Alter("foo"))
46 | testProbe.expectMsg("j")
47 | testProbe.send(test, GetState)
48 | testProbe.expectMsg(TheState(id = "foo"))
49 |
50 | testProbe.send(test, Alter("bar"))
51 | testProbe.expectMsg("j")
52 | testProbe.send(test, GetState)
53 | testProbe.expectMsg(TheState(id = "bar"))
54 |
55 | database.run(journals.size.result).futureValue shouldBe 2
56 | }
57 |
58 | override def beforeAll(): Unit = {
59 | System.setProperty(Context.INITIAL_CONTEXT_FACTORY, "tyrex.naming.MemoryContextFactory")
60 | System.setProperty(Context.PROVIDER_URL, "/")
61 |
62 | val simpleDataSource = new PGSimpleDataSource()
63 | simpleDataSource.setUrl(pluginConfig.dbConfig.getString("url"))
64 | simpleDataSource.setUser(pluginConfig.dbConfig.getString("user"))
65 | simpleDataSource.setPassword(pluginConfig.dbConfig.getString("password"))
66 | simpleDataSource.setPrepareThreshold(1)
67 |
68 | new InitialContext().rebind("MyDS", simpleDataSource)
69 |
70 | database.run(recreateSchema.andThen(createTables)).futureValue
71 | ()
72 | }
73 |
74 | override protected def afterAll(): Unit = {
75 | system.terminate()
76 | system.whenTerminated.futureValue
77 | ()
78 | }
79 |
80 | }
81 |
--------------------------------------------------------------------------------
/modules/akka-persistence-pg/src/test/scala/akka/persistence/pg/ReadModelUpdateActorTest.scala:
--------------------------------------------------------------------------------
1 | package akka.persistence.pg
2 |
3 | import akka.persistence.pg.perf.Messages.Alter
4 | import akka.persistence.pg.perf.ReadModelUpdateActor
5 | import akka.persistence.pg.perf.ReadModelUpdateActor.TextNotUnique
6 | import akka.persistence.pg.util.{CreateTables, PersistentActorTest, RecreateSchema}
7 | import com.typesafe.config.{Config, ConfigFactory}
8 | import org.scalatest.concurrent.{Eventually, ScalaFutures}
9 | import org.scalatest.time.{Seconds, Span}
10 | import org.scalatest.BeforeAndAfterAll
11 | import org.scalatest.matchers.should.Matchers
12 |
13 | class ReadModelUpdateActorTest
14 | extends PersistentActorTest
15 | with ScalaFutures
16 | with Eventually
17 | with RecreateSchema
18 | with Matchers
19 | with BeforeAndAfterAll
20 | with CreateTables
21 | with PgConfig {
22 |
23 | override val config: Config = ConfigFactory.load("example-actor-test.conf")
24 | override val pluginConfig = PluginConfig(config)
25 |
26 | override implicit val patienceConfig = PatienceConfig(timeout = scaled(Span(2, Seconds)))
27 |
28 | import driver.api._
29 |
30 | /**
31 | * recreate schema and tables before running the tests
32 | */
33 | override def beforeAll(): Unit = {
34 | database
35 | .run(
36 | recreateSchema
37 | .andThen(createTables)
38 | .andThen(sqlu"""create table #$readModelTable (
39 | "id" BIGSERIAL NOT NULL PRIMARY KEY,
40 | "cnt" INTEGER,
41 | "txt" VARCHAR(255) DEFAULT NULL)""")
42 | .andThen(sqlu"""CREATE unique INDEX readmodel_txt_idx ON #$readModelTable (txt)""")
43 | )
44 | .futureValue
45 | 1 to 2 map { i =>
46 | database.run(sqlu"""insert into #$readModelTable values ($i, 0, null)""").futureValue
47 | }
48 | ()
49 | }
50 |
51 | override protected def afterAll(): Unit = database.close()
52 |
53 | val readModelTable = pluginConfig.getFullName("READMODEL")
54 | val countReadModelEntries = sql"""select count(*) from #$readModelTable where txt is not NULL""".as[Long]
55 |
56 | test("check sending unique text messages should work") { db =>
57 | ReadModelUpdateActor.reset()
58 | db.run(countEvents).futureValue shouldEqual 0
59 | db.run(countSnapshots).futureValue shouldEqual 0
60 | db.run(countReadModelEntries).futureValue.head shouldBe 0
61 |
62 | val actor1 = system.actorOf(ReadModelUpdateActor.props(driver, readModelTable))
63 | val actor2 = system.actorOf(ReadModelUpdateActor.props(driver, readModelTable))
64 |
65 | testProbe.send(actor1, Alter("foo"))
66 | testProbe.expectMsg[String]("foo")
67 |
68 | testProbe.send(actor2, Alter("bar"))
69 | testProbe.expectMsg[String]("bar")
70 |
71 | db.run(countEvents).futureValue shouldEqual 2
72 | db.run(countSnapshots).futureValue shouldEqual 0
73 | db.run(countReadModelEntries).futureValue.head shouldBe 2
74 | }
75 |
76 | test("check sending non-unique text messages should not be allowed") { implicit db =>
77 | ReadModelUpdateActor.reset()
78 | db.run(countEvents).futureValue shouldEqual 0
79 | db.run(countSnapshots).futureValue shouldEqual 0
80 | db.run(countReadModelEntries).futureValue.head shouldBe 0
81 |
82 | val actor1 = system.actorOf(ReadModelUpdateActor.props(driver, readModelTable))
83 | val actor2 = system.actorOf(ReadModelUpdateActor.props(driver, readModelTable))
84 |
85 | testProbe.send(actor1, Alter("foo"))
86 | testProbe.expectMsg[String]("foo")
87 |
88 | val checkPoint = savepoint()
89 |
90 | testProbe.send(actor2, Alter("foo"))
91 | testProbe.expectMsg(TextNotUnique)
92 |
93 | rollback(checkPoint)
94 | db.run(countEvents).futureValue shouldEqual 1
95 | db.run(countSnapshots).futureValue shouldEqual 0
96 | db.run(countReadModelEntries).futureValue.head shouldBe 1
97 |
98 | }
99 |
100 | }
101 |
--------------------------------------------------------------------------------
/modules/akka-persistence-pg/src/test/scala/akka/persistence/pg/TestActor.scala:
--------------------------------------------------------------------------------
1 | package akka.persistence.pg
2 |
3 | import java.time.OffsetDateTime
4 |
5 | import akka.actor.{ActorLogging, ActorRef, Props}
6 | import akka.persistence.pg.event.Created
7 | import akka.persistence.{PersistentActor, SaveSnapshotFailure, SaveSnapshotSuccess, SnapshotOffer}
8 |
9 | class TestActor(testProbe: ActorRef, id: Option[String] = None) extends PersistentActor with ActorLogging {
10 |
11 | import akka.persistence.pg.TestActor._
12 |
13 | override def persistenceId: String = id.getOrElse(s"TestActor")
14 |
15 | var state = TheState()
16 |
17 | override def receiveRecover: Receive = {
18 | case SnapshotOffer(_, snap: TheState) =>
19 | log.info("Recovering snapshot: {}", snap)
20 | state = snap
21 | case m: Altered =>
22 | log.info("Recovering journal: {}", m)
23 | state = state.copy(id = m.id)
24 | case i: Incremented =>
25 | log.info("Recovering journal: {}", i)
26 | state = state.copy(count = state.count + i.count)
27 | }
28 |
29 | override def receiveCommand: Receive = {
30 | case a: Alter =>
31 | persist(Altered(a.id, OffsetDateTime.now())) {
32 | case Altered(m, _) =>
33 | state = state.copy(id = m)
34 | testProbe ! "j"
35 | }
36 | case i: Increment =>
37 | persist(Incremented(i.count, OffsetDateTime.now())) {
38 | case Incremented(c, _) =>
39 | state = state.copy(count = state.count + c)
40 | testProbe ! "j"
41 | }
42 | case Snap => saveSnapshot(state)
43 | case msg: SaveSnapshotFailure => testProbe ! "f"
44 | case msg: SaveSnapshotSuccess => testProbe ! "s"
45 | case GetState => sender() ! state
46 | }
47 | }
48 |
49 | object TestActor {
50 |
51 | def props(testProbe: ActorRef, persistenceId: Option[String] = None) = Props(new TestActor(testProbe, persistenceId))
52 |
53 | case object Snap
54 |
55 | case object GetState
56 |
57 | case class Alter(id: String)
58 |
59 | case class Increment(count: Int)
60 |
61 | sealed trait Event
62 | case class Altered(id: String, created: OffsetDateTime) extends Created with Event
63 |
64 | case class Incremented(count: Int, created: OffsetDateTime) extends Event
65 |
66 | case class TheState(id: String = "", count: Int = 0)
67 |
68 | }
69 |
--------------------------------------------------------------------------------
/modules/akka-persistence-pg/src/test/scala/akka/persistence/pg/TestEventEncoder.scala:
--------------------------------------------------------------------------------
1 | package akka.persistence.pg
2 |
3 | import java.time.OffsetDateTime
4 | import java.time.format.DateTimeFormatter
5 |
6 | import akka.persistence.pg.TestActor.{Altered, Incremented}
7 | import akka.persistence.pg.event.JsonEncoder
8 | import scala.util.parsing.json._
9 |
10 | class TestEventEncoder extends JsonEncoder {
11 |
12 | override def toJson = {
13 | case a: Altered => JsonString(s"""{
14 | | "type": "altered",
15 | | "id": "${a.id}",
16 | | "created": "${DateTimeFormatter.ISO_DATE_TIME.format(a.created)}"
17 | |}""".stripMargin)
18 |
19 | case i: Incremented => JsonString(s"""{
20 | | "count": ${i.count},
21 | | "created": "${DateTimeFormatter.ISO_DATE_TIME.format(i.created)}"
22 | |}""".stripMargin)
23 | }
24 |
25 | private def parseDateTime(jsonMap: Map[String, Any]): OffsetDateTime =
26 | OffsetDateTime.from(DateTimeFormatter.ISO_DATE_TIME.parse(jsonMap("created").asInstanceOf[String]))
27 |
28 | private def altered(jsValue: Map[String, Any]): Altered =
29 | Altered(jsValue("id").asInstanceOf[String], parseDateTime(jsValue))
30 |
31 | private def incremented(jsValue: Map[String, Any]): Incremented =
32 | Incremented(jsValue("count").asInstanceOf[Double].toInt, parseDateTime(jsValue))
33 |
34 | private def parseJsonString(jsonString: JsonString) =
35 | JSON.parseFull(jsonString.value).get.asInstanceOf[Map[String, Any]]
36 |
37 | override def fromJson = {
38 | case (json, c) if c == classOf[Altered] => altered(parseJsonString(json))
39 | case (json, c) if c == classOf[Incremented] => incremented(parseJsonString(json))
40 | }
41 |
42 | }
43 |
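Assuming JsonEncoder exposes toJson and fromJson as partial functions, as the overrides above suggest, encoding and decoding should round-trip. A hedged sketch (hypothetical object name):

import java.time.OffsetDateTime
import akka.persistence.pg.TestActor.Altered

object EncoderRoundTrip extends App {
  val encoder  = new akka.persistence.pg.TestEventEncoder
  val original = Altered("foo", OffsetDateTime.now())
  val json     = encoder.toJson(original)                    // apply the encoding partial function
  val decoded  = encoder.fromJson((json, classOf[Altered]))  // dispatches on the target class
  assert(decoded == original)                                // ISO_DATE_TIME formatting preserves the timestamp
}
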
--------------------------------------------------------------------------------
/modules/akka-persistence-pg/src/test/scala/akka/persistence/pg/TestEventStore.scala:
--------------------------------------------------------------------------------
1 | package akka.persistence.pg
2 |
3 | import akka.persistence.pg.event.EventStore
4 |
5 | class TestEventStore(override val pluginConfig: PluginConfig) extends EventStore with PgConfig
6 |
--------------------------------------------------------------------------------
/modules/akka-persistence-pg/src/test/scala/akka/persistence/pg/TestEventTagger.scala:
--------------------------------------------------------------------------------
1 | package akka.persistence.pg
2 |
3 | import akka.persistence.pg.event.EventTagger
4 | import TestActor._
5 |
6 | class TestEventTagger extends EventTagger {
7 |
8 | def tags(event: Any): Map[String, String] =
9 | event match {
10 | case _: Altered => Map(TestTags.alteredTag)
11 | case _: Incremented => Map(TestTags.incrementedTag)
12 | case _ => Map.empty
13 | }
14 | }
15 |
16 | object TestTags {
17 | val alteredTag = "_type" -> "Altered"
18 | val incrementedTag = "_type" -> "Incremented"
19 | }
20 |
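The tagger maps each event type to a fixed tag pair and everything else to no tags. A quick check (hypothetical object name):

import java.time.OffsetDateTime
import akka.persistence.pg.TestActor.{Altered, Incremented}
import akka.persistence.pg.{TestEventTagger, TestTags}

object TaggerCheck extends App {
  val tagger = new TestEventTagger
  assert(tagger.tags(Altered("x", OffsetDateTime.now())) == Map(TestTags.alteredTag))
  assert(tagger.tags(Incremented(1, OffsetDateTime.now())) == Map(TestTags.incrementedTag))
  assert(tagger.tags("anything else") == Map.empty) // untagged events yield no tags
}
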
--------------------------------------------------------------------------------
/modules/akka-persistence-pg/src/test/scala/akka/persistence/pg/TestSnapshotEncoder.scala:
--------------------------------------------------------------------------------
1 | package akka.persistence.pg
2 |
3 | import akka.persistence.pg.TestActor.TheState
4 | import akka.persistence.pg.event.JsonEncoder
5 |
6 | import scala.util.parsing.json._
7 |
8 | class TestSnapshotEncoder extends JsonEncoder {
9 |
10 | override def toJson = {
11 | case a: TheState => JsonString(s"""{
12 | | "id": "${a.id}",
13 | | "count": ${a.count}
14 | |}""".stripMargin)
15 | }
16 |
17 | private def parseJsonString(jsonString: JsonString) =
18 | JSON.parseFull(jsonString.value).get.asInstanceOf[Map[String, Any]]
19 |
20 | override def fromJson = {
21 | case (json, c) if c == classOf[TheState] =>
22 | val jsValue = parseJsonString(json)
23 | TheState(jsValue("id").asInstanceOf[String], jsValue("count").asInstanceOf[Double].toInt)
24 | }
25 |
26 | }
27 |
--------------------------------------------------------------------------------
/modules/akka-persistence-pg/src/test/scala/akka/persistence/pg/WaitForEvents.scala:
--------------------------------------------------------------------------------
1 | package akka.persistence.pg
2 |
3 | import java.util.concurrent.atomic.AtomicInteger
4 |
5 | trait WaitForEvents {
6 |
7 | def waitUntilEventsWritten(expected: Int, written: AtomicInteger) = {
8 | var noProgressCount = 0
9 | var numEvents = written.get()
10 | while (numEvents != expected && noProgressCount < 50) {
11 | Thread.sleep(100L)
12 | val numExtra = written.get() - numEvents
13 | if (numExtra == 0) noProgressCount += 1
14 | else numEvents += numExtra
15 | }
16 | }
17 |
18 | }
19 |
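waitUntilEventsWritten polls the counter every 100 ms and gives up after 50 consecutive polls without progress, i.e. roughly five seconds of stagnation. A minimal usage sketch (hypothetical object name):

import java.util.concurrent.atomic.AtomicInteger
import akka.persistence.pg.WaitForEvents

object WaitForEventsDemo extends App with WaitForEvents {
  val written = new AtomicInteger(0)
  // In the tests, asynchronous writers bump this counter once per persisted event;
  // here we pre-fill it so the loop returns immediately.
  written.incrementAndGet()
  waitUntilEventsWritten(1, written)
  assert(written.get() == 1)
}
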
--------------------------------------------------------------------------------
/modules/akka-persistence-pg/src/test/scala/akka/persistence/pg/perf/Messages.scala:
--------------------------------------------------------------------------------
1 | package akka.persistence.pg.perf
2 |
3 | object Messages {
4 | sealed trait Event
5 | case class Alter(text: String)
6 | case class Altered(text: String, created: Long) extends Event
7 | }
8 |
--------------------------------------------------------------------------------
/modules/akka-persistence-pg/src/test/scala/akka/persistence/pg/perf/PerfActor.scala:
--------------------------------------------------------------------------------
1 | package akka.persistence.pg.perf
2 |
3 | import java.util.UUID
4 |
5 | import akka.actor.{ActorLogging, Props}
6 | import akka.persistence.PersistentActor
7 | import akka.persistence.pg.perf.Messages.{Alter, Altered}
8 |
9 | class PerfActor extends PersistentActor with ActorLogging {
10 |
11 | override val persistenceId: String = "TestActor_" + UUID.randomUUID().toString
12 |
13 | override def receiveRecover: Receive = { case _ => }
14 |
15 | override def receiveCommand: Receive = {
16 | case Alter(txt) =>
17 | persist(Altered(txt, System.currentTimeMillis())) { _ =>
18 | sender() ! txt
19 | }
20 | }
21 |
22 | }
23 |
24 | object PerfActor {
25 | def props = Props(new PerfActor)
26 | }
27 |
--------------------------------------------------------------------------------
/modules/akka-persistence-pg/src/test/scala/akka/persistence/pg/perf/PerfEventEncoder.scala:
--------------------------------------------------------------------------------
1 | package akka.persistence.pg.perf
2 |
3 | import akka.persistence.pg.JsonString
4 | import akka.persistence.pg.event.JsonEncoder
5 | import akka.persistence.pg.perf.Messages.Altered
6 |
7 | import scala.util.parsing.json.JSON
8 |
9 | class PerfEventEncoder extends JsonEncoder {
10 |
11 | override def toJson = {
12 | case Altered(text, created) => JsonString(s"""{
13 | | "type": "altered",
14 | | "txt": "$text",
15 | | "created": "$created"
16 | |}""".stripMargin)
17 | }
18 |
19 | private def parseJsonString(jsonString: JsonString) =
20 | JSON.parseFull(jsonString.value).get.asInstanceOf[Map[String, Any]]
21 |
22 | private def altered(jsValue: Map[String, Any]): Altered =
23 | Altered(jsValue("txt").asInstanceOf[String], jsValue("created").asInstanceOf[Long])
24 |
25 | override def fromJson = {
26 | case (json, _) => altered(parseJsonString(json))
27 | }
28 |
29 | }
30 |
--------------------------------------------------------------------------------
/modules/akka-persistence-pg/src/test/scala/akka/persistence/pg/perf/PersistAllActor.scala:
--------------------------------------------------------------------------------
1 | package akka.persistence.pg.perf
2 |
3 | import java.util.concurrent.atomic.AtomicInteger
4 |
5 | import akka.actor.{ActorLogging, Props}
6 | import akka.persistence.PersistentActor
7 | import akka.persistence.pg.perf.Messages.{Alter, Altered}
8 |
9 | class PersistAllActor(id: Int) extends PersistentActor with ActorLogging {
10 |
11 | override val persistenceId: String = s"PersistAllActor_$id"
12 |
13 | override def receiveRecover: Receive = { case _ => }
14 |
15 | override def receiveCommand: Receive = {
16 | case Alter(txt) =>
17 | val created = System.currentTimeMillis()
18 | val events = 1 to 10 map { i =>
19 | Altered(s"${txt}_$i", created)
20 | }
21 | persistAll(events) { _ =>
22 | sender() ! txt
23 | }
24 | }
25 |
26 | }
27 |
28 | object PersistAllActor {
29 | private val id = new AtomicInteger(0)
30 | def reset() = id.set(0)
31 |
32 | def props = Props(new PersistAllActor(id.incrementAndGet()))
33 | }
34 |
--------------------------------------------------------------------------------
/modules/akka-persistence-pg/src/test/scala/akka/persistence/pg/perf/PersistAsyncActor.scala:
--------------------------------------------------------------------------------
1 | package akka.persistence.pg.perf
2 |
3 | import akka.actor.{ActorLogging, Props}
4 | import akka.persistence.PersistentActor
5 | import akka.persistence.pg.perf.Messages.{Alter, Altered}
6 |
7 | class PersistAsyncActor(override val persistenceId: String) extends PersistentActor with ActorLogging {
8 |
9 | override def receiveRecover: Receive = { case _ => }
10 |
11 | override def receiveCommand: Receive = {
12 | case Alter(txt) =>
13 | val created = System.currentTimeMillis()
14 | val events = 1 to 10 map { i =>
15 | Altered(s"${txt}_$i", created)
16 | }
17 | persistAllAsync(events) { _ =>
18 | sender() ! txt
19 | }
20 | }
21 |
22 | }
23 |
24 | object PersistAsyncActor {
25 | def props(persistenceId: String) = Props(new PersistAsyncActor(persistenceId))
26 | }
27 |
--------------------------------------------------------------------------------
/modules/akka-persistence-pg/src/test/scala/akka/persistence/pg/perf/RandomDelayPerfActor.scala:
--------------------------------------------------------------------------------
1 | package akka.persistence.pg.perf
2 |
3 | import java.util.UUID
4 |
5 | import akka.actor.{ActorLogging, Props}
6 | import akka.persistence.PersistentActor
7 | import akka.persistence.pg.PgPostgresProfile
8 | import akka.persistence.pg.event.{EventWrapper, ExtraDBIOSupport}
9 | import akka.persistence.pg.perf.Messages.{Alter, Altered}
10 | import slick.jdbc.{GetResult, PositionedResult}
11 |
12 | import scala.util.Random
13 |
14 | object RandomDelayPerfActor {
15 | def props(driver: PgPostgresProfile, persistenceId: Option[String] = None) =
16 | Props(new RandomDelayPerfActor(driver, persistenceId))
17 | }
18 |
19 | class RandomDelayPerfActor(driver: PgPostgresProfile, pid: Option[String]) extends PersistentActor with ActorLogging {
20 |
21 | override val persistenceId: String = pid.getOrElse("TestActor_" + UUID.randomUUID().toString)
22 |
23 | override def receiveRecover: Receive = { case _ => }
24 |
25 | override def receiveCommand: Receive = {
26 | case Alter(txt) =>
27 | persist(new ExtraDBIOSupport with EventWrapper[Altered] {
28 | import driver.api._
29 | implicit object GetUnit extends GetResult[Unit] { def apply(rs: PositionedResult) = { rs.nextObject(); () } }
30 |
31 | override def extraDBIO: DBIO[_] = sql"""select pg_sleep(${Random.nextInt(150) / 1000.0})""".as[Unit]
32 |
33 | override def failureHandler = PartialFunction.empty
34 |
35 | override def event: Altered = Altered(txt, System.currentTimeMillis())
36 | }) { _ =>
37 | sender() ! txt
38 | }
39 | }
40 |
41 | }
42 |
--------------------------------------------------------------------------------
/modules/akka-persistence-pg/src/test/scala/akka/persistence/pg/perf/ReadModelUpdateActor.scala:
--------------------------------------------------------------------------------
1 | package akka.persistence.pg.perf
2 |
3 | import java.util.concurrent.atomic.AtomicInteger
4 |
5 | import akka.actor.Props
6 | import akka.persistence.PersistentActor
7 | import akka.persistence.pg.event.{EventWrapper, ExtraDBIOSupport}
8 | import akka.persistence.pg.PgPostgresProfile
9 | import akka.persistence.pg.perf.Messages.{Alter, Altered}
10 | import akka.persistence.pg.perf.ReadModelUpdateActor.TextNotUnique
11 | import org.postgresql.util.PSQLException
12 | import slick.jdbc.{GetResult, PositionedResult}
13 |
14 | object ReadModelUpdateActor {
15 | case object TextNotUnique
16 |
17 | private val id = new AtomicInteger(0)
18 | def reset() = id.set(0)
19 |
20 | def props(driver: PgPostgresProfile, fullTableName: String) =
21 | Props(new ReadModelUpdateActor(driver, fullTableName, id.incrementAndGet()))
22 | }
23 |
24 | class ReadModelUpdateActor(driver: PgPostgresProfile, fullTableName: String, id: Int) extends PersistentActor {
25 |
26 | override val persistenceId: String = s"TestActor_$id"
27 |
28 | override def receiveRecover: Receive = { case _ => }
29 |
30 | override def receiveCommand: Receive = {
31 | case Alter(txt) =>
32 | persist(new ExtraDBIOSupport with EventWrapper[Altered] {
33 |
34 | import driver.api._
35 | import context.dispatcher
36 | implicit object GetUnit extends GetResult[Unit] { def apply(rs: PositionedResult) = { rs.nextObject(); () } }
37 |
38 | override def extraDBIO: DBIO[_] =
39 | sql"""select cnt from #$fullTableName where id = $id"""
40 | .as[Long]
41 | .flatMap { c =>
42 | val i = c(0).toInt + 1
43 | sqlu"""update #$fullTableName set txt = $txt, cnt=$i where id = $id"""
44 | }
45 |
46 | override def failureHandler = { case t: PSQLException if t.getSQLState == "23505" => sender() ! TextNotUnique }
47 |
48 | override def event: Altered = Altered(txt, System.currentTimeMillis())
49 | }) { _ =>
50 | sender() ! txt
51 | }
52 | }
53 |
54 | override protected def onPersistRejected(cause: Throwable, event: Any, seqNr: Long): Unit =
55 | event match {
56 | case readModelUpdate: ExtraDBIOSupport =>
57 | if (readModelUpdate.failureHandler.isDefinedAt(cause)) {
58 | readModelUpdate.failureHandler(cause)
59 | }
60 | case _ =>
61 | }
62 |
63 | }
64 |
--------------------------------------------------------------------------------
/modules/akka-persistence-pg/src/test/scala/akka/persistence/pg/query/EventStoreQueryNotificationTest.scala:
--------------------------------------------------------------------------------
1 | package akka.persistence.pg.query
2 |
3 | import java.util.concurrent.TimeUnit
4 |
5 | import akka.NotUsed
6 | import akka.actor.ActorRef
7 | import akka.persistence.pg._
8 | import akka.stream.scaladsl.{RunnableGraph, Sink}
9 | import akka.util.Timeout
10 | import com.typesafe.config.{Config, ConfigFactory}
11 | import org.scalatest.concurrent.{Eventually, ScalaFutures}
12 | import org.scalatest.time.{Milliseconds, Seconds, Span}
13 |
14 | import scala.util.Random
15 |
16 | /**
17 | * uses the RowIdUpdating write strategy and will use the "rowid" column of the journal
18 | * table for queries
19 | */
20 | class EventStoreQueryNotificationTest extends AbstractEventStoreTest with PgConfig with Eventually with ScalaFutures {
21 |
22 | override lazy val config: Config = ConfigFactory.load("pg-eventstore-rowid.conf")
23 |
24 | override implicit val patienceConfig = PatienceConfig(timeout = Span(20, Seconds), interval = Span(100, Milliseconds))
25 |
26 | implicit val timeOut = Timeout(1, TimeUnit.MINUTES)
27 |
28 | val expected = 2000
29 | val numActors = 100
30 | var actors: Map[String, ActorRef] = Map.empty
31 |
32 | test("query tagged events tagged with 'Altered'") {
33 | var events = List[E]()
34 | val sink = Sink.foreach[E] { e =>
35 | events = events :+ e
36 | }
37 |
38 | val graph: RunnableGraph[NotUsed] = startSource[E](Set(TestTags.alteredTag), 0).to(sink)
39 |
40 | 1 to expected foreach { i =>
41 | actors.values.toSeq(Random.nextInt(actors.size)) ! alterCommand(i)
42 | }
43 |
44 | graph.run()
45 |
46 | println(s"query tagged events, expecting $expected events")
47 | eventually {
48 | // println(events.size)
49 | if (events.size >= expected - 5) checkConsecutive(events)
50 | events.size shouldBe expected
51 | }
52 |
53 | }
54 |
55 | test("query all events") {
56 | var events = List[E]()
57 | val sink = Sink.foreach[E] { e =>
58 | events = events :+ e
59 | }
60 |
61 | val graph: RunnableGraph[NotUsed] = startSource[E](0).to(sink)
62 |
63 | 1 to expected foreach { i =>
64 | actors.values.toSeq(Random.nextInt(actors.size)) ! alterCommand(i)
65 | }
66 |
67 | graph.run()
68 |
69 | println(s"query all events, expecting $expected events")
70 | eventually {
71 | // println(events.size)
72 | if (events.size >= expected - 5) checkConsecutive(events)
73 | events.size shouldBe expected
74 | }
75 |
76 | }
77 |
78 | test("query persistenceId events") {
79 | var events = List[E]()
80 | val sink = Sink.foreach[E] { e =>
81 | events = events :+ e
82 | }
83 |
84 | var expectedForPersistenceId = 0
85 | val index = Random.nextInt(actors.size)
86 | val persistenceId = actors.keys.toSeq(index)
87 | val graph: RunnableGraph[NotUsed] = startSource[E](persistenceId, 0).to(sink)
88 |
89 | 1 to expected foreach { i =>
90 | val chosen = Random.nextInt(actors.size)
91 | if (chosen == index) expectedForPersistenceId += 1
92 | actors.values.toSeq(chosen) ! alterCommand(i)
93 | }
94 |
95 | graph.run()
96 |
97 | println(s"query persistenceId events, expecting $expectedForPersistenceId events")
98 | eventually {
99 | // println(events.size)
100 | events should have size expectedForPersistenceId
101 | }
102 | database.run(countEvents(persistenceId)).futureValue shouldEqual expectedForPersistenceId
103 |
104 | }
105 |
106 | override def beforeAll(): Unit = {
107 | super.beforeAll()
108 | actors = (1 to numActors map { i: Int =>
109 | val pid = s"TestActor-$i"
110 | pid -> createActor(pid)
111 | }).toMap
112 | }
113 |
114 | type E = TestActor.Event
115 |
116 | def alterCommand(i: Int) = TestActor.Alter(i.toString)
117 |
118 | def createActor(pid: String): ActorRef = system.actorOf(TestActor.props(testProbe.ref, Some(pid)))
119 |
120 | def checkConsecutive(events: List[E]): Unit =
121 | events
122 | .collect { case TestActor.Altered(id, _) => id.toInt }
123 | .sorted
124 | .sliding(2)
125 | .find(l => if (l.size == 1) false else l.head + 1 != l(1))
126 | .foreach(println)
127 |
128 | }
129 |
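The pg-eventstore-rowid.conf this suite loads is not reproduced in this dump; presumably it selects the write strategy the same way as pg-perf-rowid.conf further down:

~~~
pg-persistence {
  writestrategy = "akka.persistence.pg.journal.RowIdUpdatingStrategy"
}
~~~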
--------------------------------------------------------------------------------
/modules/akka-persistence-pg/src/test/scala/akka/persistence/pg/query/EventStoreQueryOnIdNotificationTest.scala:
--------------------------------------------------------------------------------
1 | package akka.persistence.pg.query
2 |
3 | import com.typesafe.config.ConfigFactory
4 |
5 | /**
6 | * uses the default TableLocking write strategy and will use the "id" column of the journal
7 | * table for queries
8 | */
9 | class EventStoreQueryOnIdNotificationTest extends EventStoreQueryNotificationTest {
10 |
11 | override lazy val config = ConfigFactory.load("pg-eventstore-locking.conf")
12 |
13 | }
14 |
--------------------------------------------------------------------------------
/modules/akka-persistence-pg/src/test/scala/akka/persistence/pg/query/EventStoreQueryOnIdTest.scala:
--------------------------------------------------------------------------------
1 | package akka.persistence.pg.query
2 |
3 | import com.typesafe.config.ConfigFactory
4 |
5 | /**
6 | * uses the TableLocking write strategy and will use the "id" column of the journal
7 | * table for queries
8 | */
9 | class EventStoreQueryOnIdTest extends EventStoreQueryTest {
10 |
11 | override lazy val config = ConfigFactory.load("pg-eventstore-locking.conf")
12 |
13 | }
14 |
--------------------------------------------------------------------------------
/modules/akka-persistence-pg/src/test/scala/akka/persistence/pg/testkit/PgAsyncJournalSpec.scala:
--------------------------------------------------------------------------------
1 | package akka.persistence.pg.testkit
2 |
3 | import akka.persistence.CapabilityFlag
4 | import akka.persistence.journal.JournalSpec
5 | import akka.persistence.pg.journal.JournalTable
6 | import akka.persistence.pg.util.{CreateTables, RecreateSchema}
7 | import akka.persistence.pg.{PgConfig, PgExtension}
8 | import com.typesafe.config.ConfigFactory
9 | import org.scalatest.concurrent.ScalaFutures
10 | import org.scalatest.time.{Milliseconds, Second, Span}
11 |
12 | class PgAsyncJournalSpec
13 | extends JournalSpec(ConfigFactory.load("pg-application.conf"))
14 | with JournalTable
15 | with RecreateSchema
16 | with ScalaFutures
17 | with CreateTables
18 | with PgConfig {
19 |
20 | override implicit val patienceConfig = PatienceConfig(timeout = Span(1, Second), interval = Span(100, Milliseconds))
21 |
22 | override lazy val pluginConfig = PgExtension(system).pluginConfig
23 |
24 | import driver.api._
25 |
26 | override def beforeAll(): Unit = {
27 | pluginConfig.database
28 | .run(
29 | recreateSchema
30 | .andThen(journals.schema.create)
31 | )
32 | .futureValue
33 | super.beforeAll()
34 | }
35 |
36 | override protected def afterAll(): Unit = {
37 | system.terminate()
38 | system.whenTerminated.futureValue
39 | ()
40 | }
41 |
42 | override protected def supportsRejectingNonSerializableObjects: CapabilityFlag = false
43 |
44 | protected override def supportsSerialization: CapabilityFlag = false
45 | }
46 |
--------------------------------------------------------------------------------
/modules/akka-persistence-pg/src/test/scala/akka/persistence/pg/testkit/PgSnapshotStoreSpec.scala:
--------------------------------------------------------------------------------
1 | package akka.persistence.pg.testkit
2 |
3 | import akka.persistence.pg.snapshot.SnapshotTable
4 | import akka.persistence.pg.{PgConfig, PgExtension, PluginConfig}
5 | import akka.persistence.pg.util.RecreateSchema
6 | import akka.persistence.snapshot.SnapshotStoreSpec
7 | import com.typesafe.config.ConfigFactory
8 | import org.scalatest.concurrent.ScalaFutures
9 | import org.scalatest.time.{Milliseconds, Second, Span}
10 |
11 | class PgSnapshotStoreSpec
12 | extends SnapshotStoreSpec(ConfigFactory.load("pg-application.conf"))
13 | with SnapshotTable
14 | with RecreateSchema
15 | with ScalaFutures
16 | with PgConfig {
17 |
18 | override implicit val patienceConfig = PatienceConfig(timeout = Span(1, Second), interval = Span(100, Milliseconds))
19 |
20 | override lazy val pluginConfig: PluginConfig = PgExtension(system).pluginConfig
21 |
22 | import driver.api._
23 |
24 | override def beforeAll(): Unit = {
25 | database.run(recreateSchema.andThen(snapshots.schema.create)).futureValue
26 | super.beforeAll()
27 | }
28 |
29 | override protected def afterAll(): Unit = {
30 | system.terminate()
31 | system.whenTerminated.futureValue
32 | ()
33 | }
34 |
35 | }
36 |
--------------------------------------------------------------------------------
/modules/akka-persistence-pg/src/test/scala/akka/persistence/pg/util/CreateTables.scala:
--------------------------------------------------------------------------------
1 | package akka.persistence.pg.util
2 |
3 | import akka.persistence.pg.PgConfig
4 |
5 | trait CreateTables {
6 | self: PgConfig =>
7 |
8 | import driver.api._
9 |
10 | lazy val createJournal = sqlu"""CREATE TABLE #${pluginConfig.fullJournalTableName} (
11 | "id" BIGSERIAL NOT NULL PRIMARY KEY,
12 | "persistenceid" VARCHAR(254) NOT NULL,
13 | "sequencenr" INT NOT NULL,
14 | "rowid" BIGINT DEFAULT NULL,
15 | "deleted" BOOLEAN DEFAULT false,
16 | "payload" BYTEA,
17 | "manifest" VARCHAR(512),
18 | "uuid" VARCHAR(36) NOT NULL,
19 | "writeruuid" VARCHAR(36) NOT NULL,
20 | "created" timestamptz NOT NULL,
21 | "tags" HSTORE,
22 | "event" #${pluginConfig.jsonType},
23 | CONSTRAINT "cc_journal_payload_event" check (payload IS NOT NULL OR event IS NOT NULL))"""
24 |
25 | lazy val createSnapshot = sqlu"""CREATE TABLE #${pluginConfig.fullSnapshotTableName} (
26 | "persistenceid" VARCHAR(254) NOT NULL,
27 | "sequencenr" INT NOT NULL,
28 | "timestamp" bigint NOT NULL,
29 | "snapshot" BYTEA,
30 | "manifest" VARCHAR(512),
31 | "json" #${pluginConfig.jsonType},
32 | CONSTRAINT "cc_snapshot_payload_json" check (snapshot IS NOT NULL OR (json IS NOT NULL AND manifest IS NOT NULL)),
33 | PRIMARY KEY (persistenceid, sequencenr))"""
34 |
35 | lazy val createUniqueIndex =
36 | sqlu"""CREATE UNIQUE INDEX journal_pidseq_idx ON #${pluginConfig.fullJournalTableName} (persistenceid, sequencenr)"""
37 | lazy val createEventIndex =
38 | sqlu"""CREATE INDEX journal_event_idx ON #${pluginConfig.fullJournalTableName} USING gin (event)"""
39 | lazy val createRowIdIndex =
40 | sqlu"""CREATE UNIQUE INDEX journal_rowid_idx ON #${pluginConfig.fullJournalTableName} (rowid)"""
41 |
42 | lazy val createTables = createJournal
43 | .andThen(createUniqueIndex)
44 | .andThen(createRowIdIndex)
45 | .andThen(createSnapshot)
46 |
47 | def countEvents = sql"""select count(*) from #${pluginConfig.fullJournalTableName}""".as[Long].head
48 | def countEvents(id: String) =
49 | sql"""select count(*) from #${pluginConfig.fullJournalTableName} where persistenceid = $id""".as[Long].head
50 | def countSnapshots(id: String) =
51 | sql"""select count(*) from #${pluginConfig.fullSnapshotTableName} where persistenceid = $id""".as[Long].head
52 | def countSnapshots = sql"""select count(*) from #${pluginConfig.fullSnapshotTableName}""".as[Long].head
53 |
54 | }
55 |
--------------------------------------------------------------------------------
/modules/akka-persistence-pg/src/test/scala/akka/persistence/pg/util/PersistentActorTest.scala:
--------------------------------------------------------------------------------
1 | package akka.persistence.pg.util
2 |
3 | import java.sql.Savepoint
4 | import java.util.concurrent.TimeUnit
5 |
6 | import akka.actor.ActorSystem
7 | import akka.testkit.TestProbe
8 | import akka.util.Timeout
9 | import com.typesafe.config.Config
10 | import org.scalatest._
11 | import org.scalatest.funsuite.FixtureAnyFunSuiteLike
12 | import slick.jdbc.JdbcBackend
13 |
14 | import scala.concurrent.Await
15 | import scala.concurrent.duration.Duration
16 | import scala.util.Try
17 |
18 | /**
19 | * Base class for testing a persistent actor.
20 | * DB sessions are rolled back after each test, maintaining a clean db state.
21 | * This also means the actor system needs to be recreated for each test.
22 | */
23 | trait PersistentActorTest extends FixtureAnyFunSuiteLike with BeforeAndAfterEach {
24 |
25 | def config: Config
26 |
27 | implicit val defaultTimeout = Timeout(10, TimeUnit.SECONDS)
28 |
29 | implicit var system: ActorSystem = _
30 | var testProbe: TestProbe = _
31 |
32 | override protected def beforeEach(): Unit = {
33 | system = ActorSystem("PersistentActorTest", config)
34 | testProbe = TestProbe()
35 | }
36 |
37 | type FixtureParam = JdbcBackend.DatabaseDef
38 |
39 | override protected def withFixture(test: OneArgTest): Outcome = {
40 | val possibleOutcome = Try {
41 | PgPluginTestUtil.withTransactionRollback { db =>
42 | withFixture(test.toNoArgTest(db))
43 | }
44 | }
45 | //akka shutdown must be done in this way instead of using afterEach
46 | system.terminate()
47 | Await.result(system.whenTerminated, Duration.Inf)
48 | possibleOutcome.get
49 | }
50 |
51 | def savepoint()(implicit db: JdbcBackend.DatabaseDef): Savepoint = db.createSession().conn.setSavepoint()
52 | def rollback(savepoint: Savepoint)(implicit db: JdbcBackend.DatabaseDef) = db.createSession().conn.rollback(savepoint)
53 |
54 | }
55 |
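A minimal usage sketch of this base class, assuming the pg-application.conf used by the specs elsewhere in this dump (the test class and assertion are illustrative): the fixture parameter is the database session whose transaction withFixture rolls back.

~~~
package akka.persistence.pg.util

import com.typesafe.config.{Config, ConfigFactory}
import org.scalatest.matchers.should.Matchers

// Illustrative test; journal writes made here never hit the real tables.
class RollbackExampleTest extends PersistentActorTest with Matchers {

  override def config: Config = ConfigFactory.load("pg-application.conf")

  test("each test sees a clean journal") { db =>
    // drive a persistent actor via system/testProbe here; all writes go
    // through a transaction that is rolled back after the test
    db should not be null
  }
}
~~~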
--------------------------------------------------------------------------------
/modules/akka-persistence-pg/src/test/scala/akka/persistence/pg/util/RecreateSchema.scala:
--------------------------------------------------------------------------------
1 | package akka.persistence.pg.util
2 |
3 | import akka.persistence.pg.PgConfig
4 |
5 | trait RecreateSchema {
6 | self: PgConfig =>
7 |
8 | import driver.api._
9 |
10 | lazy val dropSchema = sqlu"""drop schema if exists #${pluginConfig.schemaName} cascade"""
11 | lazy val createSchema = sqlu"""create schema #${pluginConfig.schemaName}"""
12 |
13 | lazy val recreateSchema = dropSchema.andThen(createSchema)
14 |
15 | }
16 |
--------------------------------------------------------------------------------
/modules/akka-persistence-pg/src/test/scala/akka/persistence/pg/writestrategy/MissingWriteStrategySuite.scala:
--------------------------------------------------------------------------------
1 | package akka.persistence.pg.writestrategy
2 |
3 | import com.typesafe.config.Config
4 |
5 | abstract class MissingWriteStrategySuite(config: Config) extends WriteStrategySuite(config) {
6 |
7 | import driver.api._
8 |
9 | override val expected = 2500
10 |
11 | test("polling stored events should miss events") {
12 | val ids = writeEvents()
13 | val missing: Seq[Long] = missingIds(ids)
14 | println(missing)
15 | missing.isEmpty shouldBe false
16 | ids.size shouldNot equal(expected)
17 | database.run(journals.size.result).futureValue shouldBe expected
18 | ()
19 | }
20 |
21 | }
22 |
--------------------------------------------------------------------------------
/modules/akka-persistence-pg/src/test/scala/akka/persistence/pg/writestrategy/NonMissingWriteStrategySuite.scala:
--------------------------------------------------------------------------------
1 | package akka.persistence.pg.writestrategy
2 |
3 | import com.typesafe.config.Config
4 |
5 | abstract class NonMissingWriteStrategySuite(config: Config) extends WriteStrategySuite(config) {
6 |
7 | import driver.api._
8 |
9 | test("polling stored events should not miss events") {
10 | val ids = writeEvents()
11 | missingIds(ids) shouldEqual Seq.empty
12 | ids.size shouldEqual expected
13 | database.run(journals.size.result).futureValue shouldBe expected
14 | ()
15 | }
16 |
17 | }
18 |
--------------------------------------------------------------------------------
/modules/akka-persistence-pg/src/test/scala/akka/persistence/pg/writestrategy/RowIdUpdatingWriteStrategySuite.scala:
--------------------------------------------------------------------------------
1 | package akka.persistence.pg.writestrategy
2 |
3 | import com.typesafe.config.ConfigFactory
4 |
5 | class RowIdUpdatingWriteStrategySuite
6 | extends NonMissingWriteStrategySuite(ConfigFactory.load("pg-writestrategy-rowid.conf"))
7 |
--------------------------------------------------------------------------------
/modules/akka-persistence-pg/src/test/scala/akka/persistence/pg/writestrategy/SingleThreadedWriteStrategySuite.scala:
--------------------------------------------------------------------------------
1 | package akka.persistence.pg.writestrategy
2 |
3 | import com.typesafe.config.ConfigFactory
4 |
5 | class SingleThreadedWriteStrategySuite
6 | extends NonMissingWriteStrategySuite(ConfigFactory.load("pg-writestrategy-st.conf"))
7 |
--------------------------------------------------------------------------------
/modules/akka-persistence-pg/src/test/scala/akka/persistence/pg/writestrategy/TableLockingWriteStrategySuite.scala:
--------------------------------------------------------------------------------
1 | package akka.persistence.pg.writestrategy
2 |
3 | import com.typesafe.config.ConfigFactory
4 |
5 | class TableLockingWriteStrategySuite
6 | extends NonMissingWriteStrategySuite(ConfigFactory.load("pg-writestrategy-locking.conf"))
7 |
--------------------------------------------------------------------------------
/modules/akka-persistence-pg/src/test/scala/akka/persistence/pg/writestrategy/TransactionalWriteStrategySuite.scala:
--------------------------------------------------------------------------------
1 | package akka.persistence.pg.writestrategy
2 |
3 | import com.typesafe.config.ConfigFactory
4 |
5 | class TransactionalWriteStrategySuite
6 | extends MissingWriteStrategySuite(ConfigFactory.load("pg-writestrategy-tx.conf"))
7 |
--------------------------------------------------------------------------------
/modules/akka-persistence-pg/src/test/scala/akka/persistence/pg/writestrategy/WriteStrategySuite.scala:
--------------------------------------------------------------------------------
1 | package akka.persistence.pg.writestrategy
2 |
3 | import java.util.concurrent.TimeUnit
4 | import java.util.concurrent.atomic.AtomicInteger
5 |
6 | import akka.actor._
7 | import akka.pattern.{ask, pipe}
8 | import akka.persistence.pg.event._
9 | import akka.persistence.pg.journal.JournalTable
10 | import akka.persistence.pg.perf.Messages.Alter
11 | import akka.persistence.pg.perf.RandomDelayPerfActor
12 | import akka.persistence.pg.snapshot.SnapshotTable
13 | import akka.persistence.pg.util.{CreateTables, RecreateSchema}
14 | import akka.persistence.pg.{PgConfig, PgExtension, PluginConfig, WaitForEvents}
15 | import akka.util.Timeout
16 | import com.typesafe.config.Config
17 | import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach}
18 | import org.scalatest.concurrent.ScalaFutures
19 | import org.scalatest.funsuite.AnyFunSuite
20 | import org.scalatest.matchers.should.Matchers
21 | import org.scalatest.time.{Milliseconds, Seconds, Span}
22 |
23 | import scala.concurrent.Await
24 | import scala.concurrent.duration._
25 | import scala.language.postfixOps
26 | import scala.util.Random
27 | import scala.util.control.NonFatal
28 |
29 | abstract class WriteStrategySuite(config: Config)
30 | extends AnyFunSuite
31 | with Matchers
32 | with BeforeAndAfterEach
33 | with BeforeAndAfterAll
34 | with JournalTable
35 | with SnapshotTable
36 | with RecreateSchema
37 | with CreateTables
38 | with PgConfig
39 | with WaitForEvents
40 | with ScalaFutures {
41 |
42 | val system = ActorSystem("TestCluster", config)
43 | override lazy val pluginConfig: PluginConfig = PgExtension(system).pluginConfig
44 |
45 | import driver.api._
46 |
47 | import scala.concurrent.ExecutionContext.Implicits.global
48 |
49 | implicit val timeOut = Timeout(1, TimeUnit.MINUTES)
50 | var actors: Seq[ActorRef] = _
51 | val expected = 1000
52 |
53 | def writeEvents(): Seq[Long] = {
54 | val received: AtomicInteger = new AtomicInteger(0)
55 | val eventReader = system.actorOf(Props(new EventReader()))
56 |
57 | 1 to expected foreach { i =>
58 | actors(Random.nextInt(actors.size)) ? Alter(Random.alphanumeric.take(16).mkString) map {
59 | case s =>
60 | received.incrementAndGet()
61 | }
62 | }
63 |
64 | waitUntilEventsWritten(expected, received)
65 |
66 | //just sleep a bit so the EventReader has seen the last events
67 | Thread.sleep(2000)
68 | Await.result((eventReader ? "stop").mapTo[Seq[Long]], 10 seconds)
69 |
70 | }
71 |
72 | def missingIds(ids: Seq[Long]): Seq[Long] = {
73 | var result: Seq[Long] = Seq.empty[Long]
74 | var prevId = 0L
75 | ids foreach { id: Long =>
76 | if (id != prevId + 1) {
77 | result = result :+ id
78 | }
79 | prevId = id
80 | }
81 | result
82 | }
83 |
84 | override implicit val patienceConfig = PatienceConfig(timeout = Span(10, Seconds), interval = Span(100, Milliseconds))
85 |
86 | override def beforeAll(): Unit = {
87 | database
88 | .run(
89 | recreateSchema.andThen(journals.schema.create).andThen(snapshots.schema.create)
90 | )
91 | .futureValue
92 | actors = 1 to 10 map { _ =>
93 | system.actorOf(RandomDelayPerfActor.props(driver))
94 | }
95 | }
96 |
97 | override protected def afterAll(): Unit = {
98 | system.terminate()
99 | Await.result(system.whenTerminated, Duration.Inf)
100 | ()
101 | }
102 |
103 | override protected def beforeEach(): Unit = {
104 | database.run(DBIO.seq(journals.delete)).futureValue
105 | super.beforeEach()
106 | }
107 |
108 | class EventReader extends Actor {
109 |
110 | case class Retrieve(fromId: Long)
111 | case class EventIds(ids: Seq[Long])
112 |
113 | var running = true
114 | var ids: Seq[Long] = Seq.empty
115 | self ! Retrieve(0L)
116 |
117 | override def receive: Receive = {
118 | case Retrieve(fromId) if running =>
119 | database.run {
120 | pluginConfig.eventStore.get.findEvents(fromId).result
121 | } map {
122 | _ map {
123 | _.id
124 | }
125 | } recover {
126 | case NonFatal(e) => e.printStackTrace(); Seq.empty
127 | } map EventIds pipeTo self
128 | ()
129 | case EventIds(ids) =>
130 | this.ids ++= ids
131 | val max = if (this.ids.isEmpty) 0 else this.ids.max + 1
132 | self ! Retrieve(max)
133 | case "stop" =>
134 | running = false
135 | sender() ! ids
136 | }
137 |
138 | }
139 |
140 | }
141 |
142 | class DefaultEventStore(override val pluginConfig: PluginConfig) extends EventStore with PgConfig
143 |
--------------------------------------------------------------------------------
/modules/benchmark/src/it/resources/logback.xml:
--------------------------------------------------------------------------------
1 | <configuration>
2 |   <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
3 |     <encoder>
4 |       <pattern>%d{HH:mm:ss.SSS} [%-5level] %logger{15} - %msg%n%rEx</pattern>
5 |       <immediateFlush>false</immediateFlush>
6 |     </encoder>
7 |   </appender>
8 |   <root level="INFO">
9 |     <appender-ref ref="STDOUT"/>
10 |   </root>
11 | </configuration>
--------------------------------------------------------------------------------
/modules/benchmark/src/it/resources/pg-perf-base.conf:
--------------------------------------------------------------------------------
1 | include "postgres.conf"
2 |
3 | pg-persistence {
4 | db {
5 | user = ${postgres.user}
6 | password = ${postgres.password}
7 | url = "jdbc:postgresql://"${postgres.host}":"${postgres.port}"/"${postgres.name}
8 | connectionPool = "HikariCP"
9 | queueSize = 10000
10 | }
11 | schemaName = ${postgres.schema}
12 | pgjson = "jsonb"
13 | eventstore {
14 | encoder: "akka.persistence.pg.perf.PerfEventEncoder"
15 | schemaName: ${postgres.schema}
16 | tableName: "journal"
17 | }
18 | }
19 |
20 | akka {
21 | loglevel = warning
22 | log-config-on-start = off
23 | stdout-loglevel = warning
24 | test.single-expect-default = 10000
25 |
26 | persistence {
27 | journal.plugin = "pg-journal"
28 | snapshot-store.plugin = "pg-snapshot"
29 |
30 | # we need event publishing for tests
31 | publish-confirmations = on
32 | publish-plugin-commands = on
33 | }
34 |
35 | log-dead-letters = 10
36 | log-dead-letters-during-shutdown = on
37 | }
38 |
39 |
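postgres.conf is included but not reproduced in this dump; the ${postgres.*} substitutions above fix its keys, so it presumably has this shape (values here are placeholders):

~~~
postgres {
  host = "localhost"
  port = 5432
  user = "akkapg"
  password = "akkapg"
  name = "akkapg"
  schema = "akkapg"
}
~~~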
--------------------------------------------------------------------------------
/modules/benchmark/src/it/resources/pg-perf-locking.conf:
--------------------------------------------------------------------------------
1 | include "pg-perf-base.conf"
2 | pg-persistence {
3 | writestrategy = "akka.persistence.pg.journal.TableLockingWriteStrategy"
4 | }
5 |
6 |
7 |
--------------------------------------------------------------------------------
/modules/benchmark/src/it/resources/pg-perf-rowid.conf:
--------------------------------------------------------------------------------
1 | include "pg-perf-base.conf"
2 | pg-persistence {
3 | writestrategy = "akka.persistence.pg.journal.RowIdUpdatingStrategy"
4 | }
5 |
6 |
7 |
--------------------------------------------------------------------------------
/modules/benchmark/src/it/resources/pg-perf-st.conf:
--------------------------------------------------------------------------------
1 | include "pg-perf-base.conf"
2 | pg-persistence {
3 | writestrategy = "akka.persistence.pg.journal.SingleThreadedBatchWriteStrategy"
4 | }
5 |
6 |
7 |
--------------------------------------------------------------------------------
/modules/benchmark/src/it/resources/pg-perf-tx.conf:
--------------------------------------------------------------------------------
1 | include "pg-perf-base.conf"
2 | pg-persistence {
3 | writestrategy = "akka.persistence.pg.journal.TransactionalWriteStrategy"
4 | }
5 |
6 |
7 |
--------------------------------------------------------------------------------
/modules/benchmark/src/it/scala/gatlin/AbstractPersistenceSimulation.scala:
--------------------------------------------------------------------------------
1 | package gatlin
2 |
3 | import akka.actor.ActorSystem
4 | import akka.persistence.pg.perf.Messages.Alter
5 | import akka.persistence.pg.util.{CreateTables, RecreateSchema}
6 | import akka.persistence.pg.{PgConfig, PluginConfig}
7 | import com.typesafe.config.Config
8 | import io.gatling.core.scenario.Simulation
9 | import io.gatling.core.session.{Session => GSession, Expression}
10 | import io.gatling.commons.validation.{Success, Validation}
11 |
12 | import scala.concurrent.Await
13 | import scala.concurrent.duration._
14 | import scala.language.postfixOps
15 | import scala.util.Random
16 |
17 | abstract class AbstractPersistenceSimulation(val config: Config)
18 | extends Simulation
19 | with PgConfig
20 | with RecreateSchema
21 | with CreateTables
22 | {
23 |
24 | override val pluginConfig = PluginConfig(config)
25 | val system = ActorSystem("benchmark-system", config)
26 |
27 | /**
28 | * recreate schema and tables + indices before running the benchmark
29 | */
30 | before {
31 | Await.result(database.run(
32 | recreateSchema.andThen(createTables).andThen(createEventIndex)
33 | ), 10 seconds)
34 | warmup()
35 | }
36 |
37 | def warmup(): Unit = {}
38 |
39 | val feeder = Iterator.continually(
40 | Map("text" -> Random.alphanumeric.take(32).mkString)
41 | )
42 |
43 | case class AlterMessage(text: Expression[String]) extends AskMessage {
44 | override def apply(session: GSession): Validation[Any] = {
45 | text(session).flatMap { s => Success(Alter(s)) }
46 | }
47 | }
48 |
49 | }
50 |
--------------------------------------------------------------------------------
/modules/benchmark/src/it/scala/gatlin/AskAction.scala:
--------------------------------------------------------------------------------
1 | package gatlin
2 |
3 | import java.util.concurrent.TimeUnit
4 |
5 | import akka.actor.{Actor, ActorRef, Props}
6 | import akka.pattern.ask
7 | import akka.util.Timeout
8 | import io.gatling.core.action.{Action, ChainableAction}
9 | import io.gatling.commons.stats._
10 | import io.gatling.core.session.Session
11 | import io.gatling.commons.util.TimeHelper.nowMillis
12 | import io.gatling.core.stats.StatsEngine
13 | import io.gatling.core.stats.message.ResponseTimings
14 | import io.gatling.core.util.NameGen
15 |
16 | import scala.concurrent.ExecutionContext
17 | import scala.util.control.NonFatal
18 |
19 | object AskAction {
20 |
21 | implicit val timeout = Timeout(1, TimeUnit.MINUTES)
22 |
23 | }
24 |
25 | class AskAction(val next: Action,
26 | val message: AskMessage,
27 | val statsEngine: StatsEngine,
28 | implicit val executionContext: ExecutionContext)
29 | extends ChainableAction
30 | with NameGen {
31 |
32 | override val name = genName("ask")
33 |
34 | import AskAction.timeout
35 |
36 | override def execute(session: Session): Unit = {
37 | val sendTo = session("actor").as[ActorRef]
38 | val sendMessage = message(session).get
39 |
40 | val start = nowMillis
41 | (sendTo ? sendMessage)
42 | .map { _ => OK }
43 | .recover { case NonFatal(t) => KO }
44 | .foreach { status =>
45 | val stop = nowMillis
46 | statsEngine.logResponse(session, "Request", ResponseTimings(start, stop), status, None, None)
47 | next ! (if (status == OK) session.markAsSucceeded else session.markAsFailed)
48 | }
49 | }
50 | }
51 |
--------------------------------------------------------------------------------
/modules/benchmark/src/it/scala/gatlin/AskActionBuilder.scala:
--------------------------------------------------------------------------------
1 | package gatlin
2 |
3 | import io.gatling.core.action.Action
4 | import io.gatling.core.action.builder.ActionBuilder
5 | import io.gatling.core.structure.ScenarioContext
6 |
7 | class AskActionBuilder(val message: AskMessage) extends ActionBuilder {
8 |
9 | override def build(ctx: ScenarioContext, next: Action): Action = {
10 | new AskAction(next, message, ctx.coreComponents.statsEngine, ctx.system.dispatcher)
11 | }
12 | }
13 |
--------------------------------------------------------------------------------
/modules/benchmark/src/it/scala/gatlin/AskMessage.scala:
--------------------------------------------------------------------------------
1 | package gatlin
2 |
3 | import io.gatling.core.session.Session
4 | import io.gatling.commons.validation.Validation
5 |
6 | /**
7 | * Created by peter on 01/10/15.
8 | */
9 | trait AskMessage {
10 | def apply(session: Session): Validation[Any]
11 | }
12 |
13 |
14 |
15 |
--------------------------------------------------------------------------------
/modules/benchmark/src/it/scala/gatlin/MultiActorPerfSimulation.scala:
--------------------------------------------------------------------------------
1 | package gatlin
2 |
3 | import akka.actor.ActorRef
4 | import akka.pattern.ask
5 | import akka.persistence.pg.perf.Messages.Alter
6 | import akka.persistence.pg.perf.PerfActor
7 | import akka.util.Timeout
8 | import com.typesafe.config.Config
9 | import gatlin.Predef._
10 | import io.gatling.core.Predef._
11 |
12 | import scala.concurrent.{Await, Future}
13 | import scala.concurrent.duration._
14 | import scala.language.postfixOps
15 | import scala.util.Random
16 |
17 | abstract class MultiActorPerfSimulation(override val config: Config) extends AbstractPersistenceSimulation(config)
18 | {
19 |
20 | val numActors = 100
21 | var actors: Seq[ActorRef] = _
22 |
23 | override def warmup() = {
24 | actors = 1 to numActors map { _ => system.actorOf(PerfActor.props) }
25 | implicit val timeout = Timeout(2 seconds)
26 | import scala.concurrent.ExecutionContext.Implicits.global
27 | val f = actors map { _ ? Alter("warmup") }
28 | Await.result(Future sequence f, 10 seconds)
29 | ()
30 | }
31 |
32 | val scn = scenario("multiple persistent actors").during(30 seconds) {
33 | feed(feeder)
34 | .exec { session => session.set("actor", actors(Random.nextInt(numActors))) }
35 | .exec { request(AlterMessage("${text}")) }
36 | }
37 |
38 | setUp(scn.inject(atOnceUsers(100)))
39 |
40 | }
41 |
--------------------------------------------------------------------------------
/modules/benchmark/src/it/scala/gatlin/Predef.scala:
--------------------------------------------------------------------------------
1 | package gatlin
2 |
3 | import akka.actor.ActorRef
4 |
5 | object Predef {
6 |
7 | def request(message: AskMessage) = new AskActionBuilder(message)
8 |
9 | }
10 |
--------------------------------------------------------------------------------
/modules/benchmark/src/it/scala/gatlin/SingleActorPerfSimulation.scala:
--------------------------------------------------------------------------------
1 | package gatlin
2 |
3 | import akka.actor.ActorRef
4 | import akka.pattern.ask
5 | import akka.persistence.pg.perf.Messages.Alter
6 | import akka.persistence.pg.perf.PerfActor
7 | import akka.util.Timeout
8 | import com.typesafe.config.Config
9 | import gatlin.Predef._
10 | import io.gatling.core.Predef._
11 |
12 | import scala.concurrent.Await
13 | import scala.concurrent.duration._
14 | import scala.language.postfixOps
15 |
16 | abstract class SingleActorPerfSimulation(override val config: Config) extends AbstractPersistenceSimulation(config)
17 | {
18 |
19 | var actor: ActorRef = _
20 |
21 | override def warmup() = {
22 | actor = system.actorOf(PerfActor.props)
23 | implicit val timeout = Timeout(2 seconds)
24 | Await.result(actor ? Alter("warmup"), 10 seconds)
25 | ()
26 | }
27 |
28 | val scn = scenario("single persistent actor").during(30 seconds) {
29 | feed(feeder)
30 | .exec { session => session.set("actor", actor) }
31 | .exec { request(AlterMessage("${text}")) }
32 | }
33 |
34 | setUp(scn.inject(atOnceUsers(10)))
35 |
36 | }
37 |
--------------------------------------------------------------------------------
/modules/benchmark/src/it/scala/gatlin/simulations/Simulations.scala:
--------------------------------------------------------------------------------
1 | package gatlin.simulations
2 |
3 | import com.typesafe.config.ConfigFactory
4 | import gatlin.{MultiActorPerfSimulation, SingleActorPerfSimulation}
5 |
6 | class TransactionalSingleActorSimulation extends SingleActorPerfSimulation(ConfigFactory.load("pg-perf-tx.conf"))
7 | class TransactionalMultiActorSimulation extends MultiActorPerfSimulation(ConfigFactory.load("pg-perf-tx.conf"))
8 |
9 | class RowIdUpdatingSingleActorSimulation extends SingleActorPerfSimulation(ConfigFactory.load("pg-perf-rowid.conf"))
10 | class RowIdUpdatingMultiActorSimulation extends MultiActorPerfSimulation(ConfigFactory.load("pg-perf-rowid.conf"))
11 |
12 | class SingleThreadedSingleActorSimulation extends SingleActorPerfSimulation(ConfigFactory.load("pg-perf-st.conf"))
13 | class SingleThreadedMultiActorSimulation extends MultiActorPerfSimulation(ConfigFactory.load("pg-perf-st.conf"))
14 |
15 | class TableLockingSingleActorSimulation extends SingleActorPerfSimulation(ConfigFactory.load("pg-perf-locking.conf"))
16 | class TableLockingMultiActorSimulation extends MultiActorPerfSimulation(ConfigFactory.load("pg-perf-locking.conf"))
17 |
--------------------------------------------------------------------------------
/project/BuildSettings.scala:
--------------------------------------------------------------------------------
1 | import sbt.Keys._
2 | import sbt._
3 | import org.scalafmt.sbt.ScalafmtPlugin.autoImport._
4 |
5 | object BuildSettings {
6 |
7 | def commonSettings = Seq(
8 | parallelExecution in Test := false,
9 | concurrentRestrictions in Global += Tags.limit(Tags.Test, 1),
10 | resolvers ++= Seq(
11 | "Local Maven" at Path.userHome.asFile.toURI.toURL + ".m2/repository",
12 | Resolver.typesafeRepo("releases")
13 | ),
14 | updateOptions := updateOptions.value.withCachedResolution(true),
15 | organization := "be.wegenenverkeer",
16 | scalafmtOnCompile := true
17 | )
18 |
19 | val publishingCredentials = (for {
20 | username <- Option(System.getenv().get("SONATYPE_USERNAME"))
21 | password <- Option(System.getenv().get("SONATYPE_PASSWORD"))
22 | } yield Seq(Credentials("Sonatype Nexus Repository Manager", "oss.sonatype.org", username, password)))
23 | .getOrElse(Seq())
24 |
25 | val publishSettings = Seq(
26 | publishMavenStyle := true,
27 | pomIncludeRepository := { _ =>
28 | false
29 | },
30 | publishTo := {
31 | val nexus = "https://oss.sonatype.org/"
32 | if (isSnapshot.value)
33 | Some("snapshots" at nexus + "content/repositories/snapshots")
34 | else
35 | Some("releases" at nexus + "service/local/staging/deploy/maven2")
36 | },
37 | pomExtra := <url>https://github.com/WegenenVerkeer/akka-persistence-postgresql</url>
38 |   <scm>
39 |     <url>https://github.com/WegenenVerkeer/akka-persistence-postgresql</url>
40 |     <connection>git@github.com:WegenenVerkeer/akka-persistence-postgresql.git</connection>
41 |     <developerConnection>git@github.com:WegenenVerkeer/akka-persistence-postgresql.git</developerConnection>
42 |   </scm>
43 |   <licenses>
44 |     <license>
45 |       <name>MIT license</name>
46 |       <url>http://opensource.org/licenses/MIT</url>
47 |       <distribution>repo</distribution>
48 |     </license>
49 |   </licenses>
50 |   <developers>
51 |     <developer>
52 |       <id>AWV</id>
53 |       <name>De ontwikkelaars van AWV</name>
54 |       <url>http://www.wegenenverkeer.be</url>
55 |     </developer>
56 |   </developers>,
57 | credentials ++= publishingCredentials
58 | )
59 |
60 | }
61 |
--------------------------------------------------------------------------------
/project/Dependencies.scala:
--------------------------------------------------------------------------------
1 | import sbt._
2 |
3 | object Dependencies {
4 |
5 | val akkaVersion = "2.6.19"
6 | val slickVersion = "3.3.3"
7 | val gatlingVersion = "2.2.4"
8 |
9 | val scalaJava8Compat = "org.scala-lang.modules" %% "scala-java8-compat" % "0.9.0"
10 |
11 | val akkaPersistence = "com.typesafe.akka" %% "akka-persistence" % akkaVersion
12 | val akkaSlf4j = "com.typesafe.akka" %% "akka-slf4j" % akkaVersion
13 | val akkaActor = "com.typesafe.akka" %% "akka-actor" % akkaVersion
14 | val akkaPersistenceQuery = "com.typesafe.akka" %% "akka-persistence-query" % akkaVersion
15 | val akkaStreams = "com.typesafe.akka" %% "akka-stream" % akkaVersion
16 |
17 | val slick = "com.typesafe.slick" %% "slick" % slickVersion
18 | val slickHikariCp = "com.typesafe.slick" %% "slick-hikaricp" % slickVersion
19 |
20 | // Test dependencies
21 | val scalaTest = "org.scalatest" %% "scalatest" % "3.1.0" % "test,it"
22 | val akkaTest = "com.typesafe.akka" %% "akka-testkit" % akkaVersion % "test,it"
23 | val akkaPersistenceTestkit = "com.typesafe.akka" %% "akka-persistence-tck" % akkaVersion % "test,it"
24 | val slf4jSimple = "org.slf4j" % "slf4j-simple" % "1.7.26" % "test,it"
25 | val tyrex = "tyrex" % "tyrex" % "1.0.1" % "test,it"
26 |
27 | // gatling benchmark module
28 | val gatlinHighcharts = "io.gatling.highcharts" % "gatling-charts-highcharts" % gatlingVersion
29 | val gatling = "io.gatling" % "gatling-test-framework" % gatlingVersion
30 |
31 | val postgres = "org.postgresql" % "postgresql" % "42.4.0"
32 |
33 | val mainTestDependencies = Seq(
34 | scalaTest,
35 | akkaSlf4j,
36 | tyrex
37 | )
38 |
39 | }
40 |
--------------------------------------------------------------------------------
/project/build.properties:
--------------------------------------------------------------------------------
1 | sbt.version = 1.6.2
2 |
--------------------------------------------------------------------------------
/project/plugins.sbt:
--------------------------------------------------------------------------------
1 | logLevel := Level.Warn
2 |
3 | addSbtPlugin("io.gatling" % "gatling-sbt" % "2.2.2")
4 |
5 | // supports release in maven central
6 | addSbtPlugin("org.xerial.sbt" % "sbt-sonatype" % "3.9.4")
7 | addSbtPlugin("com.jsuereth" % "sbt-pgp" % "2.0.1")
8 | addSbtPlugin("com.dwijnand" % "sbt-travisci" % "1.1.3")
9 | addSbtPlugin("org.scalameta" % "sbt-scalafmt" % "2.0.1")
10 |
--------------------------------------------------------------------------------
/release.md:
--------------------------------------------------------------------------------
1 | # Release Procedure
2 |
3 | Artefacts are published to [Sonatype OSS](https://oss.sonatype.org/),
4 | which syncs with the Maven Central repositories.
5 |
6 | The following procedure is executed manually:
7 |
8 | ~~~
9 | $ sbt +test # tests the code
10 | $ git tag vX.Y.Z -m "release version X.Y.Z"
11 | $ sbt +publishSigned   # publishes the signed artefacts to Sonatype staging
12 | $ sbt sonatypeRelease  # closes and promotes the staging repository
13 | $ git push --tags origin develop
14 | ~~~
--------------------------------------------------------------------------------
/sonatype.sbt:
--------------------------------------------------------------------------------
1 | sonatypeProfileName := "be.wegenenverkeer"
2 |
--------------------------------------------------------------------------------
/version.sbt:
--------------------------------------------------------------------------------
1 | ThisBuild / version := "0.17.0"
2 |
--------------------------------------------------------------------------------