├── .github ├── release-drafter.yml └── workflows │ ├── release.yml │ └── scala.yml ├── .gitignore ├── .sbtopts ├── .scala-steward.conf ├── .scalafmt.conf ├── CONTRIBUTING.md ├── LICENSE ├── README.md ├── build.sbt ├── core └── src │ ├── main │ ├── resources │ │ ├── LICENSE.txt │ │ └── reference.conf │ └── scala │ │ └── akka │ │ └── persistence │ │ └── postgres │ │ ├── config │ │ └── AkkaPersistenceConfig.scala │ │ ├── db │ │ ├── DbErrors.scala │ │ ├── ExtendedPostgresProfile.scala │ │ ├── SlickDatabase.scala │ │ └── SlickExtension.scala │ │ ├── journal │ │ ├── PostgresAsyncWriteJournal.scala │ │ └── dao │ │ │ ├── BaseByteArrayJournalDao.scala │ │ │ ├── ByteArrayJournalSerializer.scala │ │ │ ├── FlatJournalDao.scala │ │ │ ├── FlowControl.scala │ │ │ ├── JournalDao.scala │ │ │ ├── JournalDaoWithReadMessages.scala │ │ │ ├── JournalDaoWithUpdates.scala │ │ │ ├── JournalMetadataQueries.scala │ │ │ ├── JournalQueries.scala │ │ │ ├── JournalTables.scala │ │ │ ├── NestedPartitionsJournalDao.scala │ │ │ ├── PartitionedJournalDao.scala │ │ │ └── package.scala │ │ ├── package.scala │ │ ├── query │ │ ├── JournalSequenceActor.scala │ │ ├── PostgresReadJournalProvider.scala │ │ ├── dao │ │ │ ├── BaseByteArrayReadJournalDao.scala │ │ │ ├── FlatReadJournalDao.scala │ │ │ ├── PartitionedReadJournalDao.scala │ │ │ ├── ReadJournalDao.scala │ │ │ ├── ReadJournalMetadataQueries.scala │ │ │ └── ReadJournalQueries.scala │ │ ├── javadsl │ │ │ └── PostgresReadJournal.scala │ │ ├── package.scala │ │ └── scaladsl │ │ │ └── PostgresReadJournal.scala │ │ ├── serialization │ │ ├── PersistentReprSerializer.scala │ │ └── SnapshotSerializer.scala │ │ ├── snapshot │ │ ├── PostgresSnapshotStore.scala │ │ └── dao │ │ │ ├── ByteArraySnapshotDao.scala │ │ │ ├── ByteArraySnapshotSerializer.scala │ │ │ ├── SnapshotDao.scala │ │ │ ├── SnapshotQueries.scala │ │ │ └── SnapshotTables.scala │ │ ├── tag │ │ ├── EventTag.scala │ │ ├── EventTagQueries.scala │ │ ├── TagDao.scala │ │ ├── TagIdResolver.scala │ │ └── TagTables.scala │ │ └── util │ │ ├── BlockingOps.scala │ │ ├── ByteArrayOps.scala │ │ ├── ConfigOps.scala │ │ ├── InputStreamOps.scala │ │ └── StringOps.scala │ └── test │ ├── resources │ ├── general.conf │ ├── jndi-application.conf │ ├── jndi-shared-db-application.conf │ ├── logback-test.xml │ ├── nested-partitions-application-with-hard-delete.conf │ ├── nested-partitions-application-with-use-journal-metadata.conf │ ├── nested-partitions-application.conf │ ├── nested-partitions-journal.conf │ ├── nested-partitions-shared-db-application.conf │ ├── partitioned-application-with-hard-delete.conf │ ├── partitioned-application-with-use-journal-metadata.conf │ ├── partitioned-application.conf │ ├── partitioned-journal.conf │ ├── partitioned-shared-db-application.conf │ ├── plain-application-with-hard-delete.conf │ ├── plain-application-with-use-journal-metadata.conf │ ├── plain-application.conf │ ├── plain-shared-db-application.conf │ └── schema │ │ └── postgres │ │ ├── nested-partitions-schema.sql │ │ ├── partitioned-schema.sql │ │ └── plain-schema.sql │ └── scala │ └── akka │ └── persistence │ └── postgres │ ├── SharedActorSystemTestSpec.scala │ ├── SimpleSpec.scala │ ├── SingleActorSystemPerTestSpec.scala │ ├── TablesTestSpec.scala │ ├── configuration │ ├── AkkaPersistenceConfigTest.scala │ ├── ConfigOpsTest.scala │ └── JNDIConfigTest.scala │ ├── journal │ ├── JournalPartitioningSpec.scala │ ├── PostgresJournalPerfSpec.scala │ ├── PostgresJournalSpec.scala │ └── dao │ │ ├── ByteArrayJournalSerializerTest.scala │ │ ├── 
JournalMetadataQueriesTest.scala │ │ ├── JournalQueriesTest.scala │ │ ├── JournalTablesTest.scala │ │ └── TagsSerializationTest.scala │ ├── query │ ├── AllPersistenceIdsTest.scala │ ├── CurrentEventsByPersistenceIdTest.scala │ ├── CurrentEventsByTagTest.scala │ ├── CurrentEventsByTagWithGapsTest.scala │ ├── CurrentPersistenceIdsTest.scala │ ├── EventAdapterTest.scala │ ├── EventsByPersistenceIdTest.scala │ ├── EventsByTagTest.scala │ ├── HardDeleteQueryTest.scala │ ├── JournalDaoStreamMessagesMemoryTest.scala │ ├── JournalSequenceActorTest.scala │ ├── LogicalDeleteQueryTest.scala │ ├── QueryTestSpec.scala │ ├── TaggingEventAdapter.scala │ └── dao │ │ ├── ReadJournalMetadataQueriesTest.scala │ │ ├── ReadJournalQueriesTest.scala │ │ ├── ReadJournalTablesTest.scala │ │ └── TestProbeReadJournalDao.scala │ ├── serialization │ └── StoreOnlySerializableMessagesTest.scala │ ├── snapshot │ ├── PostgresSnapshotStoreSpec.scala │ └── dao │ │ ├── ByteArraySnapshotSerializerTest.scala │ │ ├── SnapshotQueriesTest.scala │ │ └── SnapshotTablesTest.scala │ ├── tag │ ├── CachedTagIdResolverSpec.scala │ └── SimpleTagDaoSpec.scala │ └── util │ ├── BaseQueryTest.scala │ ├── ClasspathResources.scala │ └── DropCreate.scala ├── docs ├── .gitignore ├── 404.html ├── Gemfile ├── Gemfile.lock ├── _config.yml ├── assets │ ├── partitioning │ │ ├── detaching.png │ │ ├── flat-journal.png │ │ ├── nested-partitions-journal.png │ │ └── partitioned-journal.png │ ├── project-logo.png │ ├── softwaremill-logo.png │ └── swissborg-logo.png ├── basic-configuration.md ├── custom-dao.md ├── deadlocks.md ├── favicon.ico ├── features.md ├── index.md ├── migration.md ├── persistence-query.md ├── serve.sh └── tagging.md ├── migration └── src │ ├── main │ ├── resources │ │ └── reference.conf │ └── scala │ │ └── akka │ │ └── persistence │ │ └── postgres │ │ └── migration │ │ ├── PgSlickSupport.scala │ │ ├── journal │ │ ├── Jdbc4JournalMigration.scala │ │ ├── JournalMigrationQueries.scala │ │ ├── JournalSchema.scala │ │ └── OldJournalDeserializer.scala │ │ └── snapshot │ │ ├── Jdbc4SnapshotStoreMigration.scala │ │ ├── OldSnapshotDeserializer.scala │ │ └── SnapshotMigrationQueries.scala │ └── test │ ├── resources │ ├── base-migration.conf │ ├── flat-migration.conf │ ├── logback-test.xml │ ├── nested-partitions-migration.conf │ └── partitioned-migration.conf │ └── scala │ └── akka │ └── persistence │ └── postgres │ └── migration │ └── MigrationTest.scala ├── project ├── AutomaticModuleName.scala ├── Dependencies.scala ├── ProjectAutoPlugin.scala ├── build.properties ├── plugins.sbt └── project-info.conf └── scripts ├── archivisation ├── 1-create-archivisation-table.sql ├── 2-select-partitions-to-detach.sql ├── 3-detach.sql ├── 4-export-detached.sh ├── 5-drop-detached.sql ├── 8-import-deleted.sh ├── 9-attach.sql ├── demo-prepare.sql └── demo.sh ├── create-release-issue.sh ├── docker-compose.yml ├── launch-postgres.sh ├── migration-0.5.0 └── partitioned │ ├── 1-add-indices-manually.sql │ └── 2-add-indices-flyway.sql ├── migration-0.6.0 ├── 1-create-journal-metadata-table.sql ├── 2-create-function-update-journal-metadata.sql ├── 3-create-trigger-update-journal-metadata.sql └── 4-populate-journal-metadata.sql ├── migration ├── flat │ ├── 1-create-schema.sql │ ├── 2-fill-event-tag.sql │ ├── 3-copy-data.sql │ ├── 4-move-sequence.sql │ ├── 5-create-indexes.sql │ ├── 6-drop-migration-procedures.sql │ ├── demo-prepare.sql │ └── demo.sh ├── nested-partitions │ ├── 1-create-schema.sql │ ├── 2-create-partitions.sql │ ├── 3-fill-event-tag.sql │ ├── 
4-copy-data.sql │ ├── 5-move-sequence.sql │ ├── 6-create-indexes.sql │ ├── 7-drop-migration-procedures.sql │ ├── demo-prepare.sql │ └── demo.sh └── partitioned │ ├── 1-create-schema.sql │ ├── 2-create-partitions.sql │ ├── 3-fill-event-tag.sql │ ├── 4-copy-data.sql │ ├── 5-move-sequence.sql │ ├── 6-create-indexes.sql │ ├── 7-drop-migration-procedures.sql │ ├── demo-prepare.sql │ └── demo.sh └── psql-cli.sh /.github/release-drafter.yml: -------------------------------------------------------------------------------- 1 | # Config for https://github.com/toolmantim/release-drafter 2 | name-template: 'Akka Persistence Postgres $NEXT_PATCH_VERSION' 3 | tag-template: 'v$NEXT_PATCH_VERSION' 4 | 5 | categories: 6 | - title: '🐛 Bug Fixes' 7 | label: 'bug' 8 | - title: '🚀 Enhancements' 9 | label: 'enhancement' 10 | - title: 'Dependency changes' 11 | label: 'dependency-change' 12 | 13 | change-template: '- $TITLE [#$NUMBER](https://github.com/SwissBorg/akka-persistence-postgres/issues/$NUMBER) by [@$AUTHOR](https://github.com/$AUTHOR)' 14 | 15 | template: | 16 | ## Changes 17 | 18 | $CHANGES 19 | -------------------------------------------------------------------------------- /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | name: Release 2 | on: 3 | push: 4 | branches: 5 | - master 6 | - dev 7 | tags: ["*"] 8 | jobs: 9 | publish: 10 | runs-on: ubuntu-latest 11 | steps: 12 | - uses: actions/checkout@v2 13 | - name: Setup Scala and Java 14 | uses: coursier/setup-action@v1 15 | with: 16 | jvm: adopt:1.11 17 | - uses: olafurpg/setup-gpg@v3 18 | - name: Publish ${{ github.ref }} 19 | run: sbt ci-release 20 | env: 21 | PGP_PASSPHRASE: ${{ secrets.PGP_PASSPHRASE }} 22 | PGP_SECRET: ${{ secrets.PGP_SECRET }} 23 | SONATYPE_PASSWORD: ${{ secrets.SONATYPE_PASSWORD }} 24 | SONATYPE_USERNAME: ${{ secrets.SONATYPE_USERNAME }} 25 | -------------------------------------------------------------------------------- /.github/workflows/scala.yml: -------------------------------------------------------------------------------- 1 | name: Scala CI 2 | 3 | on: 4 | push: 5 | branches: 6 | - master 7 | - dev 8 | pull_request: 9 | branches: [ master ] 10 | 11 | jobs: 12 | test: 13 | runs-on: ubuntu-latest 14 | name: Java ${{matrix.java}} 15 | strategy: 16 | matrix: 17 | java: ['adopt:1.11', 'temurin:1.11', 'temurin:1.17'] 18 | steps: 19 | - uses: actions/checkout@v2 20 | - name: Setup Scala and Java 21 | uses: coursier/setup-action@v1 22 | with: 23 | jvm: ${{matrix.java}} 24 | - name: Start Postgres 25 | run: sh ./scripts/launch-postgres.sh 26 | - name: Run tests 27 | run: sbt +test 28 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /RUNNING_PID 2 | logs 3 | target 4 | .idea 5 | *.iml 6 | *.iws 7 | .settings 8 | .classpath 9 | .project 10 | .worksheet 11 | **.DS_Store 12 | .metals/ 13 | .vscode/ 14 | .bsp/ 15 | -------------------------------------------------------------------------------- /.sbtopts: -------------------------------------------------------------------------------- 1 | -J-Xms512M 2 | -J-Xmx4096M 3 | -J-XX:MaxGCPauseMillis=200 4 | -------------------------------------------------------------------------------- /.scala-steward.conf: -------------------------------------------------------------------------------- 1 | pullRequests.frequency = "1 day" 2 | 3 | updates.ignore = [ 4 | { groupId = "com.typesafe.akka" }, 5 | { 
groupId = "org.scalameta", artifactId = "scalafmt-core" } 6 | ] 7 | 8 | -------------------------------------------------------------------------------- /.scalafmt.conf: -------------------------------------------------------------------------------- 1 | version = 3.0.6 2 | 3 | style = defaultWithAlign 4 | 5 | docstrings.style = Asterisk 6 | docstrings.wrap = no 7 | indentOperator.preset = spray 8 | maxColumn = 120 9 | rewrite.rules = [RedundantParens, SortImports, AvoidInfix] 10 | unindentTopLevelOperators = true 11 | align.tokens = [{code = "=>", owner = "Case"}] 12 | align.openParenDefnSite = false 13 | align.openParenCallSite = false 14 | optIn.breakChainOnFirstMethodDot = false 15 | optIn.configStyleArguments = false 16 | danglingParentheses.defnSite = false 17 | danglingParentheses.callSite = false 18 | spaces.inImportCurlyBraces = true 19 | rewrite.neverInfix.excludeFilters = [ 20 | and 21 | min 22 | max 23 | until 24 | to 25 | by 26 | eq 27 | ne 28 | "should.*" 29 | "contain.*" 30 | "must.*" 31 | in 32 | ignore 33 | be 34 | taggedAs 35 | thrownBy 36 | synchronized 37 | have 38 | when 39 | size 40 | only 41 | noneOf 42 | oneElementOf 43 | noElementsOf 44 | atLeastOneElementOf 45 | atMostOneElementOf 46 | allElementsOf 47 | inOrderElementsOf 48 | theSameElementsAs 49 | ] 50 | rewriteTokens = { 51 | "⇒": "=>" 52 | "→": "->" 53 | "←": "<-" 54 | } 55 | newlines.afterCurlyLambda = preserve 56 | newlines.implicitParamListModifierPrefer = before 57 | -------------------------------------------------------------------------------- /build.sbt: -------------------------------------------------------------------------------- 1 | import com.typesafe.tools.mima.plugin.MimaKeys.mimaBinaryIssueFilters 2 | 3 | lazy val `akka-persistence-postgres` = project 4 | .in(file(".")) 5 | .enablePlugins(ScalaUnidocPlugin) 6 | .disablePlugins(MimaPlugin) 7 | .aggregate(core, migration) 8 | .settings(publish / skip := true) 9 | 10 | lazy val core = project 11 | .in(file("core")) 12 | .enablePlugins(MimaPlugin) 13 | .settings( 14 | name := "akka-persistence-postgres", 15 | libraryDependencies ++= Dependencies.Libraries, 16 | mimaBinaryIssueFilters ++= Seq()) 17 | 18 | lazy val migration = project 19 | .in(file("migration")) 20 | .disablePlugins(MimaPlugin) 21 | .settings( 22 | name := "akka-persistence-postgres-migration", 23 | libraryDependencies ++= Dependencies.Migration, 24 | Test / parallelExecution := false) 25 | .dependsOn(core) 26 | 27 | TaskKey[Unit]("verifyCodeFmt") := { 28 | scalafmtCheckAll.all(ScopeFilter(inAnyProject)).result.value.toEither.left.foreach { _ => 29 | throw new MessageOnlyException( 30 | "Unformatted Scala code found. Please run 'scalafmtAll' and commit the reformatted code") 31 | } 32 | (Compile / scalafmtSbtCheck).result.value.toEither.left.foreach { _ => 33 | throw new MessageOnlyException( 34 | "Unformatted sbt code found. 
Please run 'scalafmtSbt' and commit the reformatted code") 35 | } 36 | } 37 | -------------------------------------------------------------------------------- /core/src/main/scala/akka/persistence/postgres/db/DbErrors.scala: -------------------------------------------------------------------------------- 1 | package akka.persistence.postgres.db 2 | 3 | import java.sql.SQLException 4 | 5 | import org.slf4j.Logger 6 | 7 | import scala.concurrent.ExecutionContext 8 | import scala.util.{ Failure, Success } 9 | 10 | object DbErrors { 11 | 12 | import ExtendedPostgresProfile.api._ 13 | 14 | val PgDuplicateTable: String = "42P07" 15 | val PgUniqueViolation: String = "23505" 16 | 17 | def withHandledPartitionErrors(logger: Logger, partitionDetails: String)(dbio: DBIOAction[_, NoStream, Effect])( 18 | implicit ec: ExecutionContext): DBIOAction[Unit, NoStream, Effect] = 19 | dbio.asTry.flatMap { 20 | case Failure(ex: SQLException) if ex.getSQLState == PgDuplicateTable => 21 | logger.debug(s"Partition for $partitionDetails already exists") 22 | DBIO.successful(()) 23 | case Failure(ex) => 24 | logger.error(s"Cannot create partition for $partitionDetails", ex) 25 | DBIO.failed(ex) 26 | case Success(_) => 27 | logger.debug(s"Created missing journal partition for $partitionDetails") 28 | DBIO.successful(()) 29 | } 30 | 31 | def withHandledIndexErrors(logger: Logger, indexDetails: String)(dbio: DBIOAction[_, NoStream, Effect])( 32 | implicit ec: ExecutionContext): DBIOAction[Unit, NoStream, Effect] = 33 | dbio.asTry.flatMap { 34 | case Failure(ex: SQLException) if ex.getSQLState == PgUniqueViolation => 35 | logger.debug(s"Index $indexDetails already exists") 36 | DBIO.successful(()) 37 | case Failure(ex) => 38 | logger.error(s"Cannot create index $indexDetails", ex) 39 | DBIO.failed(ex) 40 | case Success(_) => 41 | logger.debug(s"Created missing index $indexDetails") 42 | DBIO.successful(()) 43 | } 44 | } 45 | -------------------------------------------------------------------------------- /core/src/main/scala/akka/persistence/postgres/db/ExtendedPostgresProfile.scala: -------------------------------------------------------------------------------- 1 | package akka.persistence.postgres.db 2 | 3 | import com.github.tminglei.slickpg._ 4 | import slick.basic.Capability 5 | import slick.jdbc.JdbcCapabilities 6 | 7 | trait ExtendedPostgresProfile 8 | extends ExPostgresProfile 9 | with PgArraySupport 10 | with PgDate2Support 11 | with PgRangeSupport 12 | with PgHStoreSupport 13 | with PgSearchSupport 14 | with PgNetSupport 15 | with PgLTreeSupport 16 | with PgJsonSupport 17 | with array.PgArrayJdbcTypes 18 | with PgCirceJsonSupport { 19 | 20 | def pgjson = "jsonb" // jsonb support is in postgres 9.4.0 onward; for 9.3.x use "json" 21 | 22 | // Add back `capabilities.insertOrUpdate` to enable native `upsert` support; for postgres 9.5+ 23 | override protected def computeCapabilities: Set[Capability] = 24 | super.computeCapabilities + JdbcCapabilities.insertOrUpdate 25 | 26 | override val api = MyAPI 27 | 28 | trait MyAPI 29 | extends API 30 | with ArrayImplicits 31 | with SimpleArrayPlainImplicits 32 | with DateTimeImplicits 33 | with NetImplicits 34 | with LTreeImplicits 35 | with RangeImplicits 36 | with HStoreImplicits 37 | with SearchImplicits 38 | with SearchAssistants 39 | with JsonImplicits { 40 | implicit val strListTypeMapper: DriverJdbcType[List[String]] = new SimpleArrayJdbcType[String]("text").to(_.toList) 41 | } 42 | object MyAPI extends MyAPI 43 | } 44 | 45 | object ExtendedPostgresProfile extends 
ExtendedPostgresProfile 46 | -------------------------------------------------------------------------------- /core/src/main/scala/akka/persistence/postgres/db/SlickDatabase.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2019 Dennis Vriend 3 | * Copyright (C) 2019 - 2020 Lightbend Inc. 4 | */ 5 | 6 | package akka.persistence.postgres.db 7 | 8 | import akka.actor.ActorSystem 9 | import javax.naming.InitialContext 10 | import akka.persistence.postgres.config.SlickConfiguration 11 | import com.typesafe.config.Config 12 | import slick.basic.DatabaseConfig 13 | import slick.jdbc.JdbcProfile 14 | import slick.jdbc.JdbcBackend._ 15 | 16 | /** 17 | * INTERNAL API 18 | */ 19 | @deprecated(message = "Internal API, will be removed in 4.0.0", since = "3.4.0") 20 | object SlickDriver { 21 | 22 | /** 23 | * INTERNAL API 24 | */ 25 | @deprecated(message = "Internal API, will be removed in 4.0.0", since = "3.4.0") 26 | def forDriverName(config: Config): JdbcProfile = 27 | SlickDatabase.profile(config, "slick") 28 | } 29 | 30 | /** 31 | * INTERNAL API 32 | */ 33 | object SlickDatabase { 34 | 35 | /** 36 | * INTERNAL API 37 | */ 38 | private[postgres] def profile(config: Config, path: String): JdbcProfile = 39 | DatabaseConfig.forConfig[JdbcProfile](path, config).profile 40 | 41 | /** 42 | * INTERNAL API 43 | */ 44 | private[postgres] def database(config: Config, slickConfiguration: SlickConfiguration, path: String): Database = { 45 | slickConfiguration.jndiName 46 | .map(Database.forName(_, None)) 47 | .orElse { 48 | slickConfiguration.jndiDbName.map(new InitialContext().lookup(_).asInstanceOf[Database]) 49 | } 50 | .getOrElse(Database.forConfig(path, config)) 51 | } 52 | 53 | /** 54 | * INTERNAL API 55 | */ 56 | private[postgres] def initializeEagerly( 57 | config: Config, 58 | slickConfiguration: SlickConfiguration, 59 | path: String): SlickDatabase = { 60 | val dbPath = if (path.isEmpty) "db" else s"$path.db" 61 | EagerSlickDatabase(database(config, slickConfiguration, dbPath), profile(config, path)) 62 | } 63 | } 64 | 65 | trait SlickDatabase { 66 | def database: Database 67 | def profile: JdbcProfile 68 | 69 | /** 70 | * If true, the requesting side (usually a read/write/snapshot journal) 71 | * should shut down the database when it closes. If false, it should leave 72 | * the database connection pool open, since it might still be used elsewhere. 
73 | */ 74 | def allowShutdown: Boolean 75 | } 76 | 77 | case class EagerSlickDatabase(database: Database, profile: JdbcProfile) extends SlickDatabase { 78 | override def allowShutdown: Boolean = true 79 | } 80 | 81 | /** 82 | * A LazySlickDatabase lazily initializes a database; it also manages the shutdown of the database 83 | * @param config The configuration used to create the database 84 | */ 85 | class LazySlickDatabase(config: Config, system: ActorSystem) extends SlickDatabase { 86 | val profile: JdbcProfile = SlickDatabase.profile(config, path = "") 87 | 88 | lazy val database: Database = { 89 | val db = SlickDatabase.database(config, new SlickConfiguration(config), path = "db") 90 | system.registerOnTermination { 91 | db.close() 92 | } 93 | db 94 | } 95 | 96 | /** This database shutdown is managed by the db holder, so users of this db do not need to bother shutting it down */ 97 | override def allowShutdown: Boolean = false 98 | } 99 | -------------------------------------------------------------------------------- /core/src/main/scala/akka/persistence/postgres/journal/dao/FlatJournalDao.scala: -------------------------------------------------------------------------------- 1 | package akka.persistence.postgres 2 | package journal.dao 3 | 4 | import akka.persistence.postgres.config.JournalConfig 5 | import akka.persistence.postgres.tag.{ CachedTagIdResolver, SimpleTagDao } 6 | import akka.serialization.Serialization 7 | import akka.stream.Materializer 8 | import slick.jdbc.JdbcBackend._ 9 | 10 | import scala.concurrent.ExecutionContext 11 | 12 | class FlatJournalDao(val db: Database, val journalConfig: JournalConfig, serialization: Serialization)( 13 | implicit val ec: ExecutionContext, 14 | val mat: Materializer) 15 | extends BaseByteArrayJournalDao { 16 | val queries = new JournalQueries(FlatJournalTable(journalConfig.journalTableConfiguration)) 17 | val tagDao = new SimpleTagDao(db, journalConfig.tagsTableConfiguration) 18 | val eventTagConverter = new CachedTagIdResolver(tagDao, journalConfig.tagsConfig) 19 | val serializer = new ByteArrayJournalSerializer(serialization, eventTagConverter) 20 | } 21 | -------------------------------------------------------------------------------- /core/src/main/scala/akka/persistence/postgres/journal/dao/FlowControl.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2019 Dennis Vriend 3 | * Copyright (C) 2019 - 2020 Lightbend Inc. 4 | */ 5 | 6 | package akka.persistence.postgres.journal.dao 7 | 8 | private[postgres] sealed trait FlowControl 9 | 10 | private[postgres] object FlowControl { 11 | 12 | /** Keep querying - used when we are sure that there are more events to fetch */ 13 | case object Continue extends FlowControl 14 | 15 | /** 16 | * Keep querying with delay - used when we have consumed all events, 17 | * but want to poll for future events 18 | */ 19 | case object ContinueDelayed extends FlowControl 20 | 21 | /** Stop querying - used when we reach the desired offset */ 22 | case object Stop extends FlowControl 23 | } 24 | -------------------------------------------------------------------------------- /core/src/main/scala/akka/persistence/postgres/journal/dao/JournalDao.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2019 Dennis Vriend 3 | * Copyright (C) 2019 - 2020 Lightbend Inc. 
4 | */ 5 | 6 | package akka.persistence.postgres.journal.dao 7 | 8 | import akka.persistence.AtomicWrite 9 | import scala.collection.immutable.Seq 10 | import scala.concurrent.Future 11 | import scala.util.Try 12 | 13 | trait JournalDao extends JournalDaoWithReadMessages { 14 | 15 | /** 16 | * Deletes all persistent messages up to toSequenceNr (inclusive) for the persistenceId 17 | */ 18 | def delete(persistenceId: String, toSequenceNr: Long): Future[Unit] 19 | 20 | /** 21 | * Returns the highest sequence number for the events that are stored for that `persistenceId`. When no events are 22 | * found for the `persistenceId`, 0L will be the highest sequence number 23 | */ 24 | def highestSequenceNr(persistenceId: String, fromSequenceNr: Long): Future[Long] 25 | 26 | /** 27 | * @see [[akka.persistence.journal.AsyncWriteJournal.asyncWriteMessages(messages)]] 28 | */ 29 | def asyncWriteMessages(messages: Seq[AtomicWrite]): Future[Seq[Try[Unit]]] 30 | } 31 | -------------------------------------------------------------------------------- /core/src/main/scala/akka/persistence/postgres/journal/dao/JournalDaoWithReadMessages.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2019 Dennis Vriend 3 | * Copyright (C) 2019 - 2020 Lightbend Inc. 4 | */ 5 | 6 | package akka.persistence.postgres.journal.dao 7 | 8 | import scala.concurrent.duration.FiniteDuration 9 | import scala.util.Try 10 | 11 | import akka.NotUsed 12 | import akka.actor.Scheduler 13 | import akka.persistence.PersistentRepr 14 | import akka.stream.scaladsl.Source 15 | 16 | trait JournalDaoWithReadMessages { 17 | 18 | /** 19 | * Returns a Source of PersistentRepr and ordering number for a certain persistenceId. 20 | * It includes the events with sequenceNr between `fromSequenceNr` (inclusive) and 21 | * `toSequenceNr` (inclusive). 22 | */ 23 | def messages( 24 | persistenceId: String, 25 | fromSequenceNr: Long, 26 | toSequenceNr: Long, 27 | max: Long): Source[Try[(PersistentRepr, Long)], NotUsed] 28 | 29 | /** 30 | * Returns a Source of PersistentRepr and ordering number for a certain persistenceId. 31 | * It includes the events with sequenceNr between `fromSequenceNr` (inclusive) and 32 | * `toSequenceNr` (inclusive). 33 | */ 34 | def messagesWithBatch( 35 | persistenceId: String, 36 | fromSequenceNr: Long, 37 | toSequenceNr: Long, 38 | batchSize: Int, 39 | refreshInterval: Option[(FiniteDuration, Scheduler)]): Source[Try[(PersistentRepr, Long)], NotUsed] 40 | 41 | } 42 | -------------------------------------------------------------------------------- /core/src/main/scala/akka/persistence/postgres/journal/dao/JournalDaoWithUpdates.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2019 Dennis Vriend 3 | * Copyright (C) 2019 - 2020 Lightbend Inc. 4 | */ 5 | 6 | package akka.persistence.postgres.journal.dao 7 | 8 | import akka.Done 9 | 10 | import scala.concurrent.Future 11 | 12 | /** 13 | * A [[JournalDao]] with extended capabilities, such as updating payloads and tags of existing events. 14 | * These operations should be used sparingly, for example for migrating data from un-encrypted to encrypted formats 15 | */ 16 | trait JournalDaoWithUpdates extends JournalDao { 17 | 18 | /** 19 | * Update (!) an existing event with the passed in data. 
20 | */ 21 | def update(persistenceId: String, sequenceNr: Long, payload: AnyRef): Future[Done] 22 | } 23 | -------------------------------------------------------------------------------- /core/src/main/scala/akka/persistence/postgres/journal/dao/JournalMetadataQueries.scala: -------------------------------------------------------------------------------- 1 | package akka.persistence.postgres.journal.dao 2 | 3 | import slick.lifted.TableQuery 4 | 5 | class JournalMetadataQueries(journalMetadataTable: TableQuery[JournalMetadataTable]) { 6 | import akka.persistence.postgres.db.ExtendedPostgresProfile.api._ 7 | 8 | private def _highestSequenceNrForPersistenceId(persistenceId: Rep[String]): Query[Rep[Long], Long, Seq] = { 9 | journalMetadataTable.filter(_.persistenceId === persistenceId).map(_.maxSequenceNumber).take(1) 10 | } 11 | 12 | val highestSequenceNrForPersistenceId = Compiled(_highestSequenceNrForPersistenceId _) 13 | 14 | private def _minAndMaxOrderingForPersistenceId( 15 | persistenceId: Rep[String]): Query[(Rep[Long], Rep[Long]), (Long, Long), Seq] = 16 | journalMetadataTable.filter(_.persistenceId === persistenceId).take(1).map(r => (r.minOrdering, r.maxOrdering)) 17 | 18 | val minAndMaxOrderingForPersistenceId = Compiled(_minAndMaxOrderingForPersistenceId _) 19 | } 20 | -------------------------------------------------------------------------------- /core/src/main/scala/akka/persistence/postgres/journal/dao/package.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2019 Dennis Vriend 3 | * Copyright (C) 2019 - 2020 Lightbend Inc. 4 | */ 5 | 6 | package akka.persistence.postgres.journal 7 | 8 | import scala.collection.immutable.Set 9 | 10 | package object dao { 11 | def encodeTags(tags: Set[String], separator: String): Option[String] = 12 | if (tags.isEmpty) None else Option(tags.mkString(separator)) 13 | 14 | def decodeTags(tags: Option[String], separator: String): Set[String] = 15 | tags.map(_.split(separator).toSet).getOrElse(Set.empty[String]) 16 | } 17 | -------------------------------------------------------------------------------- /core/src/main/scala/akka/persistence/postgres/package.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2019 Dennis Vriend 3 | * Copyright (C) 2019 - 2020 Lightbend Inc. 4 | */ 5 | 6 | package akka.persistence 7 | 8 | import io.circe.Json 9 | 10 | package object postgres { 11 | final case class JournalRow( 12 | ordering: Long, 13 | deleted: Boolean, 14 | persistenceId: String, 15 | sequenceNumber: Long, 16 | message: Array[Byte], 17 | tags: List[Int], 18 | metadata: Json) 19 | 20 | final case class JournalMetadataRow( 21 | id: Long, 22 | persistenceId: String, 23 | maxSequenceNumber: Long, 24 | minOrdering: Long, 25 | maxOrdering: Long) 26 | } 27 | -------------------------------------------------------------------------------- /core/src/main/scala/akka/persistence/postgres/query/PostgresReadJournalProvider.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2019 Dennis Vriend 3 | * Copyright (C) 2019 - 2020 Lightbend Inc. 
4 | */ 5 | 6 | package akka.persistence.postgres.query 7 | 8 | import akka.actor.ExtendedActorSystem 9 | import akka.persistence.query.ReadJournalProvider 10 | import com.typesafe.config.Config 11 | 12 | class PostgresReadJournalProvider(system: ExtendedActorSystem, config: Config, configPath: String) 13 | extends ReadJournalProvider { 14 | override val scaladslReadJournal = new scaladsl.PostgresReadJournal(config, configPath)(system) 15 | 16 | override val javadslReadJournal = new javadsl.PostgresReadJournal(scaladslReadJournal) 17 | } 18 | -------------------------------------------------------------------------------- /core/src/main/scala/akka/persistence/postgres/query/dao/BaseByteArrayReadJournalDao.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2019 Dennis Vriend 3 | * Copyright (C) 2019 - 2020 Lightbend Inc. 4 | */ 5 | 6 | package akka.persistence.postgres 7 | package query.dao 8 | 9 | import akka.NotUsed 10 | import akka.persistence.PersistentRepr 11 | import akka.persistence.postgres.config.ReadJournalConfig 12 | import akka.persistence.postgres.journal.dao.{ 13 | BaseJournalDaoWithReadMessages, 14 | ByteArrayJournalSerializer, 15 | JournalMetadataTable 16 | } 17 | import akka.persistence.postgres.serialization.FlowPersistentReprSerializer 18 | import akka.persistence.postgres.tag.{ CachedTagIdResolver, SimpleTagDao, TagIdResolver } 19 | import akka.serialization.Serialization 20 | import akka.stream.Materializer 21 | import akka.stream.scaladsl.Source 22 | import slick.basic.DatabasePublisher 23 | import slick.jdbc.JdbcBackend._ 24 | 25 | import scala.collection.immutable._ 26 | import scala.concurrent.{ ExecutionContext, Future } 27 | import scala.util.Try 28 | 29 | trait BaseByteArrayReadJournalDao extends ReadJournalDao with BaseJournalDaoWithReadMessages { 30 | def db: Database 31 | def queries: ReadJournalQueries 32 | def serializer: FlowPersistentReprSerializer[JournalRow] 33 | def tagIdResolver: TagIdResolver 34 | def readJournalConfig: ReadJournalConfig 35 | 36 | import akka.persistence.postgres.db.ExtendedPostgresProfile.api._ 37 | 38 | override def allPersistenceIdsSource(max: Long): Source[String, NotUsed] = 39 | Source.fromPublisher(db.stream(queries.allPersistenceIdsDistinct(max).result)) 40 | 41 | override def eventsByTag( 42 | tag: String, 43 | offset: Long, 44 | maxOffset: Long, 45 | max: Long): Source[Try[(PersistentRepr, Long)], NotUsed] = { 46 | val publisher: Int => DatabasePublisher[JournalRow] = tagId => 47 | db.stream(queries.eventsByTag(List(tagId), offset, maxOffset).result) 48 | Source 49 | .future(tagIdResolver.lookupIdFor(tag)) 50 | .flatMapConcat(_.fold(Source.empty[JournalRow])(tagId => Source.fromPublisher(publisher(tagId)))) 51 | .via(serializer.deserializeFlow) 52 | } 53 | 54 | override def messages( 55 | persistenceId: String, 56 | fromSequenceNr: Long, 57 | toSequenceNr: Long, 58 | max: Long): Source[Try[(PersistentRepr, Long)], NotUsed] = 59 | Source 60 | .fromPublisher(db.stream(queries.messagesQuery(persistenceId, fromSequenceNr, toSequenceNr, max).result)) 61 | .via(serializer.deserializeFlow) 62 | 63 | override def journalSequence(offset: Long, limit: Long): Source[Long, NotUsed] = 64 | Source.fromPublisher(db.stream(queries.orderingByOrdering(offset, limit).result)) 65 | 66 | override def maxJournalSequence(): Future[Long] = { 67 | db.run(queries.maxOrdering.result) 68 | } 69 | } 70 | -------------------------------------------------------------------------------- 
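Note: the read-side DAO above backs the public query plugin exposed through PostgresReadJournalProvider. The following is a minimal usage sketch, not part of the repository sources: the actor-system name, the "invoice" tag, and the assumption that PostgresReadJournal.Identifier resolves to the plugin's default configuration path are all illustrative only.

import akka.actor.ActorSystem
import akka.persistence.query.{ NoOffset, PersistenceQuery }
import akka.persistence.postgres.query.scaladsl.PostgresReadJournal

object ReadJournalUsageSketch extends App {
  implicit val system: ActorSystem = ActorSystem("example")

  // Resolve the query plugin via Akka Persistence Query.
  val readJournal: PostgresReadJournal =
    PersistenceQuery(system).readJournalFor[PostgresReadJournal](PostgresReadJournal.Identifier)

  // Stream every event carrying the (hypothetical) "invoice" tag, from the beginning of the journal.
  readJournal
    .eventsByTag("invoice", NoOffset)
    .runForeach(envelope => println(s"${envelope.persistenceId} -> ${envelope.event}"))
}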
/core/src/main/scala/akka/persistence/postgres/query/dao/FlatReadJournalDao.scala: -------------------------------------------------------------------------------- 1 | package akka.persistence.postgres.query.dao 2 | 3 | import akka.persistence.postgres.config.ReadJournalConfig 4 | import akka.persistence.postgres.journal.dao.{ ByteArrayJournalSerializer, FlatJournalTable } 5 | import akka.persistence.postgres.tag.{ CachedTagIdResolver, SimpleTagDao, TagIdResolver } 6 | import akka.serialization.Serialization 7 | import akka.stream.Materializer 8 | import slick.jdbc.JdbcBackend.Database 9 | 10 | import scala.concurrent.ExecutionContext 11 | 12 | class FlatReadJournalDao( 13 | val db: Database, 14 | val readJournalConfig: ReadJournalConfig, 15 | serialization: Serialization, 16 | val tagIdResolver: TagIdResolver)(implicit val ec: ExecutionContext, val mat: Materializer) 17 | extends BaseByteArrayReadJournalDao { 18 | val queries = new ReadJournalQueries( 19 | FlatJournalTable(readJournalConfig.journalTableConfiguration), 20 | readJournalConfig.includeDeleted) 21 | val serializer = new ByteArrayJournalSerializer( 22 | serialization, 23 | new CachedTagIdResolver( 24 | new SimpleTagDao(db, readJournalConfig.tagsTableConfiguration), 25 | readJournalConfig.tagsConfig)) 26 | } 27 | -------------------------------------------------------------------------------- /core/src/main/scala/akka/persistence/postgres/query/dao/PartitionedReadJournalDao.scala: -------------------------------------------------------------------------------- 1 | package akka.persistence.postgres.query.dao 2 | 3 | import akka.NotUsed 4 | import akka.persistence.PersistentRepr 5 | import akka.persistence.postgres.config.ReadJournalConfig 6 | import akka.persistence.postgres.journal.dao.{ 7 | ByteArrayJournalSerializer, 8 | JournalMetadataTable, 9 | PartitionedJournalTable 10 | } 11 | import akka.persistence.postgres.tag.{ CachedTagIdResolver, SimpleTagDao, TagIdResolver } 12 | import akka.serialization.Serialization 13 | import akka.stream.Materializer 14 | import akka.stream.scaladsl.Source 15 | import slick.jdbc.JdbcBackend.Database 16 | 17 | import scala.concurrent.ExecutionContext 18 | import scala.util.Try 19 | 20 | class PartitionedReadJournalDao( 21 | val db: Database, 22 | val readJournalConfig: ReadJournalConfig, 23 | serialization: Serialization, 24 | val tagIdResolver: TagIdResolver)(implicit val ec: ExecutionContext, val mat: Materializer) 25 | extends BaseByteArrayReadJournalDao { 26 | 27 | import akka.persistence.postgres.db.ExtendedPostgresProfile.api._ 28 | 29 | val queries = new ReadJournalQueries( 30 | PartitionedJournalTable(readJournalConfig.journalTableConfiguration), 31 | readJournalConfig.includeDeleted) 32 | private val metadataQueries: ReadJournalMetadataQueries = new ReadJournalMetadataQueries( 33 | JournalMetadataTable(readJournalConfig.journalMetadataTableConfiguration)) 34 | 35 | val serializer = new ByteArrayJournalSerializer( 36 | serialization, 37 | new CachedTagIdResolver( 38 | new SimpleTagDao(db, readJournalConfig.tagsTableConfiguration), 39 | readJournalConfig.tagsConfig)) 40 | 41 | override def messages( 42 | persistenceId: String, 43 | fromSequenceNr: Long, 44 | toSequenceNr: Long, 45 | max: Long): Source[Try[(PersistentRepr, Long)], NotUsed] = { 46 | // This behaviour override is only applied here, because it is only useful on the PartitionedJournal strategy. 
47 | val query = if (readJournalConfig.useJournalMetadata) { 48 | metadataQueries.minAndMaxOrderingForPersistenceId(persistenceId).result.headOption.flatMap { 49 | case Some((minOrdering, _)) => 50 | // if journal_metadata knows the min ordering of a persistenceId, 51 | // use it to help the query planner to avoid scanning unnecessary partitions. 52 | queries.messagesMinOrderingBoundedQuery(persistenceId, fromSequenceNr, toSequenceNr, max, minOrdering).result 53 | case None => 54 | // fallback to standard behaviour 55 | queries.messagesQuery(persistenceId, fromSequenceNr, toSequenceNr, max).result 56 | } 57 | } else 58 | queries.messagesQuery(persistenceId, fromSequenceNr, toSequenceNr, max).result 59 | 60 | Source.fromPublisher(db.stream(query)).via(serializer.deserializeFlow) 61 | } 62 | } 63 | -------------------------------------------------------------------------------- /core/src/main/scala/akka/persistence/postgres/query/dao/ReadJournalDao.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2019 Dennis Vriend 3 | * Copyright (C) 2019 - 2020 Lightbend Inc. 4 | */ 5 | 6 | package akka.persistence.postgres 7 | package query.dao 8 | 9 | import akka.NotUsed 10 | import akka.persistence.PersistentRepr 11 | import akka.persistence.postgres.journal.dao.JournalDaoWithReadMessages 12 | import akka.stream.scaladsl.Source 13 | 14 | import scala.concurrent.Future 15 | import scala.util.Try 16 | 17 | trait ReadJournalDao extends JournalDaoWithReadMessages { 18 | 19 | /** 20 | * Returns a distinct stream of persistenceIds 21 | */ 22 | def allPersistenceIdsSource(max: Long): Source[String, NotUsed] 23 | 24 | /** 25 | * Returns a Source of deserialized data for a certain tag from an offset. The result is sorted by 26 | * the global ordering of the events. 27 | * Each element will be a Try with a PersistentRepr and a Long representing the global ordering of events 28 | */ 29 | def eventsByTag(tag: String, offset: Long, maxOffset: Long, max: Long): Source[Try[(PersistentRepr, Long)], NotUsed] 30 | 31 | /** 32 | * @param offset Minimum value to retrieve 33 | * @param limit Maximum number of values to retrieve 34 | * @return A Source of journal event sequence numbers (corresponding to the Ordering column) 35 | */ 36 | def journalSequence(offset: Long, limit: Long): Source[Long, NotUsed] 37 | 38 | /** 39 | * @return The value of the maximum (ordering) id in the journal 40 | */ 41 | def maxJournalSequence(): Future[Long] 42 | } 43 | -------------------------------------------------------------------------------- /core/src/main/scala/akka/persistence/postgres/query/dao/ReadJournalMetadataQueries.scala: -------------------------------------------------------------------------------- 1 | package akka.persistence.postgres.query.dao 2 | 3 | import akka.persistence.postgres.journal.dao.JournalMetadataTable 4 | import slick.lifted.TableQuery 5 | 6 | class ReadJournalMetadataQueries(journalMetadataTable: TableQuery[JournalMetadataTable]) { 7 | 8 | import akka.persistence.postgres.db.ExtendedPostgresProfile.api._ 9 | 10 | private def _minAndMaxOrderingForPersistenceId( 11 | persistenceId: Rep[String]): Query[(Rep[Long], Rep[Long]), (Long, Long), Seq] = 12 | journalMetadataTable.filter(_.persistenceId === persistenceId).take(1).map(r => (r.minOrdering, r.maxOrdering)) 13 | 14 | val minAndMaxOrderingForPersistenceId = Compiled(_minAndMaxOrderingForPersistenceId _) 15 | } 16 | -------------------------------------------------------------------------------- /core/src/main/scala/akka/persistence/postgres/query/dao/ReadJournalQueries.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2019 Dennis Vriend 3 | * Copyright (C) 2019 - 2020 Lightbend Inc. 
4 | */ 5 | 6 | package akka.persistence.postgres 7 | package query.dao 8 | 9 | import akka.persistence.postgres.journal.dao.JournalTable 10 | import slick.lifted.TableQuery 11 | 12 | class ReadJournalQueries(journalTable: TableQuery[JournalTable], includeDeleted: Boolean) { 13 | import akka.persistence.postgres.db.ExtendedPostgresProfile.api._ 14 | 15 | private def _allPersistenceIdsDistinct(max: ConstColumn[Long]): Query[Rep[String], String, Seq] = 16 | baseTableQuery().map(_.persistenceId).distinct.take(max) 17 | 18 | private def baseTableQuery() = 19 | if (includeDeleted) journalTable 20 | else journalTable.filter(_.deleted === false) 21 | 22 | val allPersistenceIdsDistinct = Compiled(_allPersistenceIdsDistinct _) 23 | 24 | private def _messagesQuery( 25 | persistenceId: Rep[String], 26 | fromSequenceNr: Rep[Long], 27 | toSequenceNr: Rep[Long], 28 | max: ConstColumn[Long]): Query[JournalTable, JournalRow, Seq] = 29 | baseTableQuery() 30 | .filter(_.persistenceId === persistenceId) 31 | .filter(_.sequenceNumber >= fromSequenceNr) 32 | .filter(_.sequenceNumber <= toSequenceNr) 33 | .sortBy(_.sequenceNumber.asc) 34 | .take(max) 35 | 36 | private def _messagesMinOrderingBoundedQuery( 37 | persistenceId: Rep[String], 38 | fromSequenceNr: Rep[Long], 39 | toSequenceNr: Rep[Long], 40 | max: ConstColumn[Long], 41 | minOrdering: Rep[Long]): Query[JournalTable, JournalRow, Seq] = 42 | baseTableQuery() 43 | .filter(_.persistenceId === persistenceId) 44 | .filter(_.sequenceNumber >= fromSequenceNr) 45 | .filter(_.sequenceNumber <= toSequenceNr) 46 | .filter(_.ordering >= minOrdering) 47 | .sortBy(_.sequenceNumber.asc) 48 | .take(max) 49 | 50 | val messagesQuery = Compiled(_messagesQuery _) 51 | 52 | val messagesMinOrderingBoundedQuery = Compiled(_messagesMinOrderingBoundedQuery _) 53 | 54 | protected def _eventsByTag( 55 | tag: Rep[List[Int]], 56 | offset: ConstColumn[Long], 57 | maxOffset: ConstColumn[Long]): Query[JournalTable, JournalRow, Seq] = { 58 | baseTableQuery() 59 | .filter(_.tags @> tag) 60 | .sortBy(_.ordering.asc) 61 | .filter(row => row.ordering > offset && row.ordering <= maxOffset) 62 | } 63 | 64 | val eventsByTag = Compiled(_eventsByTag _) 65 | 66 | private def _journalSequenceQuery(from: ConstColumn[Long], limit: ConstColumn[Long]) = 67 | journalTable.filter(_.ordering > from).map(_.ordering).sorted.take(limit) 68 | 69 | val orderingByOrdering = Compiled(_journalSequenceQuery _) 70 | 71 | val maxOrdering = Compiled { 72 | journalTable.map(_.ordering).max.getOrElse(0L) 73 | } 74 | } 75 | -------------------------------------------------------------------------------- /core/src/main/scala/akka/persistence/postgres/query/package.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2019 Dennis Vriend 3 | * Copyright (C) 2019 - 2020 Lightbend Inc. 
4 | */ 5 | 6 | package akka.persistence.postgres 7 | 8 | import akka.NotUsed 9 | import akka.persistence.query._ 10 | import akka.stream.scaladsl.Source 11 | import scala.language.implicitConversions 12 | 13 | package object query { 14 | implicit class OffsetOps(val that: Offset) extends AnyVal { 15 | def value = that match { 16 | case Sequence(offsetValue) => offsetValue 17 | case NoOffset => 0L 18 | case _ => 19 | throw new IllegalArgumentException( 20 | "akka-persistence-postgres does not support " + that.getClass.getName + " offsets") 21 | } 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /core/src/main/scala/akka/persistence/postgres/serialization/PersistentReprSerializer.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2019 Dennis Vriend 3 | * Copyright (C) 2019 - 2020 Lightbend Inc. 4 | */ 5 | 6 | package akka.persistence.postgres.serialization 7 | 8 | import akka.NotUsed 9 | import akka.persistence.journal.Tagged 10 | import akka.persistence.{ AtomicWrite, PersistentRepr } 11 | import akka.stream.scaladsl.Flow 12 | 13 | import scala.collection.immutable._ 14 | import scala.concurrent.{ ExecutionContext, Future } 15 | import scala.util.Try 16 | 17 | trait PersistentReprSerializer[T] { 18 | 19 | implicit def executionContext: ExecutionContext 20 | 21 | /** 22 | * An akka.persistence.AtomicWrite contains a Sequence of events (with metadata, the PersistentRepr) 23 | * that must all be persisted or all fail, which makes the operation atomic. The function converts 24 | * each AtomicWrite to a Future[Seq[T]]. 25 | * The Try denotes whether there was a problem with the AtomicWrite or not. 26 | */ 27 | def serialize(messages: Seq[AtomicWrite]): Seq[Future[Seq[T]]] = { 28 | messages.map { atomicWrite => 29 | val serialized = atomicWrite.payload.map(serialize) 30 | Future.sequence(serialized) 31 | } 32 | } 33 | 34 | def serialize(persistentRepr: PersistentRepr): Future[T] = persistentRepr.payload match { 35 | case Tagged(payload, tags) => 36 | serialize(persistentRepr.withPayload(payload), tags) 37 | case _ => serialize(persistentRepr, Set.empty[String]) 38 | } 39 | 40 | def serialize(persistentRepr: PersistentRepr, tags: Set[String]): Future[T] 41 | 42 | /** 43 | * deserialize into a PersistentRepr and a Long representing the global ordering of events 44 | */ 45 | def deserialize(t: T): Try[(PersistentRepr, Long)] 46 | } 47 | 48 | trait FlowPersistentReprSerializer[T] extends PersistentReprSerializer[T] { 49 | 50 | /** 51 | * A flow which deserializes each element into a PersistentRepr 52 | * and a Long representing the global ordering of events 53 | */ 54 | def deserializeFlow: Flow[T, Try[(PersistentRepr, Long)], NotUsed] = { 55 | Flow[T].map(deserialize) 56 | } 57 | 58 | } 59 | -------------------------------------------------------------------------------- /core/src/main/scala/akka/persistence/postgres/serialization/SnapshotSerializer.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2019 Dennis Vriend 3 | * Copyright (C) 2019 - 2020 Lightbend Inc. 
4 | */ 5 | 6 | package akka.persistence.postgres.serialization 7 | 8 | import akka.persistence.SnapshotMetadata 9 | 10 | import scala.util.Try 11 | 12 | trait SnapshotSerializer[T] { 13 | def serialize(metadata: SnapshotMetadata, snapshot: Any): Try[T] 14 | 15 | def deserialize(t: T): Try[(SnapshotMetadata, Any)] 16 | } 17 | -------------------------------------------------------------------------------- /core/src/main/scala/akka/persistence/postgres/snapshot/dao/ByteArraySnapshotSerializer.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2019 Dennis Vriend 3 | * Copyright (C) 2019 - 2020 Lightbend Inc. 4 | */ 5 | 6 | package akka.persistence.postgres.snapshot.dao 7 | 8 | import akka.persistence.SnapshotMetadata 9 | import akka.persistence.postgres.serialization.SnapshotSerializer 10 | import akka.persistence.postgres.snapshot.dao.ByteArraySnapshotSerializer.Metadata 11 | import akka.persistence.postgres.snapshot.dao.SnapshotTables.SnapshotRow 12 | import akka.serialization.{ Serialization, Serializers } 13 | import io.circe.{ Decoder, Encoder } 14 | 15 | import scala.util.Try 16 | 17 | class ByteArraySnapshotSerializer(serialization: Serialization) extends SnapshotSerializer[SnapshotRow] { 18 | 19 | def serialize(metadata: SnapshotMetadata, snapshot: Any): Try[SnapshotRow] = { 20 | import io.circe.syntax._ 21 | val payload = snapshot.asInstanceOf[AnyRef] 22 | for { 23 | ser <- Try(serialization.findSerializerFor(payload)) 24 | serializedSnapshot <- serialization.serialize(payload) 25 | } yield { 26 | val metadataJson = 27 | Metadata(ser.identifier, Option(Serializers.manifestFor(ser, payload)).filterNot(_.trim.isEmpty)) 28 | SnapshotRow( 29 | metadata.persistenceId, 30 | metadata.sequenceNr, 31 | metadata.timestamp, 32 | serializedSnapshot, 33 | metadataJson.asJson) 34 | } 35 | } 36 | 37 | def deserialize(snapshotRow: SnapshotRow): Try[(SnapshotMetadata, Any)] = { 38 | for { 39 | metadata <- snapshotRow.metadata.as[Metadata].toTry 40 | snapshot <- serialization.deserialize(snapshotRow.snapshot, metadata.serId, metadata.serManifest.getOrElse("")) 41 | } yield { 42 | val snapshotMetadata = 43 | SnapshotMetadata(snapshotRow.persistenceId, snapshotRow.sequenceNumber, snapshotRow.created) 44 | (snapshotMetadata, snapshot) 45 | } 46 | } 47 | } 48 | 49 | object ByteArraySnapshotSerializer { 50 | case class Metadata(serId: Int, serManifest: Option[String]) 51 | 52 | object Metadata { 53 | implicit val encoder: Encoder[Metadata] = Encoder 54 | .forProduct2[Metadata, Int, Option[String]]("sid", "sm")(m => (m.serId, m.serManifest)) 55 | .mapJson(_.dropNullValues) 56 | implicit val decoder: Decoder[Metadata] = 57 | Decoder.forProduct2("sid", "sm")(Metadata.apply).or(Decoder.forProduct2("serId", "serManifest")(Metadata.apply)) 58 | } 59 | } 60 | -------------------------------------------------------------------------------- /core/src/main/scala/akka/persistence/postgres/snapshot/dao/SnapshotDao.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2019 Dennis Vriend 3 | * Copyright (C) 2019 - 2020 Lightbend Inc. 
4 | */ 5 | 6 | package akka.persistence.postgres.snapshot.dao 7 | 8 | import akka.persistence.SnapshotMetadata 9 | 10 | import scala.concurrent.Future 11 | 12 | trait SnapshotDao { 13 | def deleteAllSnapshots(persistenceId: String): Future[Unit] 14 | 15 | def deleteUpToMaxSequenceNr(persistenceId: String, maxSequenceNr: Long): Future[Unit] 16 | 17 | def deleteUpToMaxTimestamp(persistenceId: String, maxTimestamp: Long): Future[Unit] 18 | 19 | def deleteUpToMaxSequenceNrAndMaxTimestamp( 20 | persistenceId: String, 21 | maxSequenceNr: Long, 22 | maxTimestamp: Long): Future[Unit] 23 | 24 | def latestSnapshot(persistenceId: String): Future[Option[(SnapshotMetadata, Any)]] 25 | 26 | def snapshotForMaxTimestamp(persistenceId: String, timestamp: Long): Future[Option[(SnapshotMetadata, Any)]] 27 | 28 | def snapshotForMaxSequenceNr(persistenceId: String, sequenceNr: Long): Future[Option[(SnapshotMetadata, Any)]] 29 | 30 | def snapshotForMaxSequenceNrAndMaxTimestamp( 31 | persistenceId: String, 32 | sequenceNr: Long, 33 | timestamp: Long): Future[Option[(SnapshotMetadata, Any)]] 34 | 35 | def delete(persistenceId: String, sequenceNr: Long): Future[Unit] 36 | 37 | def save(snapshotMetadata: SnapshotMetadata, snapshot: Any): Future[Unit] 38 | } 39 | -------------------------------------------------------------------------------- /core/src/main/scala/akka/persistence/postgres/snapshot/dao/SnapshotQueries.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2019 Dennis Vriend 3 | * Copyright (C) 2019 - 2020 Lightbend Inc. 4 | */ 5 | 6 | package akka.persistence.postgres.snapshot.dao 7 | 8 | import akka.persistence.postgres.config.SnapshotTableConfiguration 9 | import akka.persistence.postgres.snapshot.dao.SnapshotTables.SnapshotRow 10 | 11 | class SnapshotQueries(override val snapshotTableCfg: SnapshotTableConfiguration) extends SnapshotTables { 12 | import akka.persistence.postgres.db.ExtendedPostgresProfile.api._ 13 | 14 | private val SnapshotTableC = Compiled(SnapshotTable) 15 | 16 | def insertOrUpdate(snapshotRow: SnapshotRow) = 17 | SnapshotTableC.insertOrUpdate(snapshotRow) 18 | 19 | private def _selectAllByPersistenceId(persistenceId: Rep[String]) = 20 | SnapshotTable.filter(_.persistenceId === persistenceId).sortBy(_.sequenceNumber.desc) 21 | val selectAllByPersistenceId = Compiled(_selectAllByPersistenceId _) 22 | 23 | private def _selectLatestByPersistenceId(persistenceId: Rep[String]) = 24 | _selectAllByPersistenceId(persistenceId).take(1) 25 | val selectLatestByPersistenceId = Compiled(_selectLatestByPersistenceId _) 26 | 27 | private def _selectByPersistenceIdAndSequenceNr(persistenceId: Rep[String], sequenceNr: Rep[Long]) = 28 | _selectAllByPersistenceId(persistenceId).filter(_.sequenceNumber === sequenceNr) 29 | val selectByPersistenceIdAndSequenceNr = Compiled(_selectByPersistenceIdAndSequenceNr _) 30 | 31 | private def _selectByPersistenceIdUpToMaxTimestamp(persistenceId: Rep[String], maxTimestamp: Rep[Long]) = 32 | _selectAllByPersistenceId(persistenceId).filter(_.created <= maxTimestamp) 33 | val selectByPersistenceIdUpToMaxTimestamp = Compiled(_selectByPersistenceIdUpToMaxTimestamp _) 34 | 35 | private def _selectByPersistenceIdUpToMaxSequenceNr(persistenceId: Rep[String], maxSequenceNr: Rep[Long]) = 36 | _selectAllByPersistenceId(persistenceId).filter(_.sequenceNumber <= maxSequenceNr) 37 | val selectByPersistenceIdUpToMaxSequenceNr = Compiled(_selectByPersistenceIdUpToMaxSequenceNr _) 38 | 39 | private def 
_selectByPersistenceIdUpToMaxSequenceNrAndMaxTimestamp( 40 | persistenceId: Rep[String], 41 | maxSequenceNr: Rep[Long], 42 | maxTimestamp: Rep[Long]) = 43 | _selectByPersistenceIdUpToMaxSequenceNr(persistenceId, maxSequenceNr).filter(_.created <= maxTimestamp) 44 | val selectByPersistenceIdUpToMaxSequenceNrAndMaxTimestamp = Compiled( 45 | _selectByPersistenceIdUpToMaxSequenceNrAndMaxTimestamp _) 46 | 47 | private def _selectOneByPersistenceIdAndMaxTimestamp(persistenceId: Rep[String], maxTimestamp: Rep[Long]) = 48 | _selectAllByPersistenceId(persistenceId).filter(_.created <= maxTimestamp).take(1) 49 | val selectOneByPersistenceIdAndMaxTimestamp = Compiled(_selectOneByPersistenceIdAndMaxTimestamp _) 50 | 51 | private def _selectOneByPersistenceIdAndMaxSequenceNr(persistenceId: Rep[String], maxSequenceNr: Rep[Long]) = 52 | _selectAllByPersistenceId(persistenceId).filter(_.sequenceNumber <= maxSequenceNr).take(1) 53 | val selectOneByPersistenceIdAndMaxSequenceNr = Compiled(_selectOneByPersistenceIdAndMaxSequenceNr _) 54 | 55 | private def _selectOneByPersistenceIdAndMaxSequenceNrAndMaxTimestamp( 56 | persistenceId: Rep[String], 57 | maxSequenceNr: Rep[Long], 58 | maxTimestamp: Rep[Long]) = 59 | _selectByPersistenceIdUpToMaxSequenceNr(persistenceId, maxSequenceNr).filter(_.created <= maxTimestamp).take(1) 60 | val selectOneByPersistenceIdAndMaxSequenceNrAndMaxTimestamp = Compiled( 61 | _selectOneByPersistenceIdAndMaxSequenceNrAndMaxTimestamp _) 62 | } 63 | -------------------------------------------------------------------------------- /core/src/main/scala/akka/persistence/postgres/snapshot/dao/SnapshotTables.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2019 Dennis Vriend 3 | * Copyright (C) 2019 - 2020 Lightbend Inc. 
4 | */ 5 | 6 | package akka.persistence.postgres.snapshot.dao 7 | 8 | import akka.persistence.postgres.config.SnapshotTableConfiguration 9 | import akka.persistence.postgres.snapshot.dao.SnapshotTables._ 10 | import io.circe.Json 11 | 12 | object SnapshotTables { 13 | case class SnapshotRow( 14 | persistenceId: String, 15 | sequenceNumber: Long, 16 | created: Long, 17 | snapshot: Array[Byte], 18 | metadata: Json) 19 | } 20 | 21 | trait SnapshotTables { 22 | import akka.persistence.postgres.db.ExtendedPostgresProfile.api._ 23 | 24 | def snapshotTableCfg: SnapshotTableConfiguration 25 | 26 | class Snapshot(_tableTag: Tag) 27 | extends Table[SnapshotRow]( 28 | _tableTag, 29 | _schemaName = snapshotTableCfg.schemaName, 30 | _tableName = snapshotTableCfg.tableName) { 31 | def * = (persistenceId, sequenceNumber, created, snapshot, metadata) <> (SnapshotRow.tupled, SnapshotRow.unapply) 32 | 33 | val persistenceId: Rep[String] = 34 | column[String](snapshotTableCfg.columnNames.persistenceId, O.Length(255, varying = true)) 35 | val sequenceNumber: Rep[Long] = column[Long](snapshotTableCfg.columnNames.sequenceNumber) 36 | val created: Rep[Long] = column[Long](snapshotTableCfg.columnNames.created) 37 | val snapshot: Rep[Array[Byte]] = column[Array[Byte]](snapshotTableCfg.columnNames.snapshot) 38 | val metadata: Rep[Json] = column[Json](snapshotTableCfg.columnNames.metadata) 39 | val pk = primaryKey(s"${tableName}_pk", (persistenceId, sequenceNumber)) 40 | } 41 | 42 | lazy val SnapshotTable = new TableQuery(tag => new Snapshot(tag)) 43 | } 44 | -------------------------------------------------------------------------------- /core/src/main/scala/akka/persistence/postgres/tag/EventTag.scala: -------------------------------------------------------------------------------- 1 | package akka.persistence.postgres.tag 2 | 3 | case class EventTag(id: Int, name: String) 4 | -------------------------------------------------------------------------------- /core/src/main/scala/akka/persistence/postgres/tag/EventTagQueries.scala: -------------------------------------------------------------------------------- 1 | package akka.persistence.postgres.tag 2 | 3 | import akka.persistence.postgres.config.TagsTableConfiguration 4 | 5 | class EventTagQueries(val tagsTableCfg: TagsTableConfiguration) extends TagTables { 6 | 7 | import akka.persistence.postgres.db.ExtendedPostgresProfile.api._ 8 | 9 | def add(xs: EventTag) = 10 | EventTagTable.returning(EventTagTable.map(_.id)) += xs 11 | 12 | val selectAll = EventTagTable.result 13 | 14 | private def _selectByName(name: Rep[String]) = 15 | EventTagTable.filter(_.name === name) 16 | 17 | val selectByName = Compiled(_selectByName _) 18 | 19 | private def _selectById(id: Rep[Int]) = 20 | EventTagTable.filter(_.id === id) 21 | 22 | val selectById = Compiled(_selectById _) 23 | 24 | } 25 | -------------------------------------------------------------------------------- /core/src/main/scala/akka/persistence/postgres/tag/TagDao.scala: -------------------------------------------------------------------------------- 1 | package akka.persistence.postgres.tag 2 | 3 | import akka.persistence.postgres.config.TagsTableConfiguration 4 | import slick.jdbc.JdbcBackend.Database 5 | 6 | import scala.concurrent.{ ExecutionContext, Future } 7 | 8 | trait TagDao { 9 | 10 | def find(tagName: String): Future[Option[Int]] 11 | 12 | def insert(tagName: String): Future[Int] 13 | } 14 | 15 | class SimpleTagDao(db: Database, tagsTableCfg: TagsTableConfiguration)(implicit ec: ExecutionContext) extends 
TagDao { 16 | import akka.persistence.postgres.db.ExtendedPostgresProfile.api._ 17 | 18 | private val queries = new EventTagQueries(tagsTableCfg) 19 | 20 | def find(tagName: String): Future[Option[Int]] = 21 | db.run(queries.selectByName(tagName).map(_.map(_.id)).result.headOption) 22 | 23 | def insert(tagName: String): Future[Int] = 24 | db.run(queries.add(EventTag(Int.MinValue, tagName)).transactionally) 25 | } 26 | -------------------------------------------------------------------------------- /core/src/main/scala/akka/persistence/postgres/tag/TagIdResolver.scala: -------------------------------------------------------------------------------- 1 | package akka.persistence.postgres.tag 2 | 3 | import akka.persistence.postgres.config.TagsConfig 4 | import com.github.blemale.scaffeine.{ AsyncLoadingCache, Scaffeine } 5 | 6 | import scala.concurrent.{ ExecutionContext, Future } 7 | import scala.util.Success 8 | 9 | trait TagIdResolver { 10 | def getOrAssignIdsFor(tags: Set[String]): Future[Map[String, Int]] 11 | def lookupIdFor(name: String): Future[Option[Int]] 12 | } 13 | 14 | class CachedTagIdResolver(dao: TagDao, config: TagsConfig)(implicit ctx: ExecutionContext) extends TagIdResolver { 15 | 16 | // TODO add support for loading many tags at once 17 | // Package private - for testing purposes 18 | private[tag] val cache: AsyncLoadingCache[String, Int] = 19 | Scaffeine().expireAfterAccess(config.cacheTtl).buildAsyncFuture(findOrInsert(_, config.insertionRetryAttempts)) 20 | 21 | private def findOrInsert(tagName: String, retryAttempts: Int): Future[Int] = 22 | dao.find(tagName).flatMap { 23 | case Some(id) => Future.successful(id) 24 | case None => 25 | dao.insert(tagName).recoverWith { 26 | case _ if retryAttempts > 0 => findOrInsert(tagName, retryAttempts - 1) 27 | } 28 | } 29 | 30 | override def getOrAssignIdsFor(tags: Set[String]): Future[Map[String, Int]] = 31 | cache.getAll(tags) 32 | 33 | override def lookupIdFor(tagName: String): Future[Option[Int]] = 34 | Future.sequence(cache.getIfPresent(tagName).toList).map(_.headOption).flatMap { 35 | case Some(tagId) => Future.successful(Some(tagId)) 36 | case _ => 37 | val findRes = dao.find(tagName) 38 | findRes.onComplete { 39 | case Success(Some(tagId)) => cache.put(tagName, Future.successful(tagId)) 40 | case _ => // do nothing 41 | } 42 | findRes 43 | } 44 | } 45 | -------------------------------------------------------------------------------- /core/src/main/scala/akka/persistence/postgres/tag/TagTables.scala: -------------------------------------------------------------------------------- 1 | package akka.persistence.postgres.tag 2 | 3 | import akka.persistence.postgres.config.TagsTableConfiguration 4 | 5 | trait TagTables { 6 | 7 | import akka.persistence.postgres.db.ExtendedPostgresProfile.api._ 8 | 9 | def tagsTableCfg: TagsTableConfiguration 10 | 11 | class EventTagTableDefinition(_tableTag: Tag) 12 | extends Table[EventTag](_tableTag, _schemaName = tagsTableCfg.schemaName, _tableName = tagsTableCfg.tableName) { 13 | def * = (id, name) <> (EventTag.tupled, EventTag.unapply) 14 | 15 | val id: Rep[Int] = column[Int](tagsTableCfg.columnNames.id, O.AutoInc) 16 | val name: Rep[String] = column[String](tagsTableCfg.columnNames.name, O.Length(255, varying = true)) 17 | val pk = primaryKey(s"${tableName}_pk", id) 18 | val nameIdx = index(s"${tableName}_name_idx", name, unique = true) 19 | } 20 | 21 | lazy val EventTagTable = new TableQuery(tag => new EventTagTableDefinition(tag)) 22 | } 23 | 
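The tag classes above are meant to be composed: SimpleTagDao issues the Slick queries defined in EventTagQueries, and CachedTagIdResolver puts a Scaffeine cache in front of it, retrying the find-or-insert when the insert fails (for example because a concurrent writer already created the tag). A minimal wiring sketch follows; the values db, tagsTableCfg and tagsConfig are assumptions standing in for the plugin's Slick database handle and tags configuration, named here only for illustration:

import akka.persistence.postgres.tag.{ CachedTagIdResolver, SimpleTagDao, TagIdResolver }
import scala.concurrent.{ ExecutionContext, Future }

implicit val ec: ExecutionContext = ExecutionContext.global

// Assumed to come from the surrounding plugin setup (not fixed names):
// db: slick.jdbc.JdbcBackend.Database, tagsTableCfg: TagsTableConfiguration, tagsConfig: TagsConfig
val tagDao = new SimpleTagDao(db, tagsTableCfg)
val resolver: TagIdResolver = new CachedTagIdResolver(tagDao, tagsConfig)

// Resolves tag names to numeric ids, inserting any tags that do not exist yet.
val assigned: Future[Map[String, Int]] = resolver.getOrAssignIdsFor(Set("invoice-created", "invoice-paid"))

// Read-only lookup: consults the cache first, then the database, and never creates a new id.
val existing: Future[Option[Int]] = resolver.lookupIdFor("invoice-created")

The numeric ids returned here are what ends up in the journal's int[] tags column, which is why the read side can filter events with the @> array-containment operator seen in the schema and query files below.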
-------------------------------------------------------------------------------- /core/src/main/scala/akka/persistence/postgres/util/BlockingOps.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2019 Dennis Vriend 3 | * Copyright (C) 2019 - 2020 Lightbend Inc. 4 | */ 5 | 6 | package akka.persistence.postgres.util 7 | 8 | import scala.concurrent.duration.{ FiniteDuration, _ } 9 | import scala.concurrent.{ Await, Future } 10 | 11 | object BlockingOps { 12 | implicit class BlockingFutureImplicits[T](val that: Future[T]) extends AnyVal { 13 | def futureValue(implicit awaitDuration: FiniteDuration = 24.hour): T = 14 | Await.result(that, awaitDuration) 15 | def printFutureValue(implicit awaitDuration: FiniteDuration = 24.hour): Unit = 16 | println(that.futureValue) 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /core/src/main/scala/akka/persistence/postgres/util/ByteArrayOps.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2019 Dennis Vriend 3 | * Copyright (C) 2019 - 2020 Lightbend Inc. 4 | */ 5 | 6 | package akka.persistence.postgres.util 7 | 8 | import java.io.{ ByteArrayInputStream, InputStream } 9 | import java.util.Base64 10 | 11 | object ByteArrayOps { 12 | implicit class ByteArrayImplicits(val that: Array[Byte]) extends AnyVal { 13 | def encodeBase64: String = Base64.getEncoder.encodeToString(that) 14 | def toInputStream: InputStream = new ByteArrayInputStream(that) 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /core/src/main/scala/akka/persistence/postgres/util/ConfigOps.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2019 Dennis Vriend 3 | * Copyright (C) 2019 - 2020 Lightbend Inc. 
4 | */ 5 | 6 | package akka.persistence.postgres.util 7 | 8 | import java.util.Locale 9 | import java.util.concurrent.TimeUnit 10 | 11 | import com.typesafe.config.{ Config, ConfigFactory } 12 | 13 | import scala.concurrent.duration.{ Duration, FiniteDuration } 14 | import scala.language.implicitConversions 15 | import scala.util.Try 16 | 17 | object ConfigOps { 18 | implicit class ConfigOperations(val config: Config) extends AnyVal { 19 | def as[A](key: String): Try[A] = 20 | Try(config.getAnyRef(key)).map(_.asInstanceOf[A]) 21 | 22 | def as[A](key: String, default: A): A = 23 | Try(config.getAnyRef(key)).map(_.asInstanceOf[A]).getOrElse(default) 24 | 25 | def asConfig(key: String, default: Config = ConfigFactory.empty) = 26 | Try(config.getConfig(key)).getOrElse(default) 27 | 28 | def asInt(key: String, default: Int): Int = 29 | Try(config.getInt(key)).getOrElse(default) 30 | 31 | def asString(key: String, default: String): String = 32 | Try(config.getString(key)).getOrElse(default) 33 | 34 | def asOptionalNonEmptyString(key: String): Option[String] = { 35 | if (config.hasPath(key)) Some(config.getString(key)).filterNot(_.isEmpty) else None 36 | } 37 | 38 | def asBoolean(key: String, default: Boolean) = 39 | Try(config.getBoolean(key)).getOrElse(default) 40 | 41 | def asFiniteDuration(key: String, default: FiniteDuration) = 42 | Try(FiniteDuration(config.getDuration(key).toMillis, TimeUnit.MILLISECONDS)).getOrElse(default) 43 | 44 | def asDuration(key: String): Duration = 45 | config.getString(key).toLowerCase(Locale.ROOT) match { 46 | case "off" => Duration.Undefined 47 | case _ => config.getMillisDuration(key).requiring(_ > Duration.Zero, key + " >0s, or off") 48 | } 49 | 50 | def getMillisDuration(key: String): FiniteDuration = getDuration(key, TimeUnit.MILLISECONDS) 51 | 52 | def getNanosDuration(key: String): FiniteDuration = getDuration(key, TimeUnit.NANOSECONDS) 53 | 54 | def getDuration(key: String, unit: TimeUnit): FiniteDuration = Duration(config.getDuration(key, unit), unit) 55 | 56 | def ?[A](key: String): Try[A] = as(key) 57 | 58 | def ?:[A](key: String, default: A) = as(key, default) 59 | 60 | def withkey[A](key: String)(f: Config => A): A = f(config.getConfig(key)) 61 | } 62 | 63 | implicit def TryToOption[A](t: Try[A]): Option[A] = t.toOption 64 | 65 | final implicit class TryOps[A](val t: Try[A]) extends AnyVal { 66 | def ?:(default: A): A = t.getOrElse(default) 67 | } 68 | 69 | final implicit class StringTryOps(val t: Try[String]) extends AnyVal { 70 | 71 | /** 72 | * Trim the String content, when empty, return None 73 | */ 74 | def trim: Option[String] = t.map(_.trim).filter(_.nonEmpty) 75 | } 76 | 77 | final implicit class Requiring[A](val value: A) extends AnyVal { 78 | @inline def requiring(cond: Boolean, msg: => Any): A = { 79 | require(cond, msg) 80 | value 81 | } 82 | 83 | @inline def requiring(cond: A => Boolean, msg: => Any): A = { 84 | require(cond(value), msg) 85 | value 86 | } 87 | } 88 | } 89 | -------------------------------------------------------------------------------- /core/src/main/scala/akka/persistence/postgres/util/InputStreamOps.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2019 Dennis Vriend 3 | * Copyright (C) 2019 - 2020 Lightbend Inc. 
4 | */ 5 | 6 | package akka.persistence.postgres.util 7 | 8 | import java.io.{ ByteArrayOutputStream, InputStream } 9 | 10 | import scala.concurrent.blocking 11 | 12 | object InputStreamOps { 13 | implicit class InputStreamImplicits(val is: InputStream) extends AnyVal { 14 | def toArray: Array[Byte] = blocking { 15 | /* based on https://stackoverflow.com/a/17861016/865265 16 | * Thanks to 17 | * - https://stackoverflow.com/users/1435969/ivan-gammel 18 | * - https://stackoverflow.com/users/2619133/oliverkn 19 | */ 20 | val bos: ByteArrayOutputStream = new ByteArrayOutputStream 21 | val buffer: Array[Byte] = new Array[Byte](0xffff) 22 | var len: Int = is.read(buffer) 23 | while (len != -1) { 24 | bos.write(buffer, 0, len) 25 | len = is.read(buffer) 26 | } 27 | bos.toByteArray 28 | } 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /core/src/main/scala/akka/persistence/postgres/util/StringOps.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2019 Dennis Vriend 3 | * Copyright (C) 2019 - 2020 Lightbend Inc. 4 | */ 5 | 6 | package akka.persistence.postgres.util 7 | 8 | import java.util.Base64 9 | 10 | object StringOps { 11 | implicit class StringImplicits(val that: String) extends AnyVal { 12 | def toByteArray: Array[Byte] = Base64.getDecoder.decode(that) 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /core/src/test/resources/general.conf: -------------------------------------------------------------------------------- 1 | # Copyright 2016 Dennis Vriend 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | // This file contains the general settings which are shared in all akka-persistence-postgres tests 16 | 17 | akka { 18 | stdout-loglevel = off // defaults to WARNING can be disabled with off. 
The stdout-loglevel is only in effect during system startup and shutdown 19 | log-dead-letters-during-shutdown = on 20 | loglevel = debug 21 | log-dead-letters = on 22 | log-config-on-start = off // Log the complete configuration at INFO level when the actor system is started 23 | 24 | loggers = ["akka.event.slf4j.Slf4jLogger"] 25 | logging-filter = "akka.event.slf4j.Slf4jLoggingFilter" 26 | 27 | actor { 28 | allow-java-serialization = on 29 | debug { 30 | receive = on // log all messages sent to an actor if that actor's receive method is a LoggingReceive 31 | autoreceive = off // log all special messages like Kill, PoisonPill etc sent to all actors 32 | lifecycle = off // log all actor lifecycle events of all actors 33 | fsm = off // enable logging of all events, transitions and timers of FSM Actors that extend LoggingFSM 34 | event-stream = off // enable logging of subscriptions (subscribe/unsubscribe) on the ActorSystem.eventStream 35 | } 36 | } 37 | } 38 | 39 | docker { 40 | host = "localhost" 41 | host = ${?VM_HOST} 42 | } 43 | 44 | postgres-journal { 45 | logicalDelete = ${akka-persistence-postgres.logicalDeletion.enable} 46 | event-adapters { 47 | test-write-event-adapter = "akka.persistence.postgres.query.EventAdapterTest$TestWriteEventAdapter" 48 | test-read-event-adapter = "akka.persistence.postgres.query.EventAdapterTest$TestReadEventAdapter" 49 | } 50 | 51 | event-adapter-bindings { 52 | "akka.persistence.postgres.query.EventAdapterTest$Event" = test-write-event-adapter 53 | "akka.persistence.postgres.query.EventAdapterTest$TaggedEvent" = test-write-event-adapter 54 | "akka.persistence.postgres.query.EventAdapterTest$TaggedAsyncEvent" = test-write-event-adapter 55 | "akka.persistence.postgres.query.EventAdapterTest$EventAdapted" = test-read-event-adapter 56 | } 57 | } 58 | 59 | 60 | postgres-read-journal { 61 | includeLogicallyDeleted = ${akka-persistence-postgres.logicalDeletion.enable} 62 | refresh-interval = "10ms" 63 | max-buffer-size = "500" 64 | } 65 | 66 | slick.db.idleTimeout = 10000 // 10 seconds 67 | -------------------------------------------------------------------------------- /core/src/test/resources/jndi-application.conf: -------------------------------------------------------------------------------- 1 | # Copyright 2016 Dennis Vriend 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License.
14 | 15 | // general.conf is included only for shared settings used for the akka-persistence-postgres tests 16 | include "general.conf" 17 | 18 | akka { 19 | persistence { 20 | journal { 21 | plugin = "postgres-journal" 22 | // Enable the line below to automatically start the journal when the actorsystem is started 23 | // auto-start-journals = ["postgres-journal"] 24 | } 25 | snapshot-store { 26 | plugin = "postgres-snapshot-store" 27 | // Enable the line below to automatically start the snapshot-store when the actorsystem is started 28 | // auto-start-snapshot-stores = ["postgres-snapshot-store"] 29 | } 30 | } 31 | } 32 | 33 | postgres-journal { 34 | slick = ${slick} 35 | } 36 | 37 | # the akka-persistence-snapshot-store in use 38 | postgres-snapshot-store { 39 | slick = ${slick} 40 | } 41 | 42 | # the akka-persistence-query provider in use 43 | postgres-read-journal { 44 | slick = ${slick} 45 | } 46 | 47 | slick { 48 | profile = "slick.jdbc.PostgresProfile$" 49 | jndiName = "java:/jboss/datasources/bla" 50 | } 51 | -------------------------------------------------------------------------------- /core/src/test/resources/jndi-shared-db-application.conf: -------------------------------------------------------------------------------- 1 | # Copyright 2016 Dennis Vriend 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | include "general.conf" 16 | 17 | akka { 18 | persistence { 19 | journal { 20 | plugin = "postgres-journal" 21 | // Enable the line below to automatically start the journal when the actorsystem is started 22 | // auto-start-journals = ["postgres-journal"] 23 | } 24 | snapshot-store { 25 | plugin = "postgres-snapshot-store" 26 | // Enable the line below to automatically start the snapshot-store when the actorsystem is started 27 | // auto-start-snapshot-stores = ["postgres-snapshot-store"] 28 | } 29 | } 30 | } 31 | 32 | akka-persistence-postgres { 33 | shared-databases { 34 | slick { 35 | profile = "slick.jdbc.PostgresProfile$" 36 | jndiName = "java:/jboss/datasources/bla" 37 | } 38 | } 39 | } 40 | 41 | postgres-journal { 42 | use-shared-db = "slick" 43 | } 44 | 45 | # the akka-persistence-snapshot-store in use 46 | postgres-snapshot-store { 47 | use-shared-db = "slick" 48 | } 49 | 50 | # the akka-persistence-query provider in use 51 | postgres-read-journal { 52 | use-shared-db = "slick" 53 | } 54 | 55 | -------------------------------------------------------------------------------- /core/src/test/resources/logback-test.xml: -------------------------------------------------------------------------------- [logback-test.xml: the XML markup was lost during extraction; the recoverable content is a console appender with a "debug" threshold filter and the log pattern "%date{ISO8601} - %logger -> %-5level[%thread] %logger{0} - %msg%n"; the remaining logger-level settings are not recoverable from this dump.] -------------------------------------------------------------------------------- /core/src/test/resources/nested-partitions-application-with-hard-delete.conf: -------------------------------------------------------------------------------- 1 | include "plain-application-with-hard-delete.conf" 2 | include "nested-partitions-journal.conf" 3 | -------------------------------------------------------------------------------- /core/src/test/resources/nested-partitions-application-with-use-journal-metadata.conf: -------------------------------------------------------------------------------- 1 | include "plain-application-with-use-journal-metadata.conf" 2 | include "nested-partitions-journal.conf" 3 | -------------------------------------------------------------------------------- /core/src/test/resources/nested-partitions-application.conf: -------------------------------------------------------------------------------- 1 | include "plain-application.conf" 2 | include "nested-partitions-journal.conf" 3 | -------------------------------------------------------------------------------- /core/src/test/resources/nested-partitions-journal.conf: -------------------------------------------------------------------------------- 1 | postgres-journal.dao = "akka.persistence.postgres.journal.dao.NestedPartitionsJournalDao" 2 | postgres-journal.tables.journal.partitions { 3 | size = 2000 4 | } 5 | -------------------------------------------------------------------------------- /core/src/test/resources/nested-partitions-shared-db-application.conf: -------------------------------------------------------------------------------- 1 | include "plain-shared-db-application.conf" 2 | include "nested-partitions-journal.conf" 3 | -------------------------------------------------------------------------------- /core/src/test/resources/partitioned-application-with-hard-delete.conf: -------------------------------------------------------------------------------- 1 | include "plain-application-with-hard-delete.conf" 2 | include
"partitioned-journal.conf" 3 | -------------------------------------------------------------------------------- /core/src/test/resources/partitioned-application-with-use-journal-metadata.conf: -------------------------------------------------------------------------------- 1 | include "plain-application-with-use-journal-metadata.conf" 2 | include "partitioned-journal.conf" 3 | -------------------------------------------------------------------------------- /core/src/test/resources/partitioned-application.conf: -------------------------------------------------------------------------------- 1 | include "plain-application.conf" 2 | include "partitioned-journal.conf" 3 | -------------------------------------------------------------------------------- /core/src/test/resources/partitioned-journal.conf: -------------------------------------------------------------------------------- 1 | postgres-journal.dao = "akka.persistence.postgres.journal.dao.PartitionedJournalDao" 2 | postgres-journal.tables.journal.partitions { 3 | size = 2000 4 | } 5 | -------------------------------------------------------------------------------- /core/src/test/resources/partitioned-shared-db-application.conf: -------------------------------------------------------------------------------- 1 | include "plain-shared-db-application.conf" 2 | include "partitioned-journal.conf" 3 | -------------------------------------------------------------------------------- /core/src/test/resources/plain-application-with-hard-delete.conf: -------------------------------------------------------------------------------- 1 | # Copyright 2016 Dennis Vriend 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | // general.conf is included only for shared settings used for the akka-persistence-postgres tests 16 | include "general.conf" 17 | include "plain-application.conf" 18 | 19 | akka-persistence-postgres.logicalDeletion.enable = false 20 | -------------------------------------------------------------------------------- /core/src/test/resources/plain-application-with-use-journal-metadata.conf: -------------------------------------------------------------------------------- 1 | include "general.conf" 2 | include "plain-application.conf" 3 | 4 | postgres-journal.use-journal-metadata = true 5 | -------------------------------------------------------------------------------- /core/src/test/resources/plain-application.conf: -------------------------------------------------------------------------------- 1 | # Copyright 2016 Dennis Vriend 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 
5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | // general.conf is included only for shared settings used for the akka-persistence-postgres tests 16 | include "general.conf" 17 | 18 | akka { 19 | persistence { 20 | journal { 21 | plugin = "postgres-journal" 22 | // Enable the line below to automatically start the journal when the actorsystem is started 23 | // auto-start-journals = ["postgres-journal"] 24 | } 25 | snapshot-store { 26 | plugin = "postgres-snapshot-store" 27 | // Enable the line below to automatically start the snapshot-store when the actorsystem is started 28 | // auto-start-snapshot-stores = ["postgres-snapshot-store"] 29 | } 30 | } 31 | } 32 | 33 | postgres-journal { 34 | slick = ${slick} 35 | } 36 | 37 | # the akka-persistence-snapshot-store in use 38 | postgres-snapshot-store { 39 | slick = ${slick} 40 | } 41 | 42 | # the akka-persistence-query provider in use 43 | postgres-read-journal { 44 | slick = ${slick} 45 | } 46 | 47 | slick { 48 | profile = "akka.persistence.postgres.db.ExtendedPostgresProfile$" 49 | db { 50 | host = "localhost" 51 | host = ${?POSTGRES_HOST} 52 | url = "jdbc:postgresql://"${slick.db.host}":5432/docker?reWriteBatchedInserts=true" 53 | user = "docker" 54 | user = ${?POSTGRES_USER} 55 | password = "docker" 56 | password = ${?POSTGRES_PASSWORD} 57 | driver = "org.postgresql.Driver" 58 | numThreads = 20 59 | maxConnections = 20 60 | minConnections = 1 61 | } 62 | } 63 | -------------------------------------------------------------------------------- /core/src/test/resources/plain-shared-db-application.conf: -------------------------------------------------------------------------------- 1 | # Copyright 2016 Dennis Vriend 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | include "general.conf" 16 | 17 | akka { 18 | persistence { 19 | journal { 20 | plugin = "postgres-journal" 21 | // Enable the line below to automatically start the journal when the actorsystem is started 22 | // auto-start-journals = ["postgres-journal"] 23 | } 24 | snapshot-store { 25 | plugin = "postgres-snapshot-store" 26 | // Enable the line below to automatically start the snapshot-store when the actorsystem is started 27 | // auto-start-snapshot-stores = ["postgres-snapshot-store"] 28 | } 29 | } 30 | } 31 | 32 | akka-persistence-postgres { 33 | shared-databases { 34 | slick { 35 | profile = "slick.jdbc.PostgresProfile$" 36 | db { 37 | host = "localhost" 38 | host = ${?POSTGRES_HOST} 39 | url = "jdbc:postgresql://"${akka-persistence-postgres.shared-databases.slick.db.host}":5432/docker?reWriteBatchedInserts=true" 40 | user = "docker" 41 | user = ${?POSTGRES_USER} 42 | password = "docker" 43 | password = ${?POSTGRES_PASSWORD} 44 | driver = "org.postgresql.Driver" 45 | numThreads = 5 46 | maxConnections = 5 47 | minConnections = 1 48 | } 49 | } 50 | } 51 | } 52 | 53 | postgres-journal { 54 | use-shared-db = "slick" 55 | } 56 | 57 | # the akka-persistence-snapshot-store in use 58 | postgres-snapshot-store { 59 | use-shared-db = "slick" 60 | } 61 | 62 | # the akka-persistence-query provider in use 63 | postgres-read-journal { 64 | use-shared-db = "slick" 65 | } 66 | -------------------------------------------------------------------------------- /core/src/test/resources/schema/postgres/plain-schema.sql: -------------------------------------------------------------------------------- 1 | DROP TABLE IF EXISTS public.journal; 2 | 3 | CREATE TABLE IF NOT EXISTS public.journal 4 | ( 5 | ordering BIGSERIAL, 6 | sequence_number BIGINT NOT NULL, 7 | deleted BOOLEAN DEFAULT FALSE NOT NULL, 8 | persistence_id TEXT NOT NULL, 9 | message BYTEA NOT NULL, 10 | tags int[], 11 | metadata jsonb NOT NULL, 12 | PRIMARY KEY (persistence_id, sequence_number) 13 | ); 14 | 15 | CREATE EXTENSION IF NOT EXISTS intarray WITH SCHEMA public; 16 | CREATE INDEX journal_tags_idx ON public.journal USING GIN (tags public.gin__int_ops); 17 | CREATE INDEX journal_ordering_idx ON public.journal USING BRIN (ordering); 18 | 19 | DROP TABLE IF EXISTS public.tags; 20 | 21 | CREATE TABLE IF NOT EXISTS public.tags 22 | ( 23 | id BIGSERIAL, 24 | name TEXT NOT NULL, 25 | PRIMARY KEY (id) 26 | ); 27 | 28 | CREATE UNIQUE INDEX IF NOT EXISTS tags_name_idx on public.tags (name); 29 | 30 | DROP TABLE IF EXISTS public.snapshot; 31 | 32 | CREATE TABLE IF NOT EXISTS public.snapshot 33 | ( 34 | persistence_id TEXT NOT NULL, 35 | sequence_number BIGINT NOT NULL, 36 | created BIGINT NOT NULL, 37 | snapshot BYTEA NOT NULL, 38 | metadata jsonb NOT NULL, 39 | PRIMARY KEY (persistence_id, sequence_number) 40 | ); 41 | 42 | DROP TRIGGER IF EXISTS trig_update_journal_metadata ON public.journal; 43 | DROP FUNCTION IF EXISTS public.update_journal_metadata(); 44 | DROP TABLE IF EXISTS public.journal_metadata; 45 | 46 | CREATE TABLE public.journal_metadata( 47 | id BIGINT GENERATED ALWAYS AS IDENTITY, 48 | max_sequence_number BIGINT NOT NULL, 49 | min_ordering BIGINT NOT NULL, 50 | max_ordering BIGINT NOT NULL, 51 | persistence_id TEXT NOT NULL, 52 | PRIMARY KEY (persistence_id) 53 | ) PARTITION BY HASH(persistence_id); 54 | 55 | CREATE TABLE public.journal_metadata_0 PARTITION OF public.journal_metadata FOR VALUES WITH (MODULUS 2, REMAINDER 0); 56 | CREATE TABLE public.journal_metadata_1 PARTITION OF public.journal_metadata FOR VALUES WITH 
(MODULUS 2, REMAINDER 1); 57 | 58 | CREATE OR REPLACE FUNCTION public.update_journal_metadata() RETURNS TRIGGER AS 59 | $$ 60 | DECLARE 61 | BEGIN 62 | INSERT INTO public.journal_metadata (persistence_id, max_sequence_number, max_ordering, min_ordering) 63 | VALUES ( 64 | NEW.persistence_id, 65 | NEW.sequence_number, 66 | NEW.ordering, 67 | CASE 68 | WHEN NEW.sequence_number = 1 THEN NEW.ordering 69 | ELSE -1 70 | END 71 | ) 72 | ON CONFLICT (persistence_id) DO UPDATE 73 | SET 74 | max_sequence_number = GREATEST(public.journal_metadata.max_sequence_number, NEW.sequence_number), 75 | max_ordering = GREATEST(public.journal_metadata.max_ordering, NEW.ordering), 76 | min_ordering = LEAST(public.journal_metadata.min_ordering, NEW.ordering); 77 | 78 | RETURN NEW; 79 | END; 80 | $$ 81 | LANGUAGE plpgsql; 82 | 83 | CREATE TRIGGER trig_update_journal_metadata 84 | AFTER INSERT ON public.journal 85 | FOR EACH ROW 86 | EXECUTE PROCEDURE public.update_journal_metadata(); -------------------------------------------------------------------------------- /core/src/test/scala/akka/persistence/postgres/SharedActorSystemTestSpec.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2019 Dennis Vriend 3 | * Copyright (C) 2019 - 2020 Lightbend Inc. 4 | */ 5 | 6 | package akka.persistence.postgres 7 | 8 | import akka.actor.ActorSystem 9 | import akka.persistence.postgres.config.{ JournalConfig, ReadJournalConfig } 10 | import akka.persistence.postgres.db.SlickExtension 11 | import akka.persistence.postgres.query.javadsl.PostgresReadJournal 12 | import akka.persistence.postgres.util.DropCreate 13 | import akka.serialization.SerializationExtension 14 | import akka.stream.{ Materializer, SystemMaterializer } 15 | import akka.util.Timeout 16 | import com.typesafe.config.{ Config, ConfigFactory, ConfigValue } 17 | import org.scalatest.BeforeAndAfterAll 18 | 19 | import scala.concurrent.ExecutionContext 20 | import scala.concurrent.duration._ 21 | 22 | abstract class SharedActorSystemTestSpec(val config: Config) extends SimpleSpec with DropCreate with BeforeAndAfterAll { 23 | def this(config: String = "plain-application.conf", configOverrides: Map[String, ConfigValue] = Map.empty) = 24 | this(configOverrides.foldLeft(ConfigFactory.load(config)) { case (conf, (path, configValue)) => 25 | conf.withValue(path, configValue) 26 | }) 27 | 28 | implicit lazy val system: ActorSystem = ActorSystem("test", config) 29 | implicit lazy val mat: Materializer = SystemMaterializer(system).materializer 30 | 31 | implicit lazy val ec: ExecutionContext = system.dispatcher 32 | implicit val pc: PatienceConfig = PatienceConfig(timeout = 2.minutes) 33 | implicit val timeout: Timeout = Timeout(1.minute) 34 | 35 | lazy val serialization = SerializationExtension(system) 36 | 37 | val cfg = config.getConfig("postgres-journal") 38 | val journalConfig = new JournalConfig(cfg) 39 | lazy val db = SlickExtension(system).database(cfg).database 40 | val readJournalConfig = new ReadJournalConfig(config.getConfig(PostgresReadJournal.Identifier)) 41 | 42 | override protected def afterAll(): Unit = { 43 | super.afterAll() 44 | db.close() 45 | system.terminate().futureValue 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /core/src/test/scala/akka/persistence/postgres/SimpleSpec.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2019 Dennis Vriend 3 | * Copyright (C) 
2019 - 2020 Lightbend Inc. 4 | */ 5 | 6 | package akka.persistence.postgres 7 | 8 | import akka.actor.{ ActorRef, ActorSystem } 9 | import akka.persistence.postgres.util.ClasspathResources 10 | import akka.testkit.TestProbe 11 | import org.scalatest._ 12 | import org.scalatest.concurrent.{ Eventually, ScalaFutures } 13 | import org.scalatest.flatspec.AnyFlatSpec 14 | import org.scalatest.matchers.should.Matchers 15 | 16 | trait SimpleSpec 17 | extends AnyFlatSpec 18 | with Matchers 19 | with ScalaFutures 20 | with TryValues 21 | with OptionValues 22 | with Eventually 23 | with ClasspathResources 24 | with BeforeAndAfterAll 25 | with BeforeAndAfterEach 26 | with GivenWhenThen { 27 | 28 | /** 29 | * Stops the given actors and waits for each of them to terminate 30 | */ 31 | def killActors(actors: ActorRef*)(implicit system: ActorSystem): Unit = { 32 | val tp = TestProbe() 33 | actors.foreach { (actor: ActorRef) => 34 | tp.watch(actor) 35 | system.stop(actor) 36 | tp.expectTerminated(actor) 37 | } 38 | } 39 | } 40 | -------------------------------------------------------------------------------- /core/src/test/scala/akka/persistence/postgres/SingleActorSystemPerTestSpec.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2019 Dennis Vriend 3 | * Copyright (C) 2019 - 2020 Lightbend Inc. 4 | */ 5 | 6 | package akka.persistence.postgres 7 | 8 | import akka.actor.ActorSystem 9 | import akka.persistence.postgres.config.{ JournalConfig, ReadJournalConfig, SlickConfiguration } 10 | import akka.persistence.postgres.db.SlickDatabase 11 | import akka.persistence.postgres.query.javadsl.PostgresReadJournal 12 | import akka.persistence.postgres.util.DropCreate 13 | import akka.util.Timeout 14 | import com.typesafe.config.{ Config, ConfigFactory, ConfigValue } 15 | import org.scalatest.BeforeAndAfterEach 16 | import slick.jdbc.JdbcBackend.Database 17 | 18 | import scala.concurrent.duration._ 19 | 20 | abstract class SingleActorSystemPerTestSpec(val config: Config) 21 | extends SimpleSpec 22 | with DropCreate 23 | with BeforeAndAfterEach { 24 | def this(config: String = "plain-application.conf", configOverrides: Map[String, ConfigValue] = Map.empty) = 25 | this(configOverrides.foldLeft(ConfigFactory.load(config)) { case (conf, (path, configValue)) => 26 | conf.withValue(path, configValue) 27 | }) 28 | 29 | implicit val pc: PatienceConfig = PatienceConfig(timeout = 2.minutes) 30 | implicit val timeout: Timeout = Timeout(1.minute) 31 | 32 | val cfg: Config = config.getConfig("postgres-journal") 33 | val journalConfig = new JournalConfig(cfg) 34 | val readJournalConfig = new ReadJournalConfig(config.getConfig(PostgresReadJournal.Identifier)) 35 | 36 | // The db is initialized in the beforeEach and afterEach blocks 37 | var dbOpt: Option[Database] = None 38 | def db: Database = { 39 | dbOpt.getOrElse { 40 | val newDb = if (cfg.hasPath("slick.profile")) { 41 | SlickDatabase.database(cfg, new SlickConfiguration(cfg.getConfig("slick")), "slick.db") 42 | } else 43 | SlickDatabase.database( 44 | config, 45 | new SlickConfiguration(config.getConfig("akka-persistence-postgres.shared-databases.slick")), 46 | "akka-persistence-postgres.shared-databases.slick.db") 47 | 48 | dbOpt = Some(newDb) 49 | newDb 50 | } 51 | } 52 | 53 | def closeDb(): Unit = { 54 | dbOpt.foreach(_.close()) 55 | dbOpt = None 56 | } 57 | 58 | override protected def afterEach(): Unit = { 59 | super.afterEach() 60 | closeDb() 61 | } 62 | 63 | override protected def afterAll(): Unit =
{ 64 | super.afterAll() 65 | closeDb() 66 | } 67 | 68 | def withActorSystem(f: ActorSystem => Unit): Unit = { 69 | implicit val system: ActorSystem = ActorSystem("test", config) 70 | f(system) 71 | system.terminate().futureValue 72 | } 73 | 74 | def withActorSystem(config: Config = config)(f: ActorSystem => Unit): Unit = { 75 | implicit val system: ActorSystem = ActorSystem("test", config) 76 | f(system) 77 | system.terminate().futureValue 78 | } 79 | } 80 | -------------------------------------------------------------------------------- /core/src/test/scala/akka/persistence/postgres/configuration/ConfigOpsTest.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2019 Dennis Vriend 3 | * Copyright (C) 2019 - 2020 Lightbend Inc. 4 | */ 5 | 6 | package akka.persistence.postgres.configuration 7 | 8 | import akka.persistence.postgres.SimpleSpec 9 | import akka.persistence.postgres.util.ConfigOps 10 | import ConfigOps._ 11 | import com.typesafe.config.ConfigFactory 12 | 13 | class ConfigOpsTest extends SimpleSpec { 14 | it should "parse field values to Try[A]" in { 15 | val cfg = ConfigFactory.parseString(""" 16 | | person { 17 | | firstName = "foo" 18 | | lastName = "bar" 19 | | age = 25 20 | | hasCar = true 21 | | hasGirlfriend = false 22 | | } 23 | """.stripMargin) 24 | 25 | cfg.as[String]("person.firstName").success.value shouldBe "foo" 26 | cfg.as[String]("person.lastName").success.value shouldBe "bar" 27 | cfg.as[Int]("person.age").success.value shouldBe 25 28 | cfg.as[Boolean]("person.hasCar").success.value shouldBe true 29 | cfg.as[Boolean]("person.hasGirlfriend").success.value shouldBe false 30 | } 31 | 32 | it should "parse field values with default values with empty config" in { 33 | val cfg = ConfigFactory.parseString("") 34 | cfg.as[String]("person.firstName", "john") shouldBe "john" 35 | cfg.as[String]("person.lastName", "doe") shouldBe "doe" 36 | cfg.as[Int]("person.age", 35) shouldBe 35 37 | cfg.as[Boolean]("person.hasCar", false) shouldBe false 38 | cfg.as[Boolean]("person.hasGirlfriend", false) shouldBe false 39 | } 40 | 41 | it should "parse field values with default values for wrong config" in { 42 | val cfg = ConfigFactory.parseString(""" 43 | | RedShirt { 44 | | firstName = "red" 45 | | lastName = "shirt" 46 | | age = 25 47 | | hasSpaceship = true 48 | | hasGirlfriend = true 49 | | } 50 | """.stripMargin) 51 | 52 | cfg.as[String]("person.firstName", "john") shouldBe "john" 53 | cfg.as[String]("person.lastName", "doe") shouldBe "doe" 54 | cfg.as[Int]("person.age", 35) shouldBe 35 55 | cfg.as[Boolean]("person.hasCar", false) shouldBe false 56 | cfg.as[Boolean]("person.hasGirlfriend", false) shouldBe false 57 | } 58 | 59 | it should "parse field values to with defaults" in { 60 | val cfg = ConfigFactory.parseString(""" 61 | | person { 62 | | age = 25 63 | | hasGirlfriend = true 64 | | } 65 | """.stripMargin) 66 | 67 | cfg.as[String]("person.firstName", "john") shouldBe "john" 68 | cfg.as[String]("person.lastName", "doe") shouldBe "doe" 69 | cfg.as[Int]("person.age", 35) shouldBe 25 70 | cfg.as[Boolean]("person.hasCar", false) shouldBe false 71 | cfg.as[Boolean]("person.hasGirlfriend", false) shouldBe true 72 | } 73 | } 74 | -------------------------------------------------------------------------------- /core/src/test/scala/akka/persistence/postgres/configuration/JNDIConfigTest.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2019 
Dennis Vriend 3 | * Copyright (C) 2019 - 2020 Lightbend Inc. 4 | */ 5 | 6 | package akka.persistence.postgres.configuration 7 | 8 | import akka.actor.ActorSystem 9 | import akka.persistence.postgres.SimpleSpec 10 | import akka.persistence.postgres.db.SlickExtension 11 | import com.typesafe.config.ConfigFactory 12 | 13 | class JNDIConfigTest extends SimpleSpec { 14 | "JNDI config" should "read the config and throw NoInitialContextException in case the JNDI resource is not available" in { 15 | withActorSystem("jndi-application.conf") { system => 16 | val jdbcJournalConfig = system.settings.config.getConfig("postgres-journal") 17 | val slickExtension = SlickExtension(system) 18 | intercept[javax.naming.NoInitialContextException] { 19 | // Since the JNDI resource is not actually available we expect a NoInitialContextException 20 | // This is an indication that the application actually attempts to load the configured JNDI resource 21 | slickExtension.database(jdbcJournalConfig).database 22 | } 23 | } 24 | } 25 | 26 | "JNDI config for shared databases" should "read the config and throw NoInitialContextException in case the JNDI resource is not available" in { 27 | withActorSystem("jndi-shared-db-application.conf") { system => 28 | val jdbcJournalConfig = system.settings.config.getConfig("postgres-journal") 29 | val slickExtension = SlickExtension(system) 30 | intercept[javax.naming.NoInitialContextException] { 31 | // Since the JNDI resource is not actually available we expect a NoInitialContextException 32 | // This is an indication that the application actually attempts to load the configured JNDI resource 33 | slickExtension.database(jdbcJournalConfig).database 34 | } 35 | } 36 | } 37 | 38 | def withActorSystem(config: String)(f: ActorSystem => Unit): Unit = { 39 | val cfg = ConfigFactory.load(config) 40 | val system = ActorSystem("test", cfg) 41 | 42 | try { 43 | f(system) 44 | } finally { 45 | system.terminate().futureValue 46 | } 47 | } 48 | } 49 | -------------------------------------------------------------------------------- /core/src/test/scala/akka/persistence/postgres/journal/dao/JournalMetadataQueriesTest.scala: -------------------------------------------------------------------------------- 1 | package akka.persistence.postgres.journal.dao 2 | 3 | import akka.persistence.postgres.util.BaseQueryTest 4 | 5 | class JournalMetadataQueriesTest extends BaseQueryTest { 6 | 7 | it should "create SQL query for highestSequenceNrForPersistenceId" in withJournalMetadataQueries { queries => 8 | queries.highestSequenceNrForPersistenceId( 9 | "aaa") shouldBeSQL """select "max_sequence_number" from "journal_metadata" where "persistence_id" = ? limit 1""" 10 | } 11 | 12 | it should "create SQL query for minAndMaxOrderingForPersistenceId" in withJournalMetadataQueries { queries => 13 | queries.minAndMaxOrderingForPersistenceId( 14 | "aaa") shouldBeSQL """select "min_ordering", "max_ordering" from "journal_metadata" where "persistence_id" = ? 
limit 1""" 15 | } 16 | 17 | private def withJournalMetadataQueries(f: JournalMetadataQueries => Unit): Unit = { 18 | withActorSystem { implicit system => 19 | f(new JournalMetadataQueries(JournalMetadataTable(journalConfig.journalMetadataTableConfiguration))) 20 | } 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /core/src/test/scala/akka/persistence/postgres/journal/dao/JournalQueriesTest.scala: -------------------------------------------------------------------------------- 1 | package akka.persistence.postgres.journal.dao 2 | 3 | import akka.persistence.postgres.JournalRow 4 | import akka.persistence.postgres.util.BaseQueryTest 5 | import io.circe.{ Json, JsonObject } 6 | 7 | class JournalQueriesTest extends BaseQueryTest { 8 | 9 | it should "create SQL query for highestMarkedSequenceNrForPersistenceId" in withJournalQueries { queries => 10 | queries.highestMarkedSequenceNrForPersistenceId( 11 | "aaa") shouldBeSQL """select max("sequence_number") from "journal" where ("deleted" = true) and ("persistence_id" = ?)""" 12 | } 13 | 14 | it should "create SQL query for highestSequenceNrForPersistenceId" in withJournalQueries { queries => 15 | queries.highestSequenceNrForPersistenceId( 16 | "aaa") shouldBeSQL """select max("sequence_number") from "journal" where "persistence_id" = ?""" 17 | } 18 | 19 | it should "create SQL query for messagesQuery" in withJournalQueries { queries => 20 | queries.messagesQuery( 21 | "aaa", 22 | 11L, 23 | 11L, 24 | 11L) shouldBeSQL """select "ordering", "deleted", "persistence_id", "sequence_number", "message", "tags", "metadata" from "journal" where ((("persistence_id" = ?) and ("deleted" = false)) and ("sequence_number" >= ?)) and ("sequence_number" <= ?) order by "sequence_number" limit ?""" 25 | } 26 | 27 | it should "create SQL query for messagesMinOrderingBoundedQuery" in withJournalQueries { queries => 28 | queries.messagesMinOrderingBoundedQuery( 29 | "aaa", 30 | 11L, 31 | 11L, 32 | 11L, 33 | 11L) shouldBeSQL """select "ordering", "deleted", "persistence_id", "sequence_number", "message", "tags", "metadata" from "journal" where (((("persistence_id" = ?) and ("deleted" = false)) and ("sequence_number" >= ?)) and ("sequence_number" <= ?)) and ("ordering" >= ?) order by "sequence_number" limit ?""" 34 | } 35 | 36 | it should "create SQL query for markJournalMessagesAsDeleted" in withJournalQueries { queries => 37 | queries.markJournalMessagesAsDeleted( 38 | "aaa", 39 | 11L) shouldBeSQL """update "journal" set "deleted" = ? where (("journal"."persistence_id" = 'aaa') and ("journal"."sequence_number" <= 11)) and ("journal"."deleted" = false)""" 40 | } 41 | 42 | it should "create SQL query for update" in withJournalQueries { queries => 43 | queries.update( 44 | "aaa", 45 | 11L, 46 | Array.ofDim(0), 47 | emptyJson) shouldBeSQL """update "journal" set "message" = ?, "metadata" = ? 
where ("journal"."persistence_id" = 'aaa') and ("journal"."sequence_number" = 11)""" 48 | } 49 | 50 | it should "create SQL query for delete" in withJournalQueries { queries => 51 | queries.delete( 52 | "aaa", 53 | 11L) shouldBeSQL """delete from "journal" where ("journal"."persistence_id" = 'aaa') and ("journal"."sequence_number" <= 11)""" 54 | } 55 | 56 | it should "create SQL query for writeJournalRows" in withJournalQueries { queries => 57 | val row = JournalRow(1L, deleted = false, "p", 3L, Array.ofDim(0), List(1, 2, 3), emptyJson) 58 | queries.writeJournalRows( 59 | Seq(row, row, row)) shouldBeSQL """insert into "journal" ("deleted","persistence_id","sequence_number","message","tags","metadata") values (?,?,?,?,?,?)""" 60 | } 61 | 62 | private lazy val emptyJson = Json.fromJsonObject(JsonObject.empty) 63 | 64 | private def withJournalQueries(f: JournalQueries => Unit): Unit = { 65 | withActorSystem { implicit system => 66 | f(new JournalQueries(FlatJournalTable.apply(journalConfig.journalTableConfiguration))) 67 | } 68 | } 69 | } 70 | -------------------------------------------------------------------------------- /core/src/test/scala/akka/persistence/postgres/journal/dao/JournalTablesTest.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2019 Dennis Vriend 3 | * Copyright (C) 2019 - 2020 Lightbend Inc. 4 | */ 5 | 6 | package akka.persistence.postgres.journal.dao 7 | 8 | import akka.persistence.postgres.TablesTestSpec 9 | 10 | class JournalTablesTest extends TablesTestSpec { 11 | val journalTableConfiguration = journalConfig.journalTableConfiguration 12 | 13 | for { 14 | (journalName, journalTable) <- List( 15 | ("FlatJournalTable", FlatJournalTable(journalTableConfiguration)), 16 | ("PartitionedJournalTable", PartitionedJournalTable(journalTableConfiguration)), 17 | ("NestedPartitionsJournalTable", NestedPartitionsJournalTable(journalTableConfiguration))) 18 | } { 19 | journalName should "be configured with a schema name" in { 20 | journalTable.baseTableRow.schemaName shouldBe journalTableConfiguration.schemaName 21 | 22 | } 23 | 24 | it should "be configured with a table name" in { 25 | journalTable.baseTableRow.tableName shouldBe journalTableConfiguration.tableName 26 | } 27 | 28 | it should "be configured with column names" in { 29 | val colName = toColumnName(journalTableConfiguration.tableName)(_) 30 | journalTable.baseTableRow.persistenceId.toString shouldBe colName( 31 | journalTableConfiguration.columnNames.persistenceId) 32 | journalTable.baseTableRow.deleted.toString shouldBe colName(journalTableConfiguration.columnNames.deleted) 33 | journalTable.baseTableRow.sequenceNumber.toString shouldBe colName( 34 | journalTableConfiguration.columnNames.sequenceNumber) 35 | journalTable.baseTableRow.tags.toString shouldBe colName(journalTableConfiguration.columnNames.tags) 36 | } 37 | } 38 | 39 | val journalMetadataTableConfiguration = journalConfig.journalMetadataTableConfiguration 40 | val journalMetadataTable = JournalMetadataTable(journalMetadataTableConfiguration) 41 | 42 | "JournalMetadataTable" should "be configured with a schema name" in { 43 | journalMetadataTable.baseTableRow.schemaName shouldBe journalMetadataTableConfiguration.schemaName 44 | } 45 | 46 | it should "be configured with a table name" in { 47 | journalMetadataTable.baseTableRow.tableName shouldBe journalMetadataTableConfiguration.tableName 48 | } 49 | 50 | it should "be configured with column names" in { 51 | val colName = 
toColumnName(journalMetadataTableConfiguration.tableName)(_) 52 | journalMetadataTable.baseTableRow.persistenceId.toString shouldBe colName( 53 | journalMetadataTableConfiguration.columnNames.persistenceId) 54 | journalMetadataTable.baseTableRow.maxSequenceNumber.toString shouldBe colName( 55 | journalMetadataTableConfiguration.columnNames.maxSequenceNumber) 56 | journalMetadataTable.baseTableRow.maxOrdering.toString shouldBe colName( 57 | journalMetadataTableConfiguration.columnNames.maxOrdering) 58 | journalMetadataTable.baseTableRow.minOrdering.toString shouldBe colName( 59 | journalMetadataTableConfiguration.columnNames.minOrdering) 60 | } 61 | } 62 | -------------------------------------------------------------------------------- /core/src/test/scala/akka/persistence/postgres/journal/dao/TagsSerializationTest.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2019 Dennis Vriend 3 | * Copyright (C) 2019 - 2020 Lightbend Inc. 4 | */ 5 | 6 | package akka.persistence.postgres.journal.dao 7 | 8 | import akka.persistence.postgres.SharedActorSystemTestSpec 9 | 10 | class TagsSerializationTest extends SharedActorSystemTestSpec { 11 | "Encode" should "no tags" in { 12 | encodeTags(Set.empty[String], ",") shouldBe None 13 | } 14 | 15 | it should "one tag" in { 16 | encodeTags(Set("foo"), ",").value shouldBe "foo" 17 | } 18 | 19 | it should "two tags" in { 20 | encodeTags(Set("foo", "bar"), ",").value shouldBe "foo,bar" 21 | } 22 | 23 | it should "three tags" in { 24 | encodeTags(Set("foo", "bar", "baz"), ",").value shouldBe "foo,bar,baz" 25 | } 26 | 27 | "decode" should "no tags" in { 28 | decodeTags(None, ",") shouldBe Set() 29 | } 30 | 31 | it should "one tag with separator" in { 32 | decodeTags(Some("foo"), ",") shouldBe Set("foo") 33 | } 34 | 35 | it should "two tags with separator" in { 36 | decodeTags(Some("foo,bar"), ",") shouldBe Set("foo", "bar") 37 | } 38 | 39 | it should "three tags with separator" in { 40 | decodeTags(Some("foo,bar,baz"), ",") shouldBe Set("foo", "bar", "baz") 41 | } 42 | 43 | "TagsSerialization" should "be bijective" in { 44 | val tags: Set[String] = Set("foo", "bar", "baz") 45 | decodeTags(encodeTags(tags, ","), ",") shouldBe tags 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /core/src/test/scala/akka/persistence/postgres/query/AllPersistenceIdsTest.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2019 Dennis Vriend 3 | * Copyright (C) 2019 - 2020 Lightbend Inc. 
4 | */ 5 | 6 | package akka.persistence.postgres.query 7 | 8 | import akka.persistence.postgres.util.Schema.{ NestedPartitions, Partitioned, Plain, SchemaType } 9 | 10 | import scala.concurrent.duration._ 11 | 12 | abstract class AllPersistenceIdsTest(val schemaType: SchemaType) extends QueryTestSpec(schemaType.configName) { 13 | it should "not terminate the stream when there are not pids" in withActorSystem { implicit system => 14 | val journalOps = new ScalaPostgresReadJournalOperations(system) 15 | journalOps.withPersistenceIds() { tp => 16 | tp.request(1) 17 | tp.expectNoMessage(100.millis) 18 | tp.cancel() 19 | tp.expectNoMessage(100.millis) 20 | } 21 | } 22 | 23 | it should "find persistenceIds for actors" in withActorSystem { implicit system => 24 | val journalOps = new JavaDslPostgresReadJournalOperations(system) 25 | withTestActors() { (actor1, actor2, actor3) => 26 | journalOps.withPersistenceIds() { tp => 27 | tp.request(10) 28 | tp.expectNoMessage(100.millis) 29 | 30 | actor1 ! 1 31 | tp.expectNext(ExpectNextTimeout, "my-1") 32 | tp.expectNoMessage(100.millis) 33 | 34 | actor2 ! 1 35 | tp.expectNext(ExpectNextTimeout, "my-2") 36 | tp.expectNoMessage(100.millis) 37 | 38 | actor3 ! 1 39 | tp.expectNext(ExpectNextTimeout, "my-3") 40 | tp.expectNoMessage(100.millis) 41 | 42 | actor1 ! 1 43 | tp.expectNoMessage(100.millis) 44 | 45 | actor2 ! 1 46 | tp.expectNoMessage(100.millis) 47 | 48 | actor3 ! 1 49 | tp.expectNoMessage(100.millis) 50 | 51 | tp.cancel() 52 | tp.expectNoMessage(100.millis) 53 | } 54 | } 55 | } 56 | } 57 | 58 | class NestedPartitionsScalaAllPersistenceIdsTest extends AllPersistenceIdsTest(NestedPartitions) 59 | 60 | class PartitionedScalaAllPersistenceIdsTest extends AllPersistenceIdsTest(Partitioned) 61 | 62 | class PlainScalaAllPersistenceIdsTest extends AllPersistenceIdsTest(Plain) 63 | -------------------------------------------------------------------------------- /core/src/test/scala/akka/persistence/postgres/query/CurrentPersistenceIdsTest.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2019 Dennis Vriend 3 | * Copyright (C) 2019 - 2020 Lightbend Inc. 4 | */ 5 | 6 | package akka.persistence.postgres.query 7 | 8 | import akka.persistence.postgres.util.Schema.{ NestedPartitions, Partitioned, Plain, SchemaType } 9 | 10 | abstract class CurrentPersistenceIdsTest(val schemaType: SchemaType) 11 | extends QueryTestSpec(s"${schemaType.resourceNamePrefix}-shared-db-application.conf") { 12 | it should "not find any persistenceIds for empty journal" in withActorSystem { implicit system => 13 | val journalOps = new ScalaPostgresReadJournalOperations(system) 14 | journalOps.withCurrentPersistenceIds() { tp => 15 | tp.request(1) 16 | tp.expectComplete() 17 | } 18 | } 19 | 20 | it should "find persistenceIds for actors" in withActorSystem { implicit system => 21 | val journalOps = new JavaDslPostgresReadJournalOperations(system) 22 | withTestActors() { (actor1, actor2, actor3) => 23 | actor1 ! 1 24 | actor2 ! 1 25 | actor3 ! 
1 26 | 27 | eventually { 28 | journalOps.withCurrentPersistenceIds() { tp => 29 | tp.request(3) 30 | tp.expectNextUnordered("my-1", "my-2", "my-3") 31 | tp.expectComplete() 32 | } 33 | } 34 | } 35 | } 36 | } 37 | 38 | // Note: these tests use the shared-db configs; the test for all persistence ids uses the regular db config 39 | 40 | class NestedPartitionsScalaCurrentPersistenceIdsTest extends CurrentPersistenceIdsTest(NestedPartitions) 41 | 42 | class PartitionedScalaCurrentPersistenceIdsTest extends CurrentPersistenceIdsTest(Partitioned) 43 | 44 | class PlainScalaCurrentPersistenceIdsTest extends CurrentPersistenceIdsTest(Plain) 45 | -------------------------------------------------------------------------------- /core/src/test/scala/akka/persistence/postgres/query/TaggingEventAdapter.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2019 Dennis Vriend 3 | * Copyright (C) 2019 - 2020 Lightbend Inc. 4 | */ 5 | 6 | package akka.persistence.postgres.query 7 | 8 | import akka.persistence.postgres.query.TaggingEventAdapter.TagEvent 9 | import akka.persistence.journal.{ Tagged, WriteEventAdapter } 10 | 11 | object TaggingEventAdapter { 12 | case class TagEvent(payload: Any, tags: Set[String]) 13 | } 14 | 15 | /** 16 | * The TaggingEventAdapter will instruct persistence 17 | * to tag the received event. 18 | */ 19 | class TaggingEventAdapter extends WriteEventAdapter { 20 | override def manifest(event: Any): String = "" 21 | 22 | override def toJournal(event: Any): Any = event match { 23 | case TagEvent(payload, tags) => 24 | Tagged(payload, tags) 25 | case _ => event 26 | } 27 | } 28 | -------------------------------------------------------------------------------- /core/src/test/scala/akka/persistence/postgres/query/dao/ReadJournalMetadataQueriesTest.scala: -------------------------------------------------------------------------------- 1 | package akka.persistence.postgres.query.dao 2 | 3 | import akka.persistence.postgres.journal.dao.JournalMetadataTable 4 | import akka.persistence.postgres.util.BaseQueryTest 5 | 6 | class ReadJournalMetadataQueriesTest extends BaseQueryTest { 7 | it should "create SQL query for minAndMaxOrderingForPersistenceId" in withReadJournalMetadataQueries { queries => 8 | queries.minAndMaxOrderingForPersistenceId( 9 | "aaa") shouldBeSQL """select "min_ordering", "max_ordering" from "journal_metadata" where "persistence_id" = ?
limit 1""" 10 | } 11 | 12 | private def withReadJournalMetadataQueries(f: ReadJournalMetadataQueries => Unit): Unit = { 13 | withActorSystem { implicit system => 14 | f(new ReadJournalMetadataQueries(JournalMetadataTable(readJournalConfig.journalMetadataTableConfiguration))) 15 | } 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /core/src/test/scala/akka/persistence/postgres/query/dao/ReadJournalQueriesTest.scala: -------------------------------------------------------------------------------- 1 | package akka.persistence.postgres.query.dao 2 | 3 | import akka.persistence.postgres.journal.dao.FlatJournalTable 4 | import akka.persistence.postgres.util.BaseQueryTest 5 | 6 | class ReadJournalQueriesTest extends BaseQueryTest { 7 | 8 | it should "create SQL query for allPersistenceIdsDistinct" in withReadJournalQueries { queries => 9 | queries.allPersistenceIdsDistinct(23L) shouldBeSQL """select distinct "persistence_id" from "journal" limit ?""" 10 | } 11 | 12 | it should "create SQL query for messagesQuery" in withReadJournalQueries { queries => 13 | queries.messagesQuery( 14 | "p1", 15 | 1L, 16 | 4L, 17 | 5L) shouldBeSQL """select "ordering", "deleted", "persistence_id", "sequence_number", "message", "tags", "metadata" from "journal" where (("persistence_id" = ?) and ("sequence_number" >= ?)) and ("sequence_number" <= ?) order by "sequence_number" limit ?""" 18 | } 19 | 20 | it should "create SQL query for messagesMinOrderingBoundedQuery" in withReadJournalQueries { queries => 21 | queries.messagesMinOrderingBoundedQuery( 22 | "aaa", 23 | 1L, 24 | 4L, 25 | 5L, 26 | 1L) shouldBeSQL """select "ordering", "deleted", "persistence_id", "sequence_number", "message", "tags", "metadata" from "journal" where ((("persistence_id" = ?) and ("sequence_number" >= ?)) and ("sequence_number" <= ?)) and ("ordering" >= ?) order by "sequence_number" limit ?""" 27 | } 28 | 29 | it should "create SQL query for eventsByTag" in withReadJournalQueries { queries => 30 | queries.eventsByTag( 31 | List(11), 32 | 23L, 33 | 25L) shouldBeSQL """select "ordering", "deleted", "persistence_id", "sequence_number", "message", "tags", "metadata" from "journal" where ("tags" @> ?) and (("ordering" > ?) and ("ordering" <= ?)) order by "ordering"""" 34 | } 35 | 36 | it should "create SQL query for journalSequenceQuery" in withReadJournalQueries { queries => 37 | queries.orderingByOrdering( 38 | 11L, 39 | 23L) shouldBeSQL """select "ordering" from "journal" where "ordering" > ? order by "ordering" limit ?""" 40 | } 41 | 42 | it should "create SQL query for maxJournalSequenceQuery" in withReadJournalQueries { queries => 43 | queries.maxOrdering shouldBeSQL """select max("ordering") from "journal"""" 44 | } 45 | 46 | private def withReadJournalQueries(f: ReadJournalQueries => Unit): Unit = { 47 | withActorSystem { implicit system => 48 | f( 49 | new ReadJournalQueries( 50 | FlatJournalTable(readJournalConfig.journalTableConfiguration), 51 | readJournalConfig.includeDeleted)) 52 | } 53 | } 54 | } 55 | -------------------------------------------------------------------------------- /core/src/test/scala/akka/persistence/postgres/query/dao/ReadJournalTablesTest.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2019 Dennis Vriend 3 | * Copyright (C) 2019 - 2020 Lightbend Inc. 
4 | */ 5 | 6 | package akka.persistence.postgres.query.dao 7 | 8 | import akka.persistence.postgres.TablesTestSpec 9 | import akka.persistence.postgres.journal.dao.{ FlatJournalTable, NestedPartitionsJournalTable, PartitionedJournalTable } 10 | 11 | class ReadJournalTablesTest extends TablesTestSpec { 12 | val readJournalTableConfiguration = readJournalConfig.journalTableConfiguration 13 | 14 | for { 15 | (journalName, journalTable) <- List( 16 | ("FlatJournalTable", FlatJournalTable(readJournalTableConfiguration)), 17 | ("PartitionedJournalTable", PartitionedJournalTable(readJournalTableConfiguration)), 18 | ("NestedPartitionsJournalTable", NestedPartitionsJournalTable(readJournalTableConfiguration))) 19 | } { 20 | s"Read $journalName" should "be configured with a schema name" in { 21 | journalTable.baseTableRow.schemaName shouldBe readJournalTableConfiguration.schemaName 22 | } 23 | 24 | it should "be configured with a table name" in { 25 | journalTable.baseTableRow.tableName shouldBe readJournalTableConfiguration.tableName 26 | } 27 | 28 | it should "be configured with column names" in { 29 | val colName = toColumnName(readJournalTableConfiguration.tableName)(_) 30 | journalTable.baseTableRow.persistenceId.toString shouldBe colName( 31 | readJournalTableConfiguration.columnNames.persistenceId) 32 | journalTable.baseTableRow.sequenceNumber.toString shouldBe colName( 33 | readJournalTableConfiguration.columnNames.sequenceNumber) 34 | journalTable.baseTableRow.tags.toString shouldBe colName(readJournalTableConfiguration.columnNames.tags) 35 | } 36 | } 37 | } 38 | -------------------------------------------------------------------------------- /core/src/test/scala/akka/persistence/postgres/query/dao/TestProbeReadJournalDao.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2019 Dennis Vriend 3 | * Copyright (C) 2019 - 2020 Lightbend Inc. 4 | */ 5 | 6 | package akka.persistence.postgres.query.dao 7 | 8 | import akka.NotUsed 9 | import akka.actor.Scheduler 10 | import akka.pattern.ask 11 | import akka.persistence.PersistentRepr 12 | import akka.persistence.postgres.query.dao.TestProbeReadJournalDao.JournalSequence 13 | import akka.stream.scaladsl.Source 14 | import akka.testkit.TestProbe 15 | import akka.util.Timeout 16 | 17 | import scala.concurrent.Future 18 | import scala.concurrent.duration._ 19 | import scala.util.Try 20 | 21 | object TestProbeReadJournalDao { 22 | case class JournalSequence(offset: Long, limit: Long) 23 | } 24 | 25 | /** 26 | * Read journal dao where the journalSequence query is backed by a testprobe 27 | */ 28 | class TestProbeReadJournalDao(val probe: TestProbe) extends ReadJournalDao { 29 | // Since the testprobe is instrumented by the test, it should respond very fast 30 | implicit val askTimeout: Timeout = Timeout(100.millis) 31 | 32 | /** 33 | * Returns distinct stream of persistenceIds 34 | */ 35 | override def allPersistenceIdsSource(max: Long): Source[String, NotUsed] = ??? 36 | 37 | /** 38 | * Returns a Source of bytes for certain tag from an offset. The result is sorted by 39 | * created time asc thus the offset is relative to the creation time 40 | */ 41 | override def eventsByTag( 42 | tag: String, 43 | offset: Long, 44 | maxOffset: Long, 45 | max: Long): Source[Try[(PersistentRepr, Long)], NotUsed] = ??? 
46 | 47 | /** 48 | * Returns a Source of bytes for a certain persistenceId 49 | */ 50 | override def messages( 51 | persistenceId: String, 52 | fromSequenceNr: Long, 53 | toSequenceNr: Long, 54 | max: Long): Source[Try[(PersistentRepr, Long)], NotUsed] = ??? 55 | 56 | override def messagesWithBatch( 57 | persistenceId: String, 58 | fromSequenceNr: Long, 59 | toSequenceNr: Long, 60 | batchSize: Int, 61 | refreshInterval: Option[(FiniteDuration, Scheduler)]): Source[Try[(PersistentRepr, Long)], NotUsed] = ??? 62 | 63 | /** 64 | * @param offset Minimum value to retrieve 65 | * @param limit Maximum number of values to retrieve 66 | * @return A Source of journal event sequence numbers (corresponding to the Ordering column) 67 | */ 68 | override def journalSequence(offset: Long, limit: Long): Source[Long, NotUsed] = { 69 | val f = probe.ref.ask(JournalSequence(offset, limit)).mapTo[scala.collection.immutable.Seq[Long]] 70 | Source.future(f).mapConcat(identity) 71 | } 72 | 73 | /** 74 | * @return The value of the maximum (ordering) id in the journal 75 | */ 76 | override def maxJournalSequence(): Future[Long] = Future.successful(0) 77 | 78 | } 79 | -------------------------------------------------------------------------------- /core/src/test/scala/akka/persistence/postgres/snapshot/PostgresSnapshotStoreSpec.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2019 Dennis Vriend 3 | * Copyright (C) 2019 - 2020 Lightbend Inc. 4 | */ 5 | 6 | package akka.persistence.postgres.snapshot 7 | 8 | import akka.persistence.postgres.config._ 9 | import akka.persistence.postgres.db.SlickDatabase 10 | import akka.persistence.postgres.util.Schema._ 11 | import akka.persistence.postgres.util.{ClasspathResources, DropCreate} 12 | import akka.persistence.snapshot.SnapshotStoreSpec 13 | import com.typesafe.config.ConfigFactory 14 | import org.scalatest.BeforeAndAfterAll 15 | import org.scalatest.concurrent.ScalaFutures 16 | 17 | import scala.concurrent.ExecutionContextExecutor 18 | import scala.concurrent.duration._ 19 | 20 | abstract class PostgresSnapshotStoreSpec(schemaType: SchemaType) 21 | extends SnapshotStoreSpec(ConfigFactory.load(schemaType.configName)) 22 | with BeforeAndAfterAll 23 | with ScalaFutures 24 | with ClasspathResources 25 | with DropCreate { 26 | implicit val pc: PatienceConfig = PatienceConfig(timeout = 10.seconds) 27 | 28 | implicit lazy val ec: ExecutionContextExecutor = system.dispatcher 29 | 30 | lazy val cfg = system.settings.config.getConfig("postgres-journal") 31 | 32 | lazy val journalConfig = new JournalConfig(cfg) 33 | 34 | lazy val db = SlickDatabase.database(cfg, new SlickConfiguration(cfg.getConfig("slick")), "slick.db") 35 | 36 | override def beforeAll(): Unit = { 37 | dropCreate(schemaType) 38 | super.beforeAll() 39 | } 40 | 41 | override def afterAll(): Unit = { 42 | db.close() 43 | } 44 | } 45 | 46 | class PlainSnapshotStoreSpec extends PostgresSnapshotStoreSpec(Plain) 47 | -------------------------------------------------------------------------------- /core/src/test/scala/akka/persistence/postgres/snapshot/dao/ByteArraySnapshotSerializerTest.scala: -------------------------------------------------------------------------------- 1 | package akka.persistence.postgres.snapshot.dao 2 | 3 | import java.time.{ LocalDateTime, ZoneOffset } 4 | 5 | import akka.persistence.SnapshotMetadata 6 | import akka.persistence.postgres.SharedActorSystemTestSpec 7 | import 
akka.persistence.postgres.snapshot.dao.SnapshotTables.SnapshotRow 8 | import akka.serialization.Serializers 9 | import io.circe.Json 10 | import org.scalatest.TryValues 11 | 12 | class ByteArraySnapshotSerializerTest extends SharedActorSystemTestSpec with TryValues { 13 | 14 | val serializer = new ByteArraySnapshotSerializer(serialization) 15 | val fakeSnapshot = "fake snapshot" 16 | val payloadSer = serialization.serializerFor(fakeSnapshot.getClass) 17 | val serId = payloadSer.identifier 18 | val serManifest = Serializers.manifestFor(payloadSer, fakeSnapshot) 19 | 20 | it should "serialize snapshot and its metadata" in { 21 | val timestamp = LocalDateTime.now().toEpochSecond(ZoneOffset.UTC) 22 | val result = serializer.serialize(SnapshotMetadata("per-1", 42, timestamp), fakeSnapshot) 23 | val row = result.get 24 | row.persistenceId should equal("per-1") 25 | row.sequenceNumber shouldBe 42 26 | row.created shouldBe timestamp 27 | row.metadata should equal { 28 | Json.obj( 29 | // serialization manifest for String should be blank and omitted 30 | "sid" -> Json.fromInt(serId)) 31 | } 32 | } 33 | 34 | { 35 | 36 | val serializedPayload = { 37 | val timestamp = LocalDateTime.now().toEpochSecond(ZoneOffset.UTC) 38 | val row = serializer.serialize(SnapshotMetadata("per-1", 42, timestamp), fakeSnapshot).get 39 | row.snapshot 40 | } 41 | 42 | it should "deserialize snapshot and metadata" in { 43 | val timestamp = LocalDateTime.now().toEpochSecond(ZoneOffset.UTC) 44 | val meta = Json.obj("sid" -> Json.fromInt(serId)) 45 | val row = SnapshotRow("per-1", 42, timestamp, serializedPayload, meta) 46 | val (metadata, snapshot) = serializer.deserialize(row).get 47 | snapshot should equal(fakeSnapshot) 48 | metadata should equal(SnapshotMetadata("per-1", 42, timestamp)) 49 | } 50 | 51 | it should "deserialize metadata with legacy long keys" in { 52 | val timestamp = LocalDateTime.now().toEpochSecond(ZoneOffset.UTC) 53 | val meta = Json.obj("serId" -> Json.fromInt(serId), "serManifest" -> Json.fromString("")) 54 | val row = SnapshotRow("per-1", 42, timestamp, serializedPayload, meta) 55 | val (metadata, _) = serializer.deserialize(row).get 56 | metadata should equal(SnapshotMetadata("per-1", 42, timestamp)) 57 | } 58 | 59 | it should "deserialize metadata with mixed legacy long & new short keys - short keys takes precedence" in { 60 | val timestamp = LocalDateTime.now().toEpochSecond(ZoneOffset.UTC) 61 | val meta = Json.obj( 62 | "sid" -> Json.fromInt(serId), 63 | "serId" -> Json.fromInt(-1), 64 | "serManifest" -> Json.fromString("this will be ignored")) 65 | val row = SnapshotRow("per-1", 42, timestamp, serializedPayload, meta) 66 | val (metadata, _) = serializer.deserialize(row).get 67 | metadata should equal(SnapshotMetadata("per-1", 42, timestamp)) 68 | } 69 | } 70 | 71 | } 72 | -------------------------------------------------------------------------------- /core/src/test/scala/akka/persistence/postgres/snapshot/dao/SnapshotTablesTest.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2019 Dennis Vriend 3 | * Copyright (C) 2019 - 2020 Lightbend Inc. 
4 | */ 5 | 6 | package akka.persistence.postgres.snapshot.dao 7 | 8 | import akka.persistence.postgres.TablesTestSpec 9 | 10 | class SnapshotTablesTest extends TablesTestSpec { 11 | val snapshotTableConfiguration = snapshotConfig.snapshotTableConfiguration 12 | object TestByteASnapshotTables extends SnapshotTables { 13 | override val snapshotTableCfg = snapshotTableConfiguration 14 | } 15 | 16 | "SnapshotTable" should "be configured with a schema name" in { 17 | TestByteASnapshotTables.SnapshotTable.baseTableRow.schemaName shouldBe snapshotTableConfiguration.schemaName 18 | } 19 | 20 | it should "be configured with a table name" in { 21 | TestByteASnapshotTables.SnapshotTable.baseTableRow.tableName shouldBe snapshotTableConfiguration.tableName 22 | } 23 | 24 | it should "be configured with column names" in { 25 | val colName = toColumnName(snapshotTableConfiguration.tableName)(_) 26 | TestByteASnapshotTables.SnapshotTable.baseTableRow.persistenceId.toString shouldBe colName( 27 | snapshotTableConfiguration.columnNames.persistenceId) 28 | TestByteASnapshotTables.SnapshotTable.baseTableRow.sequenceNumber.toString shouldBe colName( 29 | snapshotTableConfiguration.columnNames.sequenceNumber) 30 | TestByteASnapshotTables.SnapshotTable.baseTableRow.created.toString shouldBe colName( 31 | snapshotTableConfiguration.columnNames.created) 32 | TestByteASnapshotTables.SnapshotTable.baseTableRow.snapshot.toString shouldBe colName( 33 | snapshotTableConfiguration.columnNames.snapshot) 34 | } 35 | } 36 | -------------------------------------------------------------------------------- /core/src/test/scala/akka/persistence/postgres/tag/SimpleTagDaoSpec.scala: -------------------------------------------------------------------------------- 1 | package akka.persistence.postgres.tag 2 | 3 | import java.util.concurrent.ThreadLocalRandom 4 | 5 | import akka.persistence.postgres.config.{ SlickConfiguration, TagsTableConfiguration } 6 | import akka.persistence.postgres.db.SlickDatabase 7 | import com.typesafe.config.{ Config, ConfigFactory } 8 | import org.scalatest.concurrent.{ IntegrationPatience, ScalaFutures } 9 | import org.scalatest.flatspec.AnyFlatSpecLike 10 | import org.scalatest.matchers.should.Matchers 11 | import org.scalatest.{ BeforeAndAfter, BeforeAndAfterAll, OptionValues } 12 | import slick.jdbc 13 | 14 | import scala.concurrent.ExecutionContext 15 | 16 | class SimpleTagDaoSpec 17 | extends AnyFlatSpecLike 18 | with Matchers 19 | with ScalaFutures 20 | with OptionValues 21 | with BeforeAndAfterAll 22 | with BeforeAndAfter 23 | with IntegrationPatience { 24 | 25 | import akka.persistence.postgres.db.ExtendedPostgresProfile.api._ 26 | 27 | private implicit val global: ExecutionContext = ExecutionContext.global 28 | 29 | before { 30 | withDB { db => 31 | db.run(sqlu"""TRUNCATE tags""".transactionally).futureValue 32 | } 33 | } 34 | 35 | it should "return id of existing tag" in withDB { db => 36 | // given 37 | val dao = new SimpleTagDao(db, tagTableConfig) 38 | val tagName = "predefined" 39 | db.run(sqlu"""INSERT INTO tags (name) VALUES ('#$tagName')""".transactionally).futureValue 40 | 41 | // when 42 | val returnedTagId = dao.find(tagName).futureValue 43 | // then 44 | returnedTagId shouldBe defined 45 | } 46 | 47 | it should "return None if tag cannot be found" in withDao { dao => 48 | // given 49 | val tagName = "non-existing" 50 | 51 | // when 52 | val returnedTagId = dao.find(tagName).futureValue 53 | // then 54 | returnedTagId should not be defined 55 | } 56 | 57 | it should "return id of 
created tag" in withDao { dao => 58 | // given 59 | val tagName = generateTagName() 60 | val tagId = dao.insert(tagName).futureValue 61 | 62 | // when 63 | val returnedTagId = dao.find(tagName).futureValue 64 | // then 65 | returnedTagId.value should equal(tagId) 66 | } 67 | 68 | private def withDao(f: TagDao => Unit): Unit = 69 | withDB { db => 70 | val dao = new SimpleTagDao(db, tagTableConfig) 71 | f(dao) 72 | } 73 | 74 | lazy val journalConfig: Config = { 75 | val globalConfig = ConfigFactory.load("plain-application.conf") 76 | globalConfig.getConfig("postgres-journal") 77 | } 78 | lazy val slickConfig: SlickConfiguration = new SlickConfiguration(journalConfig.getConfig("slick")) 79 | lazy val tagTableConfig: TagsTableConfiguration = new TagsTableConfiguration(ConfigFactory.empty) 80 | 81 | private def withDB(f: jdbc.JdbcBackend.Database => Unit): Unit = { 82 | lazy val db = SlickDatabase.database(journalConfig, slickConfig, "slick.db") 83 | try { 84 | f(db) 85 | } finally { 86 | db.close() 87 | } 88 | } 89 | 90 | private def generateTagName()(implicit position: org.scalactic.source.Position): String = 91 | s"dao-spec-${position.lineNumber}-${ThreadLocalRandom.current().nextInt()}" 92 | } 93 | -------------------------------------------------------------------------------- /core/src/test/scala/akka/persistence/postgres/util/BaseQueryTest.scala: -------------------------------------------------------------------------------- 1 | package akka.persistence.postgres.util 2 | 3 | import akka.persistence.postgres.SingleActorSystemPerTestSpec 4 | import akka.persistence.postgres.db.ExtendedPostgresProfile 5 | import slick.lifted.RunnableCompiled 6 | 7 | class BaseQueryTest extends SingleActorSystemPerTestSpec { 8 | import akka.persistence.postgres.db.ExtendedPostgresProfile.api._ 9 | implicit class SQLStringMatcherRunnableCompiled(under: RunnableCompiled[_, _]) { 10 | def toSQL: String = { 11 | under.result.toSQL 12 | } 13 | 14 | def shouldBeSQL(expected: String): Unit = { 15 | under.toSQL shouldBe expected 16 | } 17 | } 18 | implicit class SQLStringMatcherProfileAction(under: ExtendedPostgresProfile.ProfileAction[_, _, _]) { 19 | 20 | def toSQL: String = { 21 | under.statements.toList.mkString(" ") 22 | } 23 | 24 | def shouldBeSQL(expected: String): Unit = { 25 | under.toSQL shouldBe expected 26 | } 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /core/src/test/scala/akka/persistence/postgres/util/ClasspathResources.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2019 Dennis Vriend 3 | * Copyright (C) 2019 - 2020 Lightbend Inc. 
4 | */ 5 | 6 | package akka.persistence.postgres.util 7 | 8 | import java.io.InputStream 9 | 10 | import scala.io.{ Source => ScalaIOSource } 11 | 12 | object ClasspathResources extends ClasspathResources 13 | 14 | trait ClasspathResources { 15 | def streamToString(is: InputStream): String = 16 | ScalaIOSource.fromInputStream(is).mkString 17 | 18 | def fromClasspathAsString(fileName: String): String = 19 | streamToString(fromClasspathAsStream(fileName)) 20 | 21 | def fromClasspathAsStream(fileName: String): InputStream = 22 | getClass.getClassLoader.getResourceAsStream(fileName) 23 | } 24 | -------------------------------------------------------------------------------- /core/src/test/scala/akka/persistence/postgres/util/DropCreate.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014 - 2019 Dennis Vriend 3 | * Copyright (C) 2019 - 2020 Lightbend Inc. 4 | */ 5 | 6 | package akka.persistence.postgres.util 7 | 8 | import java.sql.Statement 9 | import akka.persistence.postgres.config.{ JournalMetadataTableConfiguration, JournalTableConfiguration } 10 | import akka.persistence.postgres.journal.dao.{ 11 | FlatJournalTable, 12 | JournalMetadataTable, 13 | JournalTable, 14 | NestedPartitionsJournalTable, 15 | PartitionedJournalTable 16 | } 17 | import akka.persistence.postgres.util.Schema.SchemaType 18 | import slick.jdbc.JdbcBackend.{ Database, Session } 19 | import slick.lifted.TableQuery 20 | 21 | object Schema { 22 | 23 | sealed trait SchemaType { 24 | def resourceNamePrefix: String 25 | lazy val schema: String = s"schema/postgres/$resourceNamePrefix-schema.sql" 26 | lazy val configName: String = s"${resourceNamePrefix}-application.conf" 27 | def table(journalTableCfg: JournalTableConfiguration): TableQuery[JournalTable] 28 | def metadataTable(journalMetadataTableCfg: JournalMetadataTableConfiguration) 29 | : TableQuery[JournalMetadataTable] = JournalMetadataTable.apply(journalMetadataTableCfg) 30 | } 31 | 32 | case object Plain extends SchemaType { 33 | override val resourceNamePrefix: String = "plain" 34 | override def table(journalTableCfg: JournalTableConfiguration): TableQuery[JournalTable] = 35 | FlatJournalTable(journalTableCfg) 36 | } 37 | 38 | case object NestedPartitions extends SchemaType { 39 | override val resourceNamePrefix: String = "nested-partitions" 40 | override def table(journalTableCfg: JournalTableConfiguration): TableQuery[JournalTable] = 41 | NestedPartitionsJournalTable(journalTableCfg) 42 | } 43 | 44 | case object Partitioned extends SchemaType { 45 | override val resourceNamePrefix: String = "partitioned" 46 | override def table(journalTableCfg: JournalTableConfiguration): TableQuery[JournalTable] = 47 | PartitionedJournalTable(journalTableCfg) 48 | } 49 | } 50 | 51 | trait DropCreate extends ClasspathResources { 52 | def db: Database 53 | 54 | def dropCreate(schemaType: SchemaType): Unit = 55 | create(schemaType.schema) 56 | 57 | def create(schema: String): Unit = 58 | for { 59 | schema <- Option(fromClasspathAsString(schema)) 60 | } withStatement { stmt => 61 | stmt.executeUpdate(schema) 62 | } 63 | 64 | def withDatabase[A](f: Database => A): A = 65 | f(db) 66 | 67 | def withSession[A](f: Session => A): A = { 68 | withDatabase { db => 69 | val session = db.createSession() 70 | try f(session) 71 | finally session.close() 72 | } 73 | } 74 | 75 | def withStatement[A](f: Statement => A): A = 76 | withSession(session => session.withStatement()(f)) 77 | } 78 | 
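The `Schema` and `DropCreate` helpers above tie each `SchemaType` (`Plain`, `NestedPartitions`, `Partitioned`) to its DDL script under `schema/postgres/` and to the matching Slick `TableQuery`. The following is a minimal sketch of how they compose — it is not a file from this repository, the object name is invented, and it assumes a locally running PostgreSQL that matches `plain-application.conf`:

```scala
// Hypothetical usage sketch — not part of the repository sources.
import akka.persistence.postgres.config.SlickConfiguration
import akka.persistence.postgres.db.SlickDatabase
import akka.persistence.postgres.util.{ DropCreate, Schema }
import com.typesafe.config.ConfigFactory

object RecreatePlainSchema extends App with DropCreate {
  // Load the same journal config the test suites use for the flat ("plain") schema.
  private val cfg = ConfigFactory.load(Schema.Plain.configName).getConfig("postgres-journal")

  // DropCreate only needs a Slick Database; build it the way the specs in this tree do.
  override lazy val db =
    SlickDatabase.database(cfg, new SlickConfiguration(cfg.getConfig("slick")), "slick.db")

  // Runs schema/postgres/plain-schema.sql from the classpath against the database.
  dropCreate(Schema.Plain)
  db.close()
}
```

The concrete test suites get the same behaviour by mixing `DropCreate` into the spec and calling `dropCreate(schemaType)` in `beforeAll()`, as `PostgresSnapshotStoreSpec` does earlier in this tree.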
-------------------------------------------------------------------------------- /docs/.gitignore: -------------------------------------------------------------------------------- 1 | _site 2 | .sass-cache 3 | .jekyll-metadata 4 | -------------------------------------------------------------------------------- /docs/404.html: -------------------------------------------------------------------------------- 1 | --- 2 | layout: default 3 | --- 4 | 5 | 18 | 19 |
20 |   <h1>404</h1> 21 | 22 |   <p><strong>Page not found :(</strong></p> 23 |   <p>The requested page could not be found.</p> 24 | </div>
25 | -------------------------------------------------------------------------------- /docs/Gemfile: -------------------------------------------------------------------------------- 1 | source "https://rubygems.org" 2 | 3 | # Hello! This is where you manage which Jekyll version is used to run. 4 | # When you want to use a different version, change it below, save the 5 | # file and run `bundle install`. Run Jekyll with `bundle exec`, like so: 6 | # 7 | # bundle exec jekyll serve 8 | # 9 | # This will help ensure the proper Jekyll version is running. 10 | # Happy Jekylling! 11 | 12 | # This is the default theme for new Jekyll sites. You may change this to anything you like. 13 | gem "minima", "~> 2.5.1" 14 | 15 | # If you want to use GitHub Pages, remove the "gem "jekyll"" above and 16 | # uncomment the line below. To upgrade, run `bundle update github-pages`. 17 | gem "github-pages", group: :jekyll_plugins 18 | gem "just-the-docs" 19 | 20 | # If you have any plugins, put them here! 21 | group :jekyll_plugins do 22 | gem "jekyll-feed", "~> 0.15.1" 23 | end 24 | 25 | # Windows does not include zoneinfo files, so bundle the tzinfo-data gem 26 | # and associated library. 27 | install_if -> { RUBY_PLATFORM =~ %r!mingw|mswin|java! } do 28 | gem "tzinfo", "~> 1.2.10" 29 | gem "tzinfo-data" 30 | end 31 | 32 | # Performance-booster for watching directories on Windows 33 | gem "wdm", "~> 0.1.0", :install_if => Gem.win_platform? 34 | -------------------------------------------------------------------------------- /docs/_config.yml: -------------------------------------------------------------------------------- 1 | # Welcome to Jekyll! 2 | # 3 | # This config file is meant for settings that affect your whole blog, values 4 | # which you are expected to set up once and rarely edit after that. If you find 5 | # yourself editing this file very often, consider using Jekyll's data files 6 | # feature for the data you need to update frequently. 7 | # 8 | # For technical reasons, this file is *NOT* reloaded automatically when you use 9 | # 'bundle exec jekyll serve'. If you change this file, please restart the server process. 10 | 11 | # Site settings 12 | # These are used to personalize your new site. If you look in the HTML files, 13 | # you will see them accessed via {{ site.title }}, {{ site.email }}, and so on. 14 | # You can create any custom variable you would like, and they will be accessible 15 | # in the templates via {{ site.myvariable }}. 16 | title: Akka Persistence Postgres 17 | description: >- # this means to ignore newlines until "baseurl:" 18 | The Akka Persistence Postgres plugin allows for using PostgreSQL 11 and Amazon Aurora databases as backend for Akka Persistence and Akka Persistence Query. 19 | The main goal is to keep index size and memory consumption on a moderate level while being able to cope with an increasing data volume. 20 | baseurl: "" # the subpath of your site, e.g. /blog 21 | url: "https://swissborg.github.io/akka-persistence-postgres" 22 | repo: "https://github.com/SwissBorg/akka-persistence-postgres/tree/master" 23 | 24 | # Build settings 25 | markdown: kramdown 26 | remote_theme: mkubala/just-the-docs 27 | plugins: 28 | - jekyll-mentions 29 | - jekyll-relative-links 30 | - jekyll-sitemap 31 | - rouge 32 | - jemoji 33 | 34 | # Exclude from processing. 35 | # The following items will not be processed, by default. Create a custom list 36 | # to override the default setting. 
37 | # exclude: 38 | # - Gemfile 39 | # - Gemfile.lock 40 | # - node_modules 41 | # - vendor/bundle/ 42 | # - vendor/cache/ 43 | # - vendor/gems/ 44 | # - vendor/ruby/ 45 | -------------------------------------------------------------------------------- /docs/assets/partitioning/detaching.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SwissBorg/akka-persistence-postgres/55e8618c2d28c73f9e7126fdefa224b798d2e5a4/docs/assets/partitioning/detaching.png -------------------------------------------------------------------------------- /docs/assets/partitioning/flat-journal.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SwissBorg/akka-persistence-postgres/55e8618c2d28c73f9e7126fdefa224b798d2e5a4/docs/assets/partitioning/flat-journal.png -------------------------------------------------------------------------------- /docs/assets/partitioning/nested-partitions-journal.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SwissBorg/akka-persistence-postgres/55e8618c2d28c73f9e7126fdefa224b798d2e5a4/docs/assets/partitioning/nested-partitions-journal.png -------------------------------------------------------------------------------- /docs/assets/partitioning/partitioned-journal.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SwissBorg/akka-persistence-postgres/55e8618c2d28c73f9e7126fdefa224b798d2e5a4/docs/assets/partitioning/partitioned-journal.png -------------------------------------------------------------------------------- /docs/assets/project-logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SwissBorg/akka-persistence-postgres/55e8618c2d28c73f9e7126fdefa224b798d2e5a4/docs/assets/project-logo.png -------------------------------------------------------------------------------- /docs/assets/softwaremill-logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SwissBorg/akka-persistence-postgres/55e8618c2d28c73f9e7126fdefa224b798d2e5a4/docs/assets/softwaremill-logo.png -------------------------------------------------------------------------------- /docs/assets/swissborg-logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SwissBorg/akka-persistence-postgres/55e8618c2d28c73f9e7126fdefa224b798d2e5a4/docs/assets/swissborg-logo.png -------------------------------------------------------------------------------- /docs/custom-dao.md: -------------------------------------------------------------------------------- 1 | --- 2 | layout: page 3 | title: Custom DAO Implementation 4 | permalink: /custom-dao 5 | nav_order: 40 6 | --- 7 | 8 | # Custom DAO Implementation 9 | 10 | The plugin supports loading a custom DAO for the journal and snapshot. You should implement a custom Data Access Object (DAO) if you wish to alter the default persistency strategy in 11 | any way, but wish to reuse all the logic that the plugin already has in place, eg. the Akka Persistence Query API. For example, the default persistency strategy that the plugin 12 | supports serializes journal and snapshot messages using a serializer of your choice and stores them as byte arrays in the database. 
13 | 14 | By means of configuration in application.conf a DAO can be configured, below the default DAOs are shown: 15 | 16 | ```hocon 17 | postgres-journal { 18 | dao = "akka.persistence.postgres.journal.dao.FlatJournalDao" 19 | } 20 | 21 | postgres-snapshot-store { 22 | dao = "akka.persistence.postgres.snapshot.dao.ByteArraySnapshotDao" 23 | } 24 | 25 | postgres-read-journal { 26 | dao = "akka.persistence.postgres.query.dao.FlatReadJournalDao" 27 | } 28 | ``` 29 | 30 | Storing messages as byte arrays in blobs is not the only way to store information in a database. For example, you could store messages with full type information as a normal database rows, each event type having its own table. 31 | For example, implementing a Journal Log table that stores all persistenceId, sequenceNumber and event type discriminator field, and storing the event data in another table with full typing 32 | 33 | You only have to implement two interfaces `akka.persistence.postgres.journal.dao.JournalDao` and/or `akka.persistence.postgres.snapshot.dao.SnapshotDao`. 34 | 35 | For example, take a look at the following two custom DAOs: 36 | 37 | ```scala 38 | class MyCustomJournalDao(db: Database, journalConfig: JournalConfig, serialization: Serialization)(implicit ec: ExecutionContext, mat: Materializer) extends JournalDao { 39 | // snip 40 | } 41 | 42 | class MyCustomSnapshotDao(db: JdbcBackend#Database, snapshotConfig: SnapshotConfig, serialization: Serialization)(implicit ec: ExecutionContext, val mat: Materializer) extends SnapshotDao { 43 | // snip 44 | } 45 | ``` 46 | 47 | As you can see, the custom DAOs get a _Slick database_, the journal or snapshot _configuration_, an _akka.serialization.Serialization_, an _ExecutionContext_ and _Materializer_ injected after constructed. 48 | You should register the Fully Qualified Class Name in application.conf so that the custom DAOs will be used. 49 | 50 | For more information please review the two default implementations `akka.persistence.postgres.journal.dao.FlatJournalDao` and `akka.persistence.postgres.snapshot.dao.ByteArraySnapshotDao` or the demo custom DAO example from the [demo-akka-persistence-postgres](https://github.com/mkubala/demo-akka-persistence-postgres) site. 51 | 52 | > :warning: The APIs for custom DAOs are not guaranteed to be binary backwards compatible between major versions of the plugin. 53 | > There may also be source incompatible changes of the APIs for customer DAOs if new capabilities must be added to the traits. 54 | -------------------------------------------------------------------------------- /docs/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SwissBorg/akka-persistence-postgres/55e8618c2d28c73f9e7126fdefa224b798d2e5a4/docs/favicon.ico -------------------------------------------------------------------------------- /docs/index.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: About the plugin 3 | nav_order: 0 4 | --- 5 | 6 | ![Akka Persistence Postgres]({{ 'assets/project-logo.png' | absolute_url }}) 7 | 8 | The Akka Persistence Postgres plugin allows for using Postgres database as backend for [Akka Persistence](https://doc.akka.io/docs/akka/current/persistence.html) and [Akka Persistence Query](https://doc.akka.io/docs/akka/current/persistence-query.html). 9 | 10 | akka-persistence-postgres writes journal and snapshot entries to a configured PostgreSQL store. 
It implements the full akka-persistence-query API and is therefore very useful for implementing DDD-style application models using Akka and Scala for creating reactive applications. 11 | 12 | It’s been originally created as a fork of [Akka Persistence JDBC plugin](https://github.com/akka/akka-persistence-jdbc) 4.0.0, focused on PostgreSQL features such as partitions, arrays, BRIN indexes and others. Many parts of this doc have been adopted from the original [project page](https://doc.akka.io/docs/akka-persistence-jdbc/4.0.0/index.html). 13 | 14 | The main goal is to keep index size and memory consumption on a moderate level while being able to cope with an increasing data volume. 15 | 16 | ## Installation 17 | 18 | To use `akka-persistence-postgres` in your SBT project, add the following to your `build.sbt`: 19 | 20 | ```scala 21 | libraryDependencies += "com.swisborg" %% "akka-persistence-postgres" % "0.6.0" 22 | ``` 23 | 24 | For a maven project add: 25 | ```xml 26 | 27 | com.swisborg 28 | akka-persistence-postgres_2.13 29 | 0.6.0 30 | 31 | ``` 32 | to your `pom.xml`. 33 | 34 | > :warning: Since Akka [does not allow mixed versions](https://doc.akka.io/docs/akka/current/common/binary-compatibility-rules.html#mixed-versioning-is-not-allowed) in a project, Akka dependencies are marked a `Provided`. This means that your application `libraryDependencies` needs to directly include Akka as a dependency. The minimal supported Akka version is 2.6.16. 35 | 36 | 37 | ## Source code 38 | 39 | Source code for this plugin can be found on [GitHub](https://github.com/SwissBorg/akka-persistence-postgres). 40 | 41 | ## Contribution policy 42 | 43 | Contributions via GitHub pull requests are gladly accepted. Along with any pull requests, please state that the contribution is your original work and that you license the work to the project under the project's open source license. Whether or not you state this explicitly, by submitting any copyrighted material via pull request, email, or other means you agree to license the material under the project's open source license and warrant that you have the legal authority to do so. 44 | 45 | ## Contributors 46 | List of all contributors can be found on [GitHub](https://github.com/SwissBorg/akka-persistence-postgres/graphs/contributors). 47 | 48 | ## Sponsors 49 | 50 | Development and maintenance of akka-persistence-postgres is sponsored by: 51 | 52 | ![SoftwareMill]({{ 'assets/softwaremill-logo.png' | absolute_url }}) 53 | 54 | [SoftwareMill](https://softwaremill.com) is a software development and consulting company. We help clients scale their business through software. Our areas of expertise include backends, distributed systems, blockchain, machine learning and data analytics. 55 | 56 | ![SwissBorg]({{ 'assets/swissborg-logo.png' | absolute_url }}) 57 | 58 | [SwissBorg](https://swissborg.com) makes managing your crypto investment easy and helps control your wealth. 59 | 60 | ## License 61 | 62 | This source code is made available under the [Apache 2.0 License](https://www.apache.org/licenses/LICENSE-2.0). 
63 | -------------------------------------------------------------------------------- /docs/serve.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | bundle exec jekyll serve 4 | -------------------------------------------------------------------------------- /docs/tagging.md: -------------------------------------------------------------------------------- 1 | --- 2 | layout: page 3 | title: Tagging events 4 | permalink: /tagging 5 | nav_order: 20 6 | --- 7 | 8 | # Tagging events 9 | 10 | To tag events you'll need to create an [Event Adapter](https://doc.akka.io/docs/akka/current/persistence.html#event-adapters) that will wrap the event in a [akka.persistence.journal.Tagged](https://doc.akka.io/api/akka/current/akka/persistence/journal/Tagged.html) class with the given tags. The `Tagged` class will instruct `akka-persistence-postgres` to tag the event with the given set of tags. 11 | 12 | The persistence plugin will __not__ store the `Tagged` class in the journal. It will strip the `tags` and `payload` from the `Tagged` class, and use the class only as an instruction to tag the event with the given tags and store the `payload` in the `message` field of the journal table. 13 | 14 | ```scala 15 | package com.swissborg.example 16 | 17 | import akka.persistence.journal.{ Tagged, WriteEventAdapter } 18 | import com.swissborg.example.Person.{ LastNameChanged, FirstNameChanged, PersonCreated } 19 | 20 | class TaggingEventAdapter extends WriteEventAdapter { 21 | override def manifest(event: Any): String = "" 22 | 23 | def withTag(event: Any, tag: String) = Tagged(event, Set(tag)) 24 | 25 | override def toJournal(event: Any): Any = event match { 26 | case _: PersonCreated => 27 | withTag(event, "person-created") 28 | case _: FirstNameChanged => 29 | withTag(event, "first-name-changed") 30 | case _: LastNameChanged => 31 | withTag(event, "last-name-changed") 32 | case _ => event 33 | } 34 | } 35 | ``` 36 | 37 | The `EventAdapter` must be registered by adding the following to the root of `application.conf` Please see the [demo-akka-persistence-postgres](https://github.com/mkubala/demo-akka-persistence-postgres) project for more information. 38 | 39 | ```bash 40 | postgres-journal { 41 | event-adapters { 42 | tagging = "com.swissborg.example.TaggingEventAdapter" 43 | } 44 | event-adapter-bindings { 45 | "com.swissborg.example.Person$PersonCreated" = tagging 46 | "com.swissborg.example.Person$FirstNameChanged" = tagging 47 | "com.swissborg.example.Person$LastNameChanged" = tagging 48 | } 49 | } 50 | ``` 51 | 52 | You can retrieve a subset of all events by specifying offset, or use `0L` to retrieve all events with a given tag. The offset corresponds to an ordered sequence number for the specific tag. Note that the corresponding offset of each event is provided in the `EventEnvelope`, which makes it possible to resume the stream at a later point from a given offset. 53 | 54 | In addition to the offset the `EventEnvelope` also provides `persistenceId` and `sequenceNr` for each event. The `sequenceNr` is the sequence number for the persistent actor with the `persistenceId` that persisted the event. The `persistenceId` + `sequenceNr` is an unique identifier for the event. 55 | 56 | The returned event stream contains only events that correspond to the given tag, and is ordered by the creation time of the events. The same stream elements (in same order) are returned for multiple executions of the same query. 
Deleted events are not deleted from the tagged event stream. 57 | -------------------------------------------------------------------------------- /migration/src/main/resources/reference.conf: -------------------------------------------------------------------------------- 1 | akka-persistence-postgres { 2 | migration { 3 | batchSize = 500 4 | } 5 | } 6 | -------------------------------------------------------------------------------- /migration/src/main/scala/akka/persistence/postgres/migration/PgSlickSupport.scala: -------------------------------------------------------------------------------- 1 | package akka.persistence.postgres.migration 2 | 3 | import io.circe.{ Json, Printer } 4 | import org.slf4j.{ Logger, LoggerFactory } 5 | import slick.jdbc.{ GetResult, SetParameter } 6 | 7 | abstract class PgSlickSupport { 8 | 9 | lazy val log: Logger = LoggerFactory.getLogger(this.getClass) 10 | 11 | implicit val GetByteArr: GetResult[Array[Byte]] = GetResult(_.nextBytes()) 12 | implicit val SetByteArr: SetParameter[Array[Byte]] = SetParameter((arr, pp) => pp.setBytes(arr)) 13 | implicit val SetJson: SetParameter[Json] = SetParameter((json, pp) => pp.setString(json.printWith(Printer.noSpaces))) 14 | 15 | } 16 | -------------------------------------------------------------------------------- /migration/src/main/scala/akka/persistence/postgres/migration/journal/OldJournalDeserializer.scala: -------------------------------------------------------------------------------- 1 | package akka.persistence.postgres.migration.journal 2 | 3 | import akka.persistence.PersistentRepr 4 | import akka.serialization.Serialization 5 | 6 | import scala.util.Try 7 | 8 | private[journal] class OldJournalDeserializer(serialization: Serialization) { 9 | 10 | def deserialize(message: Array[Byte]): Try[PersistentRepr] = 11 | serialization.deserialize(message, classOf[PersistentRepr]) 12 | 13 | } 14 | -------------------------------------------------------------------------------- /migration/src/main/scala/akka/persistence/postgres/migration/snapshot/OldSnapshotDeserializer.scala: -------------------------------------------------------------------------------- 1 | package akka.persistence.postgres.migration.snapshot 2 | 3 | import akka.persistence.serialization.Snapshot 4 | import akka.serialization.Serialization 5 | 6 | import scala.util.Try 7 | 8 | private[snapshot] class OldSnapshotDeserializer(serialization: Serialization) { 9 | 10 | def deserialize(rawSnapshot: Array[Byte]): Try[Any] = { 11 | serialization.deserialize(rawSnapshot, classOf[Snapshot]).map(_.data) 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /migration/src/main/scala/akka/persistence/postgres/migration/snapshot/SnapshotMigrationQueries.scala: -------------------------------------------------------------------------------- 1 | package akka.persistence.postgres.migration.snapshot 2 | 3 | import akka.persistence.postgres.config.SnapshotTableConfiguration 4 | import akka.persistence.postgres.db.ExtendedPostgresProfile.api._ 5 | import akka.persistence.postgres.snapshot.dao.SnapshotTables.SnapshotRow 6 | import io.circe.Json 7 | 8 | import scala.concurrent.ExecutionContext 9 | 10 | private[snapshot] class SnapshotMigrationQueries(snapshotTableCfg: SnapshotTableConfiguration, tempTableName: String) { 11 | 12 | def insertOrUpdate(rows: List[SnapshotRow])(implicit ec: ExecutionContext): DBIOAction[Int, NoStream, Effect.Write] = 13 | TempSnapshotTable.insertOrUpdateAll(rows.sortBy(_.created)).map(_ => 
rows.size) 14 | 15 | class TempSnapshotTable(_tableTag: Tag) 16 | extends Table[SnapshotRow](_tableTag, _schemaName = snapshotTableCfg.schemaName, _tableName = tempTableName) { 17 | def * = 18 | (persistenceId, sequenceNumber, created, snapshot, metadata) <> (SnapshotRow.tupled, SnapshotRow.unapply) 19 | 20 | val persistenceId: Rep[String] = 21 | column[String](snapshotTableCfg.columnNames.persistenceId, O.Length(255, varying = true)) 22 | val sequenceNumber: Rep[Long] = column[Long](snapshotTableCfg.columnNames.sequenceNumber) 23 | val created: Rep[Long] = column[Long](snapshotTableCfg.columnNames.created) 24 | val snapshot: Rep[Array[Byte]] = column[Array[Byte]](snapshotTableCfg.columnNames.snapshot) 25 | val metadata: Rep[Json] = column[Json](snapshotTableCfg.columnNames.metadata) 26 | val pk = primaryKey(s"${tableName}_pk", (persistenceId, sequenceNumber)) 27 | } 28 | 29 | lazy val TempSnapshotTable = new TableQuery(tag => new TempSnapshotTable(tag)) 30 | } 31 | -------------------------------------------------------------------------------- /migration/src/test/resources/base-migration.conf: -------------------------------------------------------------------------------- 1 | akka { 2 | stdout-loglevel = off // defaults to WARNING can be disabled with off. The stdout-loglevel is only in effect during system startup and shutdown 3 | log-dead-letters-during-shutdown = on 4 | loglevel = WARNING 5 | log-dead-letters = on 6 | log-config-on-start = off // Log the complete configuration at INFO level when the actor system is started 7 | 8 | loggers = ["akka.event.slf4j.Slf4jLogger"] 9 | logging-filter = "akka.event.slf4j.Slf4jLoggingFilter" 10 | 11 | persistence { 12 | journal.plugin = "postgres-journal" 13 | snapshot-store.plugin = "postgres-snapshot-store" 14 | } 15 | 16 | actor { 17 | serialize-messages = off // verify that messages are serializable 18 | 19 | debug { 20 | receive = on // log all messages sent to an actor if that actors receive method is a LoggingReceive 21 | autoreceive = on // log all special messages like Kill, PoisoffPill etc sent to all actors 22 | lifecycle = on // log all actor lifecycle events of all actors 23 | fsm = off // enable logging of all events, transitioffs and timers of FSM Actors that extend LoggingFSM 24 | event-stream = on // enable logging of subscriptions (subscribe/unsubscribe) on the ActorSystem.eventStream 25 | } 26 | 27 | serializers { 28 | jackson-cbor = "akka.serialization.jackson.JacksonCborSerializer" 29 | } 30 | } 31 | } 32 | 33 | slick { 34 | profile = "akka.persistence.postgres.db.ExtendedPostgresProfile$" 35 | db { 36 | host = "localhost" 37 | url = "jdbc:postgresql://"localhost":5432/docker?reWriteBatchedInserts=true" 38 | user = "docker" 39 | password = "docker" 40 | 41 | driver = "org.postgresql.Driver" 42 | } 43 | } 44 | 45 | postgres-journal { 46 | slick = ${slick} 47 | tables { 48 | journal { 49 | schemaName = "migration" 50 | tableName = "fancy_journal" 51 | columnNames = { 52 | tags = "j_tags" 53 | created = "j_created" 54 | deleted = "j_del" 55 | message = "j_msg" 56 | metadata = "j_meta" 57 | persistenceId = "j_per_id" 58 | sequenceNumber = "j_seq_num" 59 | ordering = "j_ord" 60 | } 61 | partitions { 62 | prefix = "fancy_j" 63 | size = 50 64 | } 65 | } 66 | journalMetadata { 67 | schemaName = "migration" 68 | tableName = "fancy_journal_metadata" 69 | columnNames = { 70 | persistenceId = "jm_per_id" 71 | maxSequenceNumber = "jm_max_seq_num" 72 | maxOrdering = "jm_max_ord" 73 | minOrdering = "jm_min_ord" 74 | } 75 | } 76 | tags { 
77 | schemaName = "migration" 78 | tableName = "fancy_tags" 79 | columnNames { 80 | id = "t_id" 81 | name = "t_name" 82 | } 83 | } 84 | } 85 | } 86 | 87 | postgres-read-journal { 88 | slick = ${slick} 89 | tables = ${postgres-journal.tables} 90 | 91 | // We do not want to wait for JournalSequenceAcor 92 | journal-sequence-retrieval { 93 | batch-size = 10000 94 | max-tries = 1 95 | query-delay = 100 millis 96 | max-backoff-query-delay = 100 millis 97 | ask-timeout = 100 millis 98 | } 99 | 100 | } 101 | 102 | postgres-snapshot-store { 103 | slick = ${slick} 104 | tables { 105 | snapshot { 106 | schemaName = "migration" 107 | tableName = "fancy_snapshot" 108 | columnNames { 109 | persistenceId = "s_per_id" 110 | sequenceNumber = "s_seq_num" 111 | created = "s_created" 112 | snapshot = "s_snapshot" 113 | metadata = "s_meta" 114 | } 115 | } 116 | } 117 | } 118 | -------------------------------------------------------------------------------- /migration/src/test/resources/flat-migration.conf: -------------------------------------------------------------------------------- 1 | include "base-migration.conf" 2 | 3 | postgres-journal.dao = "akka.persistence.postgres.journal.dao.FlatJournalDao" 4 | -------------------------------------------------------------------------------- /migration/src/test/resources/logback-test.xml: -------------------------------------------------------------------------------- 1 | 2 | 17 | 18 | 19 | 20 | debug 21 | 22 | 23 | %date{ISO8601} - %logger -> %-5level[%thread] %logger{0} - %msg%n 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | -------------------------------------------------------------------------------- /migration/src/test/resources/nested-partitions-migration.conf: -------------------------------------------------------------------------------- 1 | include "base-migration.conf" 2 | 3 | postgres-journal.dao = "akka.persistence.postgres.journal.dao.NestedPartitionsJournalDao" 4 | -------------------------------------------------------------------------------- /migration/src/test/resources/partitioned-migration.conf: -------------------------------------------------------------------------------- 1 | include "base-migration.conf" 2 | 3 | postgres-journal.dao = "akka.persistence.postgres.journal.dao.PartitionedJournalDao" 4 | -------------------------------------------------------------------------------- /project/AutomaticModuleName.scala: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (C) 2009-2018 Lightbend Inc. 3 | */ 4 | import sbt.Keys._ 5 | import sbt.{ Def, _ } 6 | 7 | /** 8 | * Helper to set Automatic-Module-Name in projects. 9 | * 10 | * !! DO NOT BE TEMPTED INTO AUTOMATICALLY DERIVING THE NAMES FROM PROJECT NAMES !! 11 | * 12 | * The names carry a lot of implications and DO NOT have to always align 1:1 with the group ids or package names, though 13 | * there should be of course a strong relationship between them. 
14 | */ 15 | object AutomaticModuleName { 16 | private val AutomaticModuleName = "Automatic-Module-Name" 17 | 18 | def settings(name: String): Seq[Def.Setting[Task[Seq[PackageOption]]]] = Seq( 19 | Compile / packageBin / packageOptions += Package.ManifestAttributes(AutomaticModuleName -> name)) 20 | } 21 | -------------------------------------------------------------------------------- /project/Dependencies.scala: -------------------------------------------------------------------------------- 1 | import sbt._ 2 | 3 | object Dependencies { 4 | val Scala213 = "2.13.12" 5 | val ScalaVersions = Seq(Scala213) 6 | 7 | val AkkaVersion = "2.6.16" 8 | val FlywayVersion = "9.20.0" 9 | val ScaffeineVersion = "5.2.1" 10 | val ScalaTestVersion = "3.2.17" 11 | val SlickVersion = "3.4.1" 12 | val SlickPgVersion = "0.21.1" 13 | val SslConfigVersion = "0.6.1" 14 | 15 | val LogbackVersion = "1.4.14" 16 | 17 | val JdbcDrivers = Seq("org.postgresql" % "postgresql" % "42.7.1") 18 | 19 | val Libraries: Seq[ModuleID] = Seq( 20 | "ch.qos.logback" % "logback-classic" % LogbackVersion % Test, 21 | "com.github.blemale" %% "scaffeine" % ScaffeineVersion, 22 | "com.github.tminglei" %% "slick-pg" % SlickPgVersion, 23 | "com.github.tminglei" %% "slick-pg_circe-json" % SlickPgVersion, 24 | "com.typesafe.akka" %% "akka-slf4j" % AkkaVersion % Test, 25 | "com.typesafe.akka" %% "akka-persistence-tck" % AkkaVersion % Test, 26 | "com.typesafe.akka" %% "akka-stream-testkit" % AkkaVersion % Test, 27 | "com.typesafe.akka" %% "akka-testkit" % AkkaVersion % Test, 28 | "com.typesafe.akka" %% "akka-persistence-query" % AkkaVersion % Provided, 29 | "com.typesafe.slick" %% "slick" % SlickVersion, 30 | "com.typesafe.slick" %% "slick-hikaricp" % SlickVersion, 31 | "org.scalatest" %% "scalatest" % ScalaTestVersion % Test) ++ JdbcDrivers.map(_ % Test) 32 | 33 | val Migration: Seq[ModuleID] = 34 | Seq( 35 | ("com.typesafe.akka" %% "akka-persistence-query" % AkkaVersion).exclude("com.typesafe", "ssl-config-core"), 36 | "com.typesafe" %% "ssl-config-core" % SslConfigVersion).map(_ % Compile) ++ (Seq( 37 | "org.scalatest" %% "scalatest" % ScalaTestVersion, 38 | "com.typesafe.akka" %% "akka-testkit" % AkkaVersion, 39 | "ch.qos.logback" % "logback-classic" % LogbackVersion, 40 | "com.typesafe.akka" %% "akka-slf4j" % AkkaVersion, 41 | "com.typesafe.akka" %% "akka-serialization-jackson" % AkkaVersion, 42 | "org.flywaydb" % "flyway-core" % FlywayVersion) ++ JdbcDrivers).map(_ % Test) 43 | } 44 | -------------------------------------------------------------------------------- /project/ProjectAutoPlugin.scala: -------------------------------------------------------------------------------- 1 | import sbt.Keys._ 2 | import sbt._ 3 | import sbt.plugins.JvmPlugin 4 | 5 | object ProjectAutoPlugin extends AutoPlugin { 6 | object autoImport {} 7 | 8 | override val requires = JvmPlugin 9 | override val trigger: PluginTrigger = allRequirements 10 | 11 | override def globalSettings = 12 | Seq( 13 | organization := "com.swissborg", 14 | organizationName := "SwissBorg", 15 | organizationHomepage := None, 16 | homepage := Some(url("https://github.com/SwissBorg/akka-persistence-postgres")), 17 | licenses := Seq("Apache-2.0" -> url("https://opensource.org/licenses/Apache-2.0")), 18 | description := "A plugin for storing events in a PostgreSQL journal", 19 | startYear := Some(2020), 20 | developers := List( 21 | Developer("mkubala", "Marcin Kubala", "marcin.kubala+oss@softwaremill.com", url("https://softwaremill.com")))) 22 | 23 | override val projectSettings: 
Seq[Setting[_]] = Seq( 24 | crossVersion := CrossVersion.binary, 25 | crossScalaVersions := Dependencies.ScalaVersions, 26 | scalaVersion := Dependencies.Scala213, 27 | Test / fork := true, 28 | Test / parallelExecution := false, 29 | Test / logBuffered := true, 30 | scalacOptions ++= Seq( 31 | "-encoding", 32 | "UTF-8", 33 | "-deprecation", 34 | "-feature", 35 | "-unchecked", 36 | "-Xlog-reflective-calls", 37 | "-language:higherKinds", 38 | "-language:implicitConversions", 39 | "-Ydelambdafy:method", 40 | "-release:11"), 41 | Compile / doc / scalacOptions := scalacOptions.value ++ Seq( 42 | "-doc-title", 43 | "Akka Persistence Postgres", 44 | "-doc-version", 45 | version.value, 46 | "-sourcepath", 47 | (ThisBuild / baseDirectory).value.toString, 48 | "-skip-packages", 49 | "akka.pattern", // for some reason Scaladoc creates this 50 | "-doc-source-url", { 51 | val branch = if (isSnapshot.value) "master" else s"v${version.value}" 52 | s"https://github.com/SwissBorg/akka-persistence-postgres/tree/${branch}€{FILE_PATH_EXT}#L€{FILE_LINE}" 53 | }), 54 | // show full stack traces and test case durations 55 | Test / testOptions += Tests.Argument("-oDF"), 56 | scmInfo := Some( 57 | ScmInfo( 58 | url("https://github.com/SwissBorg/akka-persistence-postgres"), 59 | "git@github.com:SwissBorg/akka-persistence-postgres.git"))) 60 | 61 | } 62 | -------------------------------------------------------------------------------- /project/build.properties: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright 2016 Dennis Vriend 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
15 | # 16 | sbt.version=1.9.8 17 | -------------------------------------------------------------------------------- /project/plugins.sbt: -------------------------------------------------------------------------------- 1 | // compliance 2 | addSbtPlugin("org.scalameta" % "sbt-scalafmt" % "2.5.2") 3 | addSbtPlugin("com.typesafe" % "sbt-mima-plugin" % "1.1.3") 4 | // release 5 | addSbtPlugin("com.github.sbt" % "sbt-ci-release" % "1.5.12") 6 | // docs 7 | addSbtPlugin("com.github.sbt" % "sbt-unidoc" % "0.5.0") 8 | -------------------------------------------------------------------------------- /project/project-info.conf: -------------------------------------------------------------------------------- 1 | project-info { 2 | version: "current" 3 | shared-info { 4 | jdk-versions: ["Adopt OpenJDK 8", "Adopt OpenJDK 11"] 5 | issues: { 6 | url: "https://github.com/SwissBorg/akka-persistence-postgres/issues" 7 | text: "GitHub issues" 8 | } 9 | release-notes: { 10 | url: "https://github.com/SwissBorg/akka-persistence-postgres/releases" 11 | text: "GitHub releases" 12 | } 13 | } 14 | core: ${project-info.shared-info} { 15 | title: "Akka Persistence Postgres" 16 | jpms-name: "akka.persistence.postgres" 17 | levels: [ 18 | { 19 | readiness: CommunityDriven 20 | since: "2020-11-01" 21 | since-version: "0.4.0" 22 | } 23 | ] 24 | } 25 | migration: ${project-info.shared-info} { 26 | title: "Akka Persistence Postgres Migration" 27 | jpms-name: "akka.persistence.postgres.migration" 28 | levels: [ 29 | { 30 | readiness: CommunityDriven 31 | since: "2020-11-01" 32 | since-version: "0.4.0" 33 | } 34 | ] 35 | } 36 | } 37 | -------------------------------------------------------------------------------- /scripts/archivisation/1-create-archivisation-table.sql: -------------------------------------------------------------------------------- 1 | CREATE OR REPLACE PROCEDURE create_archivisation_table(IN schema TEXT, IN archivisation_table TEXT) AS 2 | $$ 3 | BEGIN 4 | EXECUTE 'CREATE TABLE IF NOT EXISTS ' || schema || '.' || archivisation_table || ' 5 | ( 6 | persistence_id TEXT NOT NULL, 7 | min_sequence_number BIGINT NOT NULL, 8 | max_sequence_number BIGINT NOT NULL, 9 | tablename TEXT NOT NULL, 10 | schemaname TEXT NOT NULL, 11 | parent_tablename TEXT NOT NULL, 12 | parent_schemaname TEXT NOT NULL, 13 | status TEXT NOT NULL, 14 | PRIMARY KEY (schemaname,tablename) 15 | );'; 16 | END ; 17 | $$ LANGUAGE plpgsql; 18 | -------------------------------------------------------------------------------- /scripts/archivisation/2-select-partitions-to-detach.sql: -------------------------------------------------------------------------------- 1 | CREATE OR REPLACE PROCEDURE find_and_mark_journal_nested_partitions_as_detached(IN schema TEXT, IN journal_table TEXT, 2 | IN snapshot_table TEXT, 3 | IN archivisation_table TEXT) AS 4 | $$ 5 | DECLARE 6 | row record; 7 | min_sequence_number BIGINT; 8 | inner_row record; 9 | sequence_number_range record; 10 | BEGIN 11 | FOR row IN 12 | SELECT nmsp_child.nspname AS child_schema, 13 | child.relname AS child 14 | FROM pg_inherits 15 | JOIN pg_class parent ON pg_inherits.inhparent = parent.oid 16 | JOIN pg_class child ON pg_inherits.inhrelid = child.oid 17 | JOIN pg_namespace nmsp_parent ON nmsp_parent.oid = parent.relnamespace 18 | JOIN pg_namespace nmsp_child ON nmsp_child.oid = child.relnamespace 19 | WHERE parent.relname = journal_table 20 | AND nmsp_parent.nspname = schema 21 | LOOP 22 | EXECUTE 'SELECT max(snp.sequence_number) ' || 23 | 'FROM ' || quote_ident(schema) || '.' 
|| quote_ident(snapshot_table) || ' AS snp ' || 24 | 'JOIN ' || quote_ident(row.child_schema) || '.' || quote_ident(row.child) || ' AS jrn ' || 25 | 'ON snp.persistence_id = jrn.persistence_id ' || 26 | 'AND snp.sequence_number = jrn.sequence_number' 27 | INTO min_sequence_number; 28 | 29 | FOR inner_row IN 30 | SELECT nmsp_child.nspname AS child_schema, 31 | child.relname AS child 32 | FROM pg_inherits 33 | JOIN pg_class parent ON pg_inherits.inhparent = parent.oid 34 | JOIN pg_class child ON pg_inherits.inhrelid = child.oid 35 | JOIN pg_namespace nmsp_parent ON nmsp_parent.oid = parent.relnamespace 36 | JOIN pg_namespace nmsp_child ON nmsp_child.oid = child.relnamespace 37 | WHERE parent.relname = row.child 38 | AND nmsp_parent.nspname = row.child_schema 39 | LOOP 40 | 41 | EXECUTE 'SELECT max(sequence_number) AS max, min(sequence_number) AS min, persistence_id 42 | FROM ' || quote_ident(inner_row.child_schema) || '.' || quote_ident(inner_row.child) || 43 | ' GROUP BY persistence_id' 44 | INTO sequence_number_range; 45 | 46 | -- > - because we would like that last event remain in journal 47 | IF min_sequence_number > sequence_number_range.max THEN 48 | EXECUTE ' 49 | INSERT INTO ' || schema || '.' || archivisation_table || '(persistence_id, min_sequence_number, max_sequence_number, schemaname, 50 | tablename, parent_schemaname, parent_tablename, status) 51 | VALUES (''' || sequence_number_range.persistence_id || ''',' || sequence_number_range.min || ', ' || 52 | sequence_number_range.max || ', ''' || inner_row.child_schema || ''',''' || 53 | inner_row.child || ''',''' || row.child_schema || ''',''' || row.child || ''', ''NEW'') 54 | ON CONFLICT DO NOTHING; 55 | '; 56 | END IF; 57 | END LOOP; 58 | END LOOP; 59 | END; 60 | $$ LANGUAGE plpgsql; 61 | -------------------------------------------------------------------------------- /scripts/archivisation/3-detach.sql: -------------------------------------------------------------------------------- 1 | CREATE OR REPLACE PROCEDURE detach_partitions_from_archivisation(IN schema TEXT, IN archivisation_table TEXT) AS 2 | $$ 3 | DECLARE 4 | row record; 5 | BEGIN 6 | FOR row IN EXECUTE ' 7 | SELECT schemaname, tablename, parent_schemaname, parent_tablename 8 | FROM ' || schema || '.' || archivisation_table || ' 9 | WHERE STATUS = ''NEW''' 10 | LOOP 11 | EXECUTE 'ALTER TABLE ' || row.parent_schemaname || '.' || row.parent_tablename || 12 | ' DETACH PARTITION ' || row.schemaname || '.' || row.tablename; 13 | EXECUTE 'UPDATE ' || schema || '.' || archivisation_table || 14 | ' SET STATUS = ''DETACHED'' 15 | WHERE schemaname = ''' || row.schemaname || ''' AND tablename = ''' || row.tablename || ''';'; 16 | END LOOP; 17 | END ; 18 | $$ LANGUAGE plpgsql; 19 | -------------------------------------------------------------------------------- /scripts/archivisation/4-export-detached.sh: -------------------------------------------------------------------------------- 1 | SCHEMA=$1 2 | ARCHIVISATION_TABLE=$2 3 | 4 | for i in $(psql -qt ${CONNECTION_OPTIONS} --command="SELECT schemaname || '.' || tablename FROM ${SCHEMA}.${ARCHIVISATION_TABLE} WHERE status='DETACHED';") 5 | do 6 | echo "dumping $i" 7 | pg_dump ${CONNECTION_OPTIONS} --table="$i" > "$i.dump" 8 | psql -qt ${CONNECTION_OPTIONS} --command="UPDATE ${SCHEMA}.${ARCHIVISATION_TABLE} SET status='DUMPED' WHERE schemaname || '.' 
|| tablename = '$i';" 9 | echo "dumped $i" 10 | done 11 | -------------------------------------------------------------------------------- /scripts/archivisation/5-drop-detached.sql: -------------------------------------------------------------------------------- 1 | CREATE OR REPLACE PROCEDURE drop_detached_partitions(IN schema TEXT, IN archivisation_table TEXT) AS 2 | $$ 3 | DECLARE 4 | row record; 5 | BEGIN 6 | FOR row IN EXECUTE 'SELECT schemaname, tablename 7 | FROM ' || schema || '.' || archivisation_table || ' 8 | WHERE STATUS = ''DUMPED''' 9 | LOOP 10 | EXECUTE 'DROP TABLE ' || row.schemaname || '.' || row.tablename; 11 | EXECUTE 'UPDATE ' || schema || '.' || archivisation_table || 12 | ' SET STATUS = ''DROPPED'' WHERE schemaname = ''' || row.schemaname || ''' AND tablename = ''' || row.tablename || ''';'; 13 | END LOOP; 14 | END ; 15 | $$ LANGUAGE plpgsql; 16 | -------------------------------------------------------------------------------- /scripts/archivisation/8-import-deleted.sh: -------------------------------------------------------------------------------- 1 | PARTITION_NAME=$1 2 | SCHEMA=$2 3 | ARCHIVISATION_TABLE=$3 4 | 5 | COUNT=$(psql -qt ${CONNECTION_OPTIONS} --command="SELECT COUNT(*) FROM ${SCHEMA}.${ARCHIVISATION_TABLE} WHERE status='DROPPED' AND schemaname || '.' || tablename = '${PARTITION_NAME}';") 6 | if [ "$COUNT" -eq "0" ]; then 7 | echo "Partition '${PARTITION_NAME}' does not exist in table $COUNT"; 8 | else 9 | psql --set ON_ERROR_STOP=on ${CONNECTION_OPTIONS} < "${PARTITION_NAME}.dump" 10 | psql -qt ${CONNECTION_OPTIONS} --command="UPDATE ${SCHEMA}.${ARCHIVISATION_TABLE} SET status='REIMPORTED' WHERE schemaname || '.' || tablename = '${PARTITION_NAME}';" 11 | echo "Dump ${PARTITION_NAME} imported" 12 | fi 13 | -------------------------------------------------------------------------------- /scripts/archivisation/9-attach.sql: -------------------------------------------------------------------------------- 1 | CREATE OR REPLACE PROCEDURE reattach_partitions_from_archivisation(IN schema TEXT, IN archivisation_table TEXT) AS 2 | $$ 3 | DECLARE 4 | row record; 5 | BEGIN 6 | FOR row IN EXECUTE ' 7 | SELECT schemaname, tablename, parent_schemaname, parent_tablename, min_sequence_number, max_sequence_number 8 | FROM ' || schema || '.' || archivisation_table || ' 9 | WHERE STATUS = ''REIMPORTED''' 10 | LOOP 11 | EXECUTE 'ALTER TABLE ' || row.parent_schemaname || '.' || row.parent_tablename || 12 | ' ATTACH PARTITION ' || row.schemaname || '.' || row.tablename || 13 | ' FOR VALUES FROM (' || row.min_sequence_number || ') TO (' || row.max_sequence_number + 1 || ')'; 14 | EXECUTE 'DELETE FROM ' || schema || '.' 
|| archivisation_table || 15 | ' WHERE schemaname = ''' || row.schemaname || ''' AND tablename = ''' || row.tablename || ''';'; 16 | END LOOP; 17 | END ; 18 | $$ LANGUAGE plpgsql; 19 | -------------------------------------------------------------------------------- /scripts/archivisation/demo-prepare.sql: -------------------------------------------------------------------------------- 1 | DROP TABLE IF EXISTS public.archivisation; 2 | 3 | CREATE TABLE IF NOT EXISTS public.j_p_1 PARTITION OF public.journal FOR VALUES IN ('p-1') PARTITION BY RANGE (sequence_number); 4 | CREATE TABLE IF NOT EXISTS public.j_p_1_1 PARTITION OF public.j_p_1 FOR VALUES FROM (0) TO (10); 5 | CREATE TABLE IF NOT EXISTS public.j_p_1_2 PARTITION OF public.j_p_1 FOR VALUES FROM (10) TO (20); 6 | CREATE TABLE IF NOT EXISTS public.j_p_1_3 PARTITION OF public.j_p_1 FOR VALUES FROM (20) TO (30); 7 | CREATE TABLE IF NOT EXISTS public.j_p_1_4 PARTITION OF public.j_p_1 FOR VALUES FROM (30) TO (40); 8 | CREATE TABLE IF NOT EXISTS public.j_p_1_5 PARTITION OF public.j_p_1 FOR VALUES FROM (40) TO (50); 9 | CREATE TABLE IF NOT EXISTS public.j_p_1_6 PARTITION OF public.j_p_1 FOR VALUES FROM (50) TO (60); 10 | CREATE TABLE IF NOT EXISTS public.j_p_1_7 PARTITION OF public.j_p_1 FOR VALUES FROM (60) TO (70); 11 | CREATE TABLE IF NOT EXISTS public.j_p_1_8 PARTITION OF public.j_p_1 FOR VALUES FROM (70) TO (80); 12 | CREATE TABLE IF NOT EXISTS public.j_p_1_9 PARTITION OF public.j_p_1 FOR VALUES FROM (80) TO (90); 13 | CREATE TABLE IF NOT EXISTS public.j_p_1_10 PARTITION OF public.j_p_1 FOR VALUES FROM (90) TO (100); 14 | CREATE TABLE IF NOT EXISTS public.j_p_1_11 PARTITION OF public.j_p_1 FOR VALUES FROM (100) TO (110); 15 | CREATE TABLE IF NOT EXISTS public.j_p_1_12 PARTITION OF public.j_p_1 FOR VALUES FROM (110) TO (120); 16 | 17 | INSERT INTO public.journal(persistence_id, sequence_number, deleted, tags, message) 18 | select 'p-1', i, false , '{2,4}', '0x22' 19 | from generate_series(1, 115) s(i); 20 | -------------------------------------------------------------------------------- /scripts/create-release-issue.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | VERSION=$1 4 | if [ -z $VERSION ] 5 | then 6 | echo specify the version name to be released, eg. 1.0.0 7 | else 8 | sed -e 's/\$VERSION\$/'$VERSION'/g' docs/release-train-issue-template.md > /tmp/release-$VERSION.md 9 | echo Created $(hub issue create -F /tmp/release-$VERSION.md --milestone $VERSION --browse) 10 | fi 11 | -------------------------------------------------------------------------------- /scripts/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "3" 2 | 3 | services: 4 | postgres: 5 | image: postgres:latest 6 | container_name: postgres 7 | environment: 8 | - "TZ=Europe/Amsterdam" 9 | - "POSTGRES_USER=docker" 10 | - "POSTGRES_PASSWORD=docker" 11 | ports: 12 | - "5432:5432" # credentials (docker:docker) 13 | -------------------------------------------------------------------------------- /scripts/launch-postgres.sh: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright 2016 Dennis Vriend 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 
6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # 16 | #!/bin/bash 17 | export VM_HOST="${VM_HOST:-localhost}" 18 | 19 | # Wait for a certain service to become available 20 | wait() { 21 | while true; do 22 | if ! nc -z $VM_HOST $1 23 | then 24 | echo "$2 not available, retrying..." 25 | sleep 1 26 | else 27 | echo "$2 is available" 28 | break; 29 | fi 30 | done; 31 | } 32 | 33 | docker-compose -f scripts/docker-compose.yml kill postgres 34 | docker-compose -f scripts/docker-compose.yml rm -f postgres 35 | docker-compose -f scripts/docker-compose.yml up -d postgres 36 | wait 5432 Postgres 37 | -------------------------------------------------------------------------------- /scripts/migration-0.5.0/partitioned/1-add-indices-manually.sql: -------------------------------------------------------------------------------- 1 | DO $$ 2 | DECLARE 3 | -- adapt those to match journal configuration 4 | v_journal_table_name constant text := 'journal'; 5 | v_schema constant text := 'public'; 6 | v_column_persistence_id text = 'persistence_id'; 7 | v_column_sequence_number text = 'sequence_number'; 8 | -- do not change values below 9 | v_persistence_seq_idx constant text := '_persistence_sequence_idx'; 10 | v_rec record; 11 | v_sql text; 12 | BEGIN 13 | FOR v_rec IN 14 | -- get list of partitions 15 | SELECT 16 | child.relname AS child 17 | FROM 18 | pg_inherits 19 | JOIN pg_class parent ON pg_inherits.inhparent = parent.oid 20 | JOIN pg_class child ON pg_inherits.inhrelid = child.oid 21 | JOIN pg_namespace nmsp_parent ON nmsp_parent.oid = parent.relnamespace 22 | WHERE 23 | parent.relname = v_journal_table_name AND 24 | nmsp_parent.nspname = v_schema 25 | LOOP 26 | PERFORM 27 | FROM 28 | pg_indexes 29 | WHERE 30 | schemaname = v_schema 31 | AND tablename = v_rec.child 32 | AND indexname = v_rec.child || v_persistence_seq_idx; 33 | IF NOT FOUND THEN 34 | -- unique btree on (persistence_id, sequence_number) 35 | v_sql := 'CREATE UNIQUE INDEX CONCURRENTLY ' || quote_ident(v_rec.child || v_persistence_seq_idx) || ' ON ' || quote_ident(v_schema) || '.' || quote_ident(v_rec.child) || ' USING BTREE (' || quote_ident(v_column_persistence_id) || ',' || quote_ident(v_column_sequence_number) || ');'; 36 | RAISE notice 'Run DDL: %', v_sql; 37 | END IF; 38 | 39 | END LOOP; 40 | END; 41 | $$; 42 | -------------------------------------------------------------------------------- /scripts/migration-0.5.0/partitioned/2-add-indices-flyway.sql: -------------------------------------------------------------------------------- 1 | -- Ensure indexes exist on partitions, actual indexes are created manually before migration using CONCURRENTLY option. 2 | -- This block is needed to avoid missing indexes if there was a new partition created between manual index creation and 3 | -- actual migration. We cannot create indexes CONCURRENTLY here, as is not possible to create indexes CONCURRENTLY 4 | -- inside transaction and functions are executed inside transaction. 
5 | DO $$ 6 | DECLARE 7 | -- adapt those to match journal configuration 8 | v_journal_table_name constant text := 'journal'; 9 | v_column_persistence_id text = 'persistence_id'; 10 | v_column_sequence_number text = 'sequence_number'; 11 | -- do not change values below 12 | v_persistence_seq_idx constant text := '_persistence_sequence_idx'; 13 | -- detect which schema Flyway uses 14 | v_schema constant text := (select trim(both '"' from split_part(setting,',',1)) FROM pg_settings WHERE name = 'search_path'); 15 | v_rec record; 16 | v_sql text; 17 | BEGIN 18 | FOR v_rec IN 19 | -- get list of partitions 20 | SELECT 21 | child.relname AS child 22 | FROM 23 | pg_inherits 24 | JOIN pg_class parent ON pg_inherits.inhparent = parent.oid 25 | JOIN pg_class child ON pg_inherits.inhrelid = child.oid 26 | JOIN pg_namespace nmsp_parent ON nmsp_parent.oid = parent.relnamespace 27 | WHERE 28 | parent.relname = v_journal_table_name AND 29 | nmsp_parent.nspname = v_schema 30 | LOOP 31 | -- unique btree on (persistence_id, sequence_number) 32 | v_sql := 'CREATE UNIQUE INDEX IF NOT EXISTS ' || quote_ident(v_rec.child || v_persistence_seq_idx) || ' ON ' || quote_ident(v_schema) || '.' || quote_ident(v_rec.child) || ' USING BTREE (' || quote_ident(v_column_persistence_id) || ',' || quote_ident(v_column_sequence_number) || ');'; 33 | RAISE notice 'Running DDL: %', v_sql; 34 | EXECUTE v_sql; 35 | 36 | END LOOP; 37 | END; 38 | $$; 39 | 40 | -- drop global, non-unique index 41 | DROP INDEX IF EXISTS journal_persistence_id_sequence_number_idx; 42 | -------------------------------------------------------------------------------- /scripts/migration-0.6.0/1-create-journal-metadata-table.sql: -------------------------------------------------------------------------------- 1 | -- Creates the table and the number of partitions defined by jm_partitions_number. Default is 10. 2 | DO $$ 3 | DECLARE 4 | -- replace with appropriate values 5 | schema CONSTANT TEXT := 'public'; 6 | jm_table_name CONSTANT TEXT := 'journal_metadata'; 7 | jm_id_column CONSTANT TEXT := 'id'; 8 | jm_persistence_id_column CONSTANT TEXT := 'persistence_id'; 9 | jm_max_sequence_number_column CONSTANT TEXT := 'max_sequence_number'; 10 | jm_max_ordering_column CONSTANT TEXT := 'max_ordering'; 11 | jm_min_ordering_column CONSTANT TEXT := 'min_ordering'; 12 | jm_partitions_table_name_prefix CONSTANT TEXT := 'journal_metadata_'; 13 | jm_partitions_number CONSTANT INTEGER := 10; 14 | 15 | -- variables 16 | jm_table TEXT; 17 | jm_partition_table TEXT; 18 | sql TEXT; 19 | BEGIN 20 | jm_table := schema || '.' || jm_table_name; 21 | jm_partition_table := schema || '.'
|| jm_partitions_table_name_prefix; 22 | 23 | sql := 'CREATE TABLE IF NOT EXISTS ' || jm_table || 24 | '(' || 25 | jm_id_column || ' BIGINT GENERATED ALWAYS AS IDENTITY, ' || 26 | jm_max_sequence_number_column || ' BIGINT NOT NULL, ' || 27 | jm_max_ordering_column || ' BIGINT NOT NULL, ' || 28 | jm_min_ordering_column || ' BIGINT NOT NULL, ' || 29 | jm_persistence_id_column || ' TEXT NOT NULL, ' || 30 | 'PRIMARY KEY (' || jm_persistence_id_column || ')' || 31 | ') PARTITION BY HASH(' || jm_persistence_id_column || ')'; 32 | 33 | EXECUTE sql; 34 | 35 | FOR i IN 0..(jm_partitions_number - 1) LOOP 36 | EXECUTE 'CREATE TABLE IF NOT EXISTS ' || jm_partition_table || i || 37 | ' PARTITION OF ' || jm_table || 38 | ' FOR VALUES WITH (MODULUS ' || jm_partitions_number || ', REMAINDER ' || i || ')'; 39 | END LOOP; 40 | END; 41 | $$ LANGUAGE plpgsql; 42 | -------------------------------------------------------------------------------- /scripts/migration-0.6.0/2-create-function-update-journal-metadata.sql: -------------------------------------------------------------------------------- 1 | -- replace schema value if required 2 | CREATE OR REPLACE FUNCTION public.update_journal_metadata() RETURNS TRIGGER AS 3 | $$ 4 | DECLARE 5 | -- replace with appropriate values 6 | schema CONSTANT TEXT := 'public'; 7 | j_table_name CONSTANT TEXT := 'journal'; 8 | j_persistence_id_column CONSTANT TEXT := 'persistence_id'; 9 | j_sequence_number_column CONSTANT TEXT := 'sequence_number'; 10 | j_ordering_column CONSTANT TEXT := 'ordering'; 11 | jm_table_name CONSTANT TEXT := 'journal_metadata'; 12 | jm_persistence_id_column CONSTANT TEXT := 'persistence_id'; 13 | jm_max_sequence_number_column CONSTANT TEXT := 'max_sequence_number'; 14 | jm_max_ordering_column CONSTANT TEXT := 'max_ordering'; 15 | jm_min_ordering_column CONSTANT TEXT := 'min_ordering'; 16 | first_sequence_number_value CONSTANT INTEGER := 1; 17 | unset_min_ordering_value CONSTANT INTEGER := -1; 18 | 19 | -- variables 20 | j_table TEXT; 21 | jm_table TEXT; 22 | cols TEXT; 23 | vals TEXT; 24 | upds TEXT; 25 | sql TEXT; 26 | BEGIN 27 | j_table := schema || '.' || j_table_name; 28 | jm_table := schema || '.' || jm_table_name; 29 | cols := jm_persistence_id_column || ', ' || jm_max_sequence_number_column || ', ' || jm_max_ordering_column || ', ' || jm_min_ordering_column; 30 | vals := '($1).' || j_persistence_id_column || ', ($1).' || j_sequence_number_column || ', ($1).' || j_ordering_column || 31 | ', CASE WHEN ($1).' || j_sequence_number_column || ' = ' || first_sequence_number_value || ' THEN ($1).' || j_ordering_column || ' ELSE ' || unset_min_ordering_value || ' END'; 32 | upds := jm_max_sequence_number_column || ' = GREATEST(' || jm_table || '.' || jm_max_sequence_number_column || ', ($1).' || j_sequence_number_column || '), ' || 33 | jm_max_ordering_column || ' = GREATEST(' || jm_table || '.' || jm_max_ordering_column || ', ($1).' || j_ordering_column || '), ' || 34 | jm_min_ordering_column || ' = LEAST(' || jm_table || '.' || jm_min_ordering_column || ', ($1).' 
|| j_ordering_column || ')'; 35 | 36 | sql := 'INSERT INTO ' || jm_table || ' (' || cols || ') VALUES (' || vals || ') ' || 37 | 'ON CONFLICT (' || jm_persistence_id_column || ') DO UPDATE SET ' || upds; 38 | 39 | EXECUTE sql USING NEW; 40 | RETURN NEW; 41 | END; 42 | $$ LANGUAGE plpgsql; 43 | -------------------------------------------------------------------------------- /scripts/migration-0.6.0/3-create-trigger-update-journal-metadata.sql: -------------------------------------------------------------------------------- 1 | DO $$ 2 | DECLARE 3 | -- replace with appropriate values 4 | schema CONSTANT TEXT := 'public'; 5 | j_table_name CONSTANT TEXT := 'journal'; 6 | 7 | -- variables 8 | j_table TEXT; 9 | sql TEXT; 10 | BEGIN 11 | j_table := schema || '.' || j_table_name; 12 | 13 | sql := 'CREATE TRIGGER trig_update_journal_metadata 14 | AFTER INSERT ON ' || j_table || ' FOR EACH ROW 15 | EXECUTE PROCEDURE ' || schema || '.update_journal_metadata()'; 16 | 17 | EXECUTE sql; 18 | END; 19 | $$ LANGUAGE plpgsql; 20 | -------------------------------------------------------------------------------- /scripts/migration-0.6.0/4-populate-journal-metadata.sql: -------------------------------------------------------------------------------- 1 | /* 2 | ATTENTION: This is a simplistic migration, which is not prepared to handle a large number of rows. 3 | If that is your situation, please consider running some kind of batched ad-hoc program that will read the journal, 4 | compute the necessary values and then insert them into the journal metadata table. 5 | 6 | When you upgrade to the 0.6.x series, the crucial part is adding the metadata insert trigger, which will take care of all new events, 7 | meaning that it is totally safe to handle the back-filling of data in an ad-hoc manner. 8 | */ 9 | DO $$ 10 | DECLARE 11 | -- replace with appropriate values 12 | schema CONSTANT TEXT := 'public'; 13 | j_table_name CONSTANT TEXT := 'journal'; 14 | j_persistence_id_column CONSTANT TEXT := 'persistence_id'; 15 | j_sequence_number_column CONSTANT TEXT := 'sequence_number'; 16 | j_ordering_column CONSTANT TEXT := 'ordering'; 17 | jpi_table_name CONSTANT TEXT := 'journal_metadata'; 18 | jpi_persistence_id_column CONSTANT TEXT := 'persistence_id'; 19 | jpi_max_sequence_number_column CONSTANT TEXT := 'max_sequence_number'; 20 | jpi_max_ordering_column CONSTANT TEXT := 'max_ordering'; 21 | jpi_min_ordering_column CONSTANT TEXT := 'min_ordering'; 22 | 23 | -- variables 24 | j_table TEXT; 25 | jpi_table TEXT; 26 | sql TEXT; 27 | BEGIN 28 | j_table := schema || '.' || j_table_name; 29 | jpi_table := schema || '.' || jpi_table_name; 30 | -- list the target columns explicitly: the metadata table's id column is GENERATED ALWAYS and must not be assigned here 31 | sql := 'INSERT INTO ' || jpi_table || ' (' || jpi_persistence_id_column || ', ' || jpi_max_sequence_number_column || ', ' || jpi_max_ordering_column || ', ' || jpi_min_ordering_column || ')' || 32 | ' SELECT ' || 33 | j_persistence_id_column || ', ' || 34 | 'max(' || j_sequence_number_column || '), ' || 35 | 'max(' || j_ordering_column || '), ' || 36 | 'min(' || j_ordering_column || ')' || 37 | ' FROM ' || j_table || ' GROUP BY ' || j_persistence_id_column; 38 | 39 | EXECUTE sql; 40 | END; 41 | $$ LANGUAGE plpgsql; -------------------------------------------------------------------------------- /scripts/migration/flat/1-create-schema.sql: -------------------------------------------------------------------------------- 1 | CREATE OR REPLACE PROCEDURE create_schema(IN destination_schema TEXT, IN destination_journal_table TEXT, IN destination_tag_table TEXT) AS 2 | $$ 3 | DECLARE 4 | destination_journal TEXT; 5 | destination_tag TEXT; 6 | BEGIN 7 | destination_journal := destination_schema || '.' || destination_journal_table; 8 | destination_tag := destination_schema || '.'
|| destination_tag_table; 9 | EXECUTE 'CREATE TABLE ' || destination_journal || ' 10 | ( 11 | ordering BIGSERIAL, 12 | sequence_number BIGINT NOT NULL, 13 | deleted BOOLEAN DEFAULT FALSE NOT NULL, 14 | persistence_id TEXT NOT NULL, 15 | message BYTEA NOT NULL, 16 | tags int[], 17 | PRIMARY KEY (persistence_id, sequence_number) 18 | );'; 19 | 20 | EXECUTE 'CREATE TABLE ' || destination_tag || ' 21 | ( 22 | id BIGSERIAL, 23 | name TEXT NOT NULL, 24 | PRIMARY KEY (id) 25 | );'; 26 | 27 | EXECUTE 'CREATE UNIQUE INDEX ' || destination_tag_table || '_name_idx on ' || destination_tag || ' (name);'; 28 | END ; 29 | $$ LANGUAGE plpgsql; 30 | -------------------------------------------------------------------------------- /scripts/migration/flat/2-fill-event-tag.sql: -------------------------------------------------------------------------------- 1 | CREATE OR REPLACE PROCEDURE fill_tags(IN source_tag_separator TEXT, IN source_schema TEXT, IN source_journal_table_name TEXT, IN destination_schema TEXT, IN destination_tag_table TEXT) AS 2 | $$ 3 | DECLARE 4 | row record; 5 | tag TEXT; 6 | BEGIN 7 | FOR row IN EXECUTE 'SELECT DISTINCT tags FROM ' || source_schema || '.' || source_journal_table_name 8 | LOOP 9 | FOR tag in (SELECT single_tag FROM regexp_split_to_table(row.tags, source_tag_separator) AS single_tag) 10 | LOOP 11 | EXECUTE 'INSERT INTO ' || destination_schema || '.' || destination_tag_table || '(name) VALUES (''' || tag || ''') ON CONFLICT DO NOTHING;'; 12 | END LOOP; 13 | END LOOP; 14 | END ; 15 | $$ LANGUAGE plpgsql; 16 | -------------------------------------------------------------------------------- /scripts/migration/flat/3-copy-data.sql: -------------------------------------------------------------------------------- 1 | CREATE OR REPLACE PROCEDURE copy_piece_of_data(IN from_ordering BIGINT, IN to_ordering BIGINT, 2 | IN source_tag_separator TEXT, IN source_schema TEXT, 3 | IN source_journal_table_name TEXT, IN destination_schema TEXT, 4 | IN destination_journal_table_name TEXT, IN destination_tag_table TEXT) AS 5 | $$ 6 | BEGIN 7 | EXECUTE ' 8 | INSERT INTO ' || destination_schema || '.' || destination_journal_table_name || '(ordering, persistence_id, sequence_number, deleted, tags, message) 9 | SELECT jrn.ordering , persistence_id , sequence_number , deleted , (SELECT array_agg(id) FROM regexp_split_to_table(tags, ''' || source_tag_separator || ''') AS single_tag JOIN ' || destination_schema || '.' || destination_tag_table || ' ON name = single_tag) , message 10 | FROM ' || source_schema || '.' || source_journal_table_name || ' jrn 11 | WHERE ordering >= ' || from_ordering || ' AND ordering <= ' || to_ordering || ' 12 | ON CONFLICT DO NOTHING; 13 | '; 14 | COMMIT; 15 | END ; 16 | $$ LANGUAGE plpgsql; 17 | 18 | CREATE OR REPLACE PROCEDURE copy_data(IN from_ordering BIGINT, IN batch_size BIGINT, IN source_tag_separator TEXT, 19 | IN source_schema TEXT, IN source_journal_table_name TEXT, 20 | IN destination_schema TEXT, IN destination_journal_table_name TEXT, 21 | IN destination_tag_table TEXT) AS 22 | $$ 23 | DECLARE 24 | batch_number BIGINT; 25 | max_ordering BIGINT; 26 | BEGIN 27 | EXECUTE 'SELECT max(ordering) FROM ' || source_schema || '.' 
|| source_journal_table_name || ';' INTO max_ordering; 28 | FOR batch_number IN 0..((max_ordering - from_ordering) / batch_size) 29 | LOOP 30 | CALL copy_piece_of_data(from_ordering + batch_number * batch_size, 31 | from_ordering + batch_size + batch_number * batch_size - 1, source_tag_separator, 32 | source_schema, source_journal_table_name, destination_schema, 33 | destination_journal_table_name, destination_tag_table); 34 | END LOOP; 35 | END ; 36 | $$ LANGUAGE plpgsql; 37 | -------------------------------------------------------------------------------- /scripts/migration/flat/4-move-sequence.sql: -------------------------------------------------------------------------------- 1 | CREATE OR REPLACE PROCEDURE move_sequence(IN destination_schema TEXT, IN destination_journal_table_name TEXT) AS 2 | $$ 3 | DECLARE 4 | max_ordering BIGINT; 5 | BEGIN 6 | EXECUTE 'SELECT max(ordering) FROM ' || destination_schema || '.' || destination_journal_table_name || 7 | ';' INTO max_ordering; 8 | PERFORM setval(destination_schema || '.' || destination_journal_table_name || '_ordering_seq', max_ordering, true); 9 | END ; 10 | $$ LANGUAGE plpgsql; 11 | -------------------------------------------------------------------------------- /scripts/migration/flat/5-create-indexes.sql: -------------------------------------------------------------------------------- 1 | CREATE OR REPLACE PROCEDURE create_indexes(IN destination_schema TEXT, IN destination_journal_table TEXT) AS 2 | $$ 3 | DECLARE 4 | destination_journal TEXT; 5 | BEGIN 6 | destination_journal := destination_schema || '.' || destination_journal_table; 7 | 8 | EXECUTE 'CREATE EXTENSION IF NOT EXISTS intarray WITH SCHEMA ' || destination_schema || ';'; 9 | EXECUTE 'CREATE INDEX ' || destination_journal_table || '_tags_idx ON ' || destination_journal || ' USING GIN (tags gin__int_ops);'; 10 | EXECUTE 'CREATE INDEX ' || destination_journal_table || '_ordering_idx ON ' || destination_journal || ' USING BRIN (ordering);'; 11 | END ; 12 | $$ LANGUAGE plpgsql; 13 | -------------------------------------------------------------------------------- /scripts/migration/flat/6-drop-migration-procedures.sql: -------------------------------------------------------------------------------- 1 | DROP PROCEDURE IF EXISTS create_schema; 2 | DROP PROCEDURE IF EXISTS fill_tags; 3 | DROP PROCEDURE IF EXISTS copy_data; 4 | DROP PROCEDURE IF EXISTS copy_piece_of_data; 5 | DROP PROCEDURE IF EXISTS move_sequence; 6 | DROP PROCEDURE IF EXISTS create_indexes; 7 | 8 | -------------------------------------------------------------------------------- /scripts/migration/flat/demo-prepare.sql: -------------------------------------------------------------------------------- 1 | DROP TABLE IF EXISTS public.journal_flat; 2 | DROP TABLE IF EXISTS public.tags; 3 | DROP TABLE IF EXISTS public.journal; 4 | DROP TABLE IF EXISTS public.tag_definition; 5 | 6 | CREATE TABLE IF NOT EXISTS public.journal 7 | ( 8 | ordering BIGSERIAL, 9 | persistence_id VARCHAR(255) NOT NULL, 10 | sequence_number BIGINT NOT NULL, 11 | deleted BOOLEAN DEFAULT FALSE NOT NULL, 12 | tags VARCHAR(255) DEFAULT NULL, 13 | message BYTEA NOT NULL, 14 | PRIMARY KEY (persistence_id, sequence_number) 15 | ); 16 | 17 | CREATE UNIQUE INDEX journal_ordering_idx ON public.journal (ordering); 18 | 19 | 20 | CREATE TABLE IF NOT EXISTS public.tag_definition 21 | ( 22 | orders INT, 23 | tag VARCHAR(255) DEFAULT NULL, 24 | PRIMARY KEY (orders) 25 | ); 26 | -- tagSeparator = "," 27 | INSERT INTO public.tag_definition(orders, tag) 28 | 
VALUES (0, ''), 29 | (1, 'firstEvent'), 30 | (2, 'longtag'), 31 | (3, 'multiT1,multiT2'), 32 | (4, 'firstUnique'), 33 | (5, 'tag'), 34 | (6, 'expected'), 35 | (7, 'multi,companion'), 36 | (8, 'companion,multiT1,T3,T4'), 37 | (9, 'xxx'), 38 | (10, 'ended'), 39 | (11, 'expected'); 40 | 41 | INSERT INTO public.journal(persistence_id, sequence_number, deleted, tags, message) 42 | select 'pp-1', i, false, tag, '\x0a0708141203612d3110011a03702d316a2461313164393136332d633365322d343136322d386630362d39623233396663386635383070a8ccefd2dd5c' 43 | from generate_series(1, 1000000) s(i) 44 | JOIN public.tag_definition on orders = mod(i, 12); 45 | 46 | select nextval('journal_ordering_seq'::regclass); 47 | 48 | INSERT INTO public.journal(persistence_id, sequence_number, deleted, tags, message) 49 | select 'pp-2', i, false, tag, '\x0a0708141203612d3110011a03702d316a2461313164393136332d633365322d343136322d386630362d39623233396663386635383070a8ccefd2dd5c' 50 | from generate_series(1, 100000) s(i) 51 | JOIN public.tag_definition on orders = mod(i, 12); 52 | 53 | select nextval('journal_ordering_seq'::regclass); 54 | 55 | INSERT INTO public.journal(persistence_id, sequence_number, deleted, tags, message) 56 | select 'pp-3', i, false, tag, '\x0a0708141203612d3110011a03702d316a2461313164393136332d633365322d343136322d386630362d39623233396663386635383070a8ccefd2dd5c' 57 | from generate_series(1, 100000) s(i) 58 | JOIN public.tag_definition on orders = mod(i, 12); 59 | 60 | select nextval('journal_ordering_seq'::regclass); 61 | 62 | INSERT INTO public.journal(persistence_id, sequence_number, deleted, tags, message) 63 | select 'pp-4', i, false, tag, '\x0a0708141203612d3110011a03702d316a2461313164393136332d633365322d343136322d386630362d39623233396663386635383070a8ccefd2dd5c' 64 | from generate_series(1, 100000) s(i) 65 | JOIN public.tag_definition on orders = mod(i, 12); 66 | 67 | select nextval('journal_ordering_seq'::regclass); 68 | 69 | INSERT INTO public.journal(persistence_id, sequence_number, deleted, tags, message) 70 | select 'pp-5', i, false, tag, '\x0a0708141203612d3110011a03702d316a2461313164393136332d633365322d343136322d386630362d39623233396663386635383070a8ccefd2dd5c' 71 | from generate_series(1, 99999) s(i) 72 | JOIN public.tag_definition on orders = mod(i, 12); 73 | -------------------------------------------------------------------------------- /scripts/migration/nested-partitions/1-create-schema.sql: -------------------------------------------------------------------------------- 1 | CREATE OR REPLACE PROCEDURE create_schema(IN destination_schema TEXT, IN destination_journal_table TEXT, IN destination_tag_table TEXT) AS 2 | $$ 3 | DECLARE 4 | destination_journal TEXT; 5 | destination_tag TEXT; 6 | BEGIN 7 | destination_journal := destination_schema || '.' || destination_journal_table; 8 | destination_tag := destination_schema || '.' 
|| destination_tag_table; 9 | EXECUTE 'CREATE TABLE ' || destination_journal || ' 10 | ( 11 | ordering BIGSERIAL, 12 | sequence_number BIGINT NOT NULL, 13 | deleted BOOLEAN DEFAULT FALSE NOT NULL, 14 | persistence_id TEXT NOT NULL, 15 | message BYTEA NOT NULL, 16 | tags int[], 17 | PRIMARY KEY (persistence_id, sequence_number) 18 | ) PARTITION BY LIST (persistence_id);'; 19 | 20 | EXECUTE 'CREATE TABLE ' || destination_tag || ' 21 | ( 22 | id BIGSERIAL, 23 | name TEXT NOT NULL, 24 | PRIMARY KEY (id) 25 | );'; 26 | 27 | EXECUTE 'CREATE UNIQUE INDEX ' || destination_tag_table || '_name_idx on ' || destination_tag || ' (name);'; 28 | END ; 29 | $$ LANGUAGE plpgsql; 30 | -------------------------------------------------------------------------------- /scripts/migration/nested-partitions/2-create-partitions.sql: -------------------------------------------------------------------------------- 1 | CREATE OR REPLACE PROCEDURE create_sub_partitions(IN partition_size BIGINT, IN source_schema TEXT, IN source_journal_table_name TEXT, IN destination_schema TEXT, IN destination_journal_table_name TEXT, IN destination_partition_prefix TEXT) AS 2 | $$ 3 | DECLARE 4 | row record; 5 | transformed_partition_id TEXT; 6 | BEGIN 7 | FOR row IN EXECUTE 'SELECT persistence_id, max(sequence_number) as max_sequence_number FROM ' || source_schema || '.' || source_journal_table_name || ' GROUP BY persistence_id' 8 | LOOP 9 | transformed_partition_id = REGEXP_REPLACE(row.persistence_id, '\W', '_', 'g'); 10 | EXECUTE 'CREATE TABLE IF NOT EXISTS ' || destination_schema || '.' || destination_partition_prefix ||'_' || transformed_partition_id || ' PARTITION OF ' || destination_schema || '.' || destination_journal_table_name || ' FOR VALUES IN (''' || row.persistence_id || ''') PARTITION BY RANGE (sequence_number);'; 11 | 12 | FOR i IN 0..(row.max_sequence_number/partition_size) LOOP 13 | EXECUTE 'CREATE TABLE IF NOT EXISTS ' || destination_schema || '.' || destination_partition_prefix ||'_' || transformed_partition_id || '_' || i ||' PARTITION OF ' || destination_schema || '.' || destination_partition_prefix ||'_' || transformed_partition_id || ' FOR VALUES FROM (' || i*partition_size || ') TO (' || (i+1)*partition_size || ');'; 14 | END LOOP ; 15 | END LOOP; 16 | END ; 17 | $$ LANGUAGE plpgsql; 18 | -------------------------------------------------------------------------------- /scripts/migration/nested-partitions/3-fill-event-tag.sql: -------------------------------------------------------------------------------- 1 | CREATE OR REPLACE PROCEDURE fill_tags(IN source_tag_separator TEXT, IN source_schema TEXT, IN source_journal_table_name TEXT, IN destination_schema TEXT, IN destination_tag_table TEXT) AS 2 | $$ 3 | DECLARE 4 | row record; 5 | tag TEXT; 6 | BEGIN 7 | FOR row IN EXECUTE 'SELECT DISTINCT tags FROM ' || source_schema || '.' || source_journal_table_name 8 | LOOP 9 | FOR tag in (SELECT single_tag FROM regexp_split_to_table(row.tags, source_tag_separator) AS single_tag) 10 | LOOP 11 | EXECUTE 'INSERT INTO ' || destination_schema || '.' 
|| destination_tag_table || '(name) VALUES (''' || tag || ''') ON CONFLICT DO NOTHING;'; 12 | END LOOP; 13 | END LOOP; 14 | END ; 15 | $$ LANGUAGE plpgsql; 16 | -------------------------------------------------------------------------------- /scripts/migration/nested-partitions/4-copy-data.sql: -------------------------------------------------------------------------------- 1 | CREATE OR REPLACE PROCEDURE copy_piece_of_data(IN from_ordering BIGINT, IN to_ordering BIGINT, 2 | IN source_tag_separator TEXT, IN source_schema TEXT, 3 | IN source_journal_table_name TEXT, IN destination_schema TEXT, 4 | IN destination_journal_table_name TEXT, IN destination_tag_table TEXT) AS 5 | $$ 6 | BEGIN 7 | EXECUTE ' 8 | INSERT INTO ' || destination_schema || '.' || destination_journal_table_name || '(ordering, persistence_id, sequence_number, deleted, tags, message) 9 | SELECT jrn.ordering , persistence_id , sequence_number , deleted , (SELECT array_agg(id) FROM regexp_split_to_table(tags, ''' || source_tag_separator || ''') AS single_tag JOIN ' || destination_schema || '.' || destination_tag_table || ' ON name = single_tag) , message 10 | FROM ' || source_schema || '.' || source_journal_table_name || ' jrn 11 | WHERE ordering >= ' || from_ordering || ' AND ordering <= ' || to_ordering || ' 12 | ON CONFLICT DO NOTHING; 13 | '; 14 | COMMIT; 15 | END ; 16 | $$ LANGUAGE plpgsql; 17 | 18 | CREATE OR REPLACE PROCEDURE copy_data(IN from_ordering BIGINT, IN batch_size BIGINT, IN source_tag_separator TEXT, 19 | IN source_schema TEXT, IN source_journal_table_name TEXT, 20 | IN destination_schema TEXT, IN destination_journal_table_name TEXT, 21 | IN destination_tag_table TEXT) AS 22 | $$ 23 | DECLARE 24 | batch_number BIGINT; 25 | max_ordering BIGINT; 26 | BEGIN 27 | EXECUTE 'SELECT max(ordering) FROM ' || source_schema || '.' || source_journal_table_name || ';' INTO max_ordering; 28 | FOR batch_number IN 0..((max_ordering - from_ordering) / batch_size) 29 | LOOP 30 | CALL copy_piece_of_data(from_ordering + batch_number * batch_size, 31 | from_ordering + batch_size + batch_number * batch_size - 1, source_tag_separator, 32 | source_schema, source_journal_table_name, destination_schema, 33 | destination_journal_table_name, destination_tag_table); 34 | END LOOP; 35 | END ; 36 | $$ LANGUAGE plpgsql; 37 | -------------------------------------------------------------------------------- /scripts/migration/nested-partitions/5-move-sequence.sql: -------------------------------------------------------------------------------- 1 | CREATE OR REPLACE PROCEDURE move_sequence(IN destination_schema TEXT, IN destination_journal_table_name TEXT) AS 2 | $$ 3 | DECLARE 4 | max_ordering BIGINT; 5 | BEGIN 6 | EXECUTE 'SELECT max(ordering) FROM ' || destination_schema || '.' || destination_journal_table_name || 7 | ';' INTO max_ordering; 8 | PERFORM setval(destination_schema || '.' || destination_journal_table_name || '_ordering_seq', max_ordering, true); 9 | END ; 10 | $$ LANGUAGE plpgsql; 11 | -------------------------------------------------------------------------------- /scripts/migration/nested-partitions/6-create-indexes.sql: -------------------------------------------------------------------------------- 1 | CREATE OR REPLACE PROCEDURE create_indexes(IN destination_schema TEXT, IN destination_journal_table TEXT) AS 2 | $$ 3 | DECLARE 4 | destination_journal TEXT; 5 | BEGIN 6 | destination_journal := destination_schema || '.' 
|| destination_journal_table; 7 | 8 | EXECUTE 'CREATE EXTENSION IF NOT EXISTS intarray WITH SCHEMA ' || destination_schema || ';'; 9 | EXECUTE 'CREATE INDEX ' || destination_journal_table || '_tags_idx ON ' || destination_journal || ' USING GIN (tags gin__int_ops);'; 10 | EXECUTE 'CREATE INDEX ' || destination_journal_table || '_ordering_idx ON ' || destination_journal || ' USING BRIN (ordering);'; 11 | END ; 12 | $$ LANGUAGE plpgsql; 13 | -------------------------------------------------------------------------------- /scripts/migration/nested-partitions/7-drop-migration-procedures.sql: -------------------------------------------------------------------------------- 1 | DROP PROCEDURE IF EXISTS create_schema; 2 | DROP PROCEDURE IF EXISTS create_sub_partitions; 3 | DROP PROCEDURE IF EXISTS fill_tags; 4 | DROP PROCEDURE IF EXISTS copy_data; 5 | DROP PROCEDURE IF EXISTS copy_piece_of_data; 6 | DROP PROCEDURE IF EXISTS move_sequence; 7 | DROP PROCEDURE IF EXISTS create_indexes; 8 | 9 | -------------------------------------------------------------------------------- /scripts/migration/nested-partitions/demo-prepare.sql: -------------------------------------------------------------------------------- 1 | DROP TABLE IF EXISTS public.journal_nested; 2 | DROP TABLE IF EXISTS public.tags; 3 | DROP TABLE IF EXISTS public.journal; 4 | DROP TABLE IF EXISTS public.tag_definition; 5 | 6 | CREATE TABLE IF NOT EXISTS public.journal 7 | ( 8 | ordering BIGSERIAL, 9 | persistence_id VARCHAR(255) NOT NULL, 10 | sequence_number BIGINT NOT NULL, 11 | deleted BOOLEAN DEFAULT FALSE NOT NULL, 12 | tags VARCHAR(255) DEFAULT NULL, 13 | message BYTEA NOT NULL, 14 | PRIMARY KEY (persistence_id, sequence_number) 15 | ); 16 | 17 | CREATE UNIQUE INDEX journal_ordering_idx ON public.journal (ordering); 18 | 19 | 20 | CREATE TABLE IF NOT EXISTS public.tag_definition 21 | ( 22 | orders INT, 23 | tag VARCHAR(255) DEFAULT NULL, 24 | PRIMARY KEY (orders) 25 | ); 26 | -- tagSeparator = "," 27 | INSERT INTO public.tag_definition(orders, tag) 28 | VALUES (0, ''), 29 | (1, 'firstEvent'), 30 | (2, 'longtag'), 31 | (3, 'multiT1,multiT2'), 32 | (4, 'firstUnique'), 33 | (5, 'tag'), 34 | (6, 'expected'), 35 | (7, 'multi,companion'), 36 | (8, 'companion,multiT1,T3,T4'), 37 | (9, 'xxx'), 38 | (10, 'ended'), 39 | (11, 'expected'); 40 | 41 | INSERT INTO public.journal(persistence_id, sequence_number, deleted, tags, message) 42 | select 'pp-1', i, false, tag, '\x0a0708141203612d3110011a03702d316a2461313164393136332d633365322d343136322d386630362d39623233396663386635383070a8ccefd2dd5c' 43 | from generate_series(1, 1000000) s(i) 44 | JOIN public.tag_definition on orders = mod(i, 12); 45 | 46 | select nextval('journal_ordering_seq'::regclass); 47 | 48 | INSERT INTO public.journal(persistence_id, sequence_number, deleted, tags, message) 49 | select 'pp-2', i, false, tag, '\x0a0708141203612d3110011a03702d316a2461313164393136332d633365322d343136322d386630362d39623233396663386635383070a8ccefd2dd5c' 50 | from generate_series(1, 100000) s(i) 51 | JOIN public.tag_definition on orders = mod(i, 12); 52 | 53 | select nextval('journal_ordering_seq'::regclass); 54 | 55 | INSERT INTO public.journal(persistence_id, sequence_number, deleted, tags, message) 56 | select 'pp-3', i, false, tag, '\x0a0708141203612d3110011a03702d316a2461313164393136332d633365322d343136322d386630362d39623233396663386635383070a8ccefd2dd5c' 57 | from generate_series(1, 100000) s(i) 58 | JOIN public.tag_definition on orders = mod(i, 12); 59 | 60 | select 
nextval('journal_ordering_seq'::regclass); 61 | 62 | INSERT INTO public.journal(persistence_id, sequence_number, deleted, tags, message) 63 | select 'pp-4', i, false, tag, '\x0a0708141203612d3110011a03702d316a2461313164393136332d633365322d343136322d386630362d39623233396663386635383070a8ccefd2dd5c' 64 | from generate_series(1, 100000) s(i) 65 | JOIN public.tag_definition on orders = mod(i, 12); 66 | 67 | select nextval('journal_ordering_seq'::regclass); 68 | 69 | INSERT INTO public.journal(persistence_id, sequence_number, deleted, tags, message) 70 | select 'pp-5', i, false, tag, '\x0a0708141203612d3110011a03702d316a2461313164393136332d633365322d343136322d386630362d39623233396663386635383070a8ccefd2dd5c' 71 | from generate_series(1, 99999) s(i) 72 | JOIN public.tag_definition on orders = mod(i, 12); 73 | -------------------------------------------------------------------------------- /scripts/migration/partitioned/1-create-schema.sql: -------------------------------------------------------------------------------- 1 | CREATE OR REPLACE PROCEDURE create_schema(IN destination_schema TEXT, IN destination_journal_table TEXT, IN destination_tag_table TEXT) AS 2 | $$ 3 | DECLARE 4 | destination_journal TEXT; 5 | destination_tag TEXT; 6 | BEGIN 7 | destination_journal := destination_schema || '.' || destination_journal_table; 8 | destination_tag := destination_schema || '.' || destination_tag_table; 9 | EXECUTE 'CREATE TABLE ' || destination_journal || ' 10 | ( 11 | ordering BIGINT, 12 | sequence_number BIGINT NOT NULL, 13 | deleted BOOLEAN DEFAULT FALSE NOT NULL, 14 | persistence_id TEXT NOT NULL, 15 | message BYTEA NOT NULL, 16 | tags int[], 17 | PRIMARY KEY (ordering) 18 | ) PARTITION BY RANGE (ordering);'; 19 | 20 | EXECUTE 'CREATE SEQUENCE ' || destination_journal || '_ordering_seq OWNED BY ' || destination_journal || '.ordering;'; 21 | 22 | EXECUTE 'CREATE TABLE ' || destination_tag || ' 23 | ( 24 | id BIGSERIAL, 25 | name TEXT NOT NULL, 26 | PRIMARY KEY (id) 27 | );'; 28 | 29 | EXECUTE 'CREATE UNIQUE INDEX ' || destination_tag_table || '_name_idx on ' || destination_tag || ' (name);'; 30 | END ; 31 | $$ LANGUAGE plpgsql; 32 | -------------------------------------------------------------------------------- /scripts/migration/partitioned/2-create-partitions.sql: -------------------------------------------------------------------------------- 1 | CREATE OR REPLACE PROCEDURE create_partitions(IN partition_size BIGINT, IN source_schema TEXT, IN source_journal_table_name TEXT, IN destination_schema TEXT, IN destination_journal_table_name TEXT, IN destination_partition_prefix TEXT) AS 2 | $$ 3 | DECLARE 4 | row record; 5 | max_ordering BIGINT; 6 | BEGIN 7 | EXECUTE 'SELECT max(ordering) as max_ordering FROM ' || source_schema || '.' || source_journal_table_name || ';' INTO max_ordering; 8 | FOR i IN 0..(max_ordering/partition_size) LOOP 9 | EXECUTE 'CREATE TABLE IF NOT EXISTS ' || destination_schema || '.' || destination_partition_prefix ||'_' || i || ' PARTITION OF ' || destination_schema || '.' 
|| destination_journal_table_name || ' FOR VALUES FROM (' || i*partition_size || ') TO (' || (i+1)*partition_size || ');'; 10 | END LOOP; 11 | END ; 12 | $$ LANGUAGE plpgsql; 13 | -------------------------------------------------------------------------------- /scripts/migration/partitioned/3-fill-event-tag.sql: -------------------------------------------------------------------------------- 1 | CREATE OR REPLACE PROCEDURE fill_tags(IN source_tag_separator TEXT, IN source_schema TEXT, IN source_journal_table_name TEXT, IN destination_schema TEXT, IN destination_tag_table TEXT) AS 2 | $$ 3 | DECLARE 4 | row record; 5 | tag TEXT; 6 | BEGIN 7 | FOR row IN EXECUTE 'SELECT DISTINCT tags FROM ' || source_schema || '.' || source_journal_table_name 8 | LOOP 9 | FOR tag in (SELECT single_tag FROM regexp_split_to_table(row.tags, source_tag_separator) AS single_tag) 10 | LOOP 11 | EXECUTE 'INSERT INTO ' || destination_schema || '.' || destination_tag_table || '(name) VALUES (''' || tag || ''') ON CONFLICT DO NOTHING;'; 12 | END LOOP; 13 | END LOOP; 14 | END ; 15 | $$ LANGUAGE plpgsql; 16 | -------------------------------------------------------------------------------- /scripts/migration/partitioned/4-copy-data.sql: -------------------------------------------------------------------------------- 1 | CREATE OR REPLACE PROCEDURE copy_piece_of_data(IN from_ordering BIGINT, IN to_ordering BIGINT, 2 | IN source_tag_separator TEXT, IN source_schema TEXT, IN source_journal_table_name TEXT, 3 | IN destination_schema TEXT, IN destination_journal_table_name TEXT, IN destination_tag_table TEXT) AS 4 | $$ 5 | BEGIN 6 | EXECUTE ' 7 | INSERT INTO ' || destination_schema || '.' || destination_journal_table_name || '(ordering, persistence_id, sequence_number, deleted, tags, message) 8 | SELECT jrn.ordering , persistence_id , sequence_number , deleted , (SELECT array_agg(id) FROM regexp_split_to_table(tags, ''' || source_tag_separator || ''') AS single_tag JOIN ' || destination_schema || '.' || destination_tag_table || ' ON name = single_tag) , message 9 | FROM ' || source_schema || '.' || source_journal_table_name || ' jrn 10 | WHERE ordering >= ' || from_ordering || ' AND ordering <= ' || to_ordering || ' 11 | ON CONFLICT DO NOTHING;'; 12 | COMMIT; 13 | END ; 14 | $$ LANGUAGE plpgsql; 15 | 16 | CREATE OR REPLACE PROCEDURE copy_data(IN from_ordering BIGINT, IN batch_size BIGINT, 17 | IN source_tag_separator TEXT, IN source_schema TEXT, IN source_journal_table_name TEXT, 18 | IN destination_schema TEXT, IN destination_journal_table_name TEXT, IN destination_tag_table TEXT) AS 19 | $$ 20 | DECLARE 21 | batch_number BIGINT; 22 | max_ordering BIGINT; 23 | BEGIN 24 | EXECUTE 'SELECT max(ordering) FROM ' || source_schema || '.' 
|| source_journal_table_name || ';' INTO max_ordering; 25 | FOR batch_number IN 0..((max_ordering - from_ordering) / batch_size) 26 | LOOP 27 | CALL copy_piece_of_data(from_ordering + batch_number * batch_size, 28 | from_ordering + batch_size + batch_number * batch_size - 1, source_tag_separator, 29 | source_schema, source_journal_table_name, destination_schema, 30 | destination_journal_table_name, destination_tag_table); 31 | END LOOP; 32 | END ; 33 | $$ LANGUAGE plpgsql; 34 | -------------------------------------------------------------------------------- /scripts/migration/partitioned/5-move-sequence.sql: -------------------------------------------------------------------------------- 1 | CREATE OR REPLACE PROCEDURE move_sequence(IN destination_schema TEXT, IN destination_journal_table_name TEXT) AS 2 | $$ 3 | DECLARE 4 | max_ordering BIGINT; 5 | BEGIN 6 | EXECUTE 'SELECT max(ordering) FROM ' || destination_schema || '.' || destination_journal_table_name || 7 | ';' INTO max_ordering; 8 | PERFORM setval(destination_schema || '.' || destination_journal_table_name || '_ordering_seq', max_ordering, true); 9 | END ; 10 | $$ LANGUAGE plpgsql; 11 | -------------------------------------------------------------------------------- /scripts/migration/partitioned/6-create-indexes.sql: -------------------------------------------------------------------------------- 1 | CREATE OR REPLACE PROCEDURE create_indexes(IN destination_schema TEXT, IN destination_journal_table TEXT) AS 2 | $$ 3 | DECLARE 4 | destination_journal TEXT; 5 | BEGIN 6 | destination_journal := destination_schema || '.' || destination_journal_table; 7 | 8 | EXECUTE 'CREATE EXTENSION IF NOT EXISTS intarray WITH SCHEMA ' || destination_schema || ';'; 9 | EXECUTE 'CREATE INDEX ' || destination_journal_table || '_tags_idx ON ' || destination_journal || ' USING GIN (tags gin__int_ops);'; 10 | EXECUTE 'CREATE INDEX ' || destination_journal_table || '_persistence_sequence_idx ON ' || destination_journal || ' USING BTREE (persistence_id, sequence_number);'; 11 | END ; 12 | $$ LANGUAGE plpgsql; 13 | -------------------------------------------------------------------------------- /scripts/migration/partitioned/7-drop-migration-procedures.sql: -------------------------------------------------------------------------------- 1 | DROP PROCEDURE IF EXISTS create_schema; 2 | DROP PROCEDURE IF EXISTS create_partitions; 3 | DROP PROCEDURE IF EXISTS fill_tags; 4 | DROP PROCEDURE IF EXISTS copy_data; 5 | DROP PROCEDURE IF EXISTS copy_piece_of_data; 6 | DROP PROCEDURE IF EXISTS move_sequence; 7 | DROP PROCEDURE IF EXISTS create_indexes; 8 | -------------------------------------------------------------------------------- /scripts/migration/partitioned/demo-prepare.sql: -------------------------------------------------------------------------------- 1 | DROP TABLE IF EXISTS public.journal_partitioned; 2 | DROP TABLE IF EXISTS public.tags; 3 | DROP TABLE IF EXISTS public.journal; 4 | DROP TABLE IF EXISTS public.tag_definition; 5 | 6 | CREATE TABLE IF NOT EXISTS public.journal 7 | ( 8 | ordering BIGSERIAL, 9 | persistence_id VARCHAR(255) NOT NULL, 10 | sequence_number BIGINT NOT NULL, 11 | deleted BOOLEAN DEFAULT FALSE NOT NULL, 12 | tags VARCHAR(255) DEFAULT NULL, 13 | message BYTEA NOT NULL, 14 | PRIMARY KEY (persistence_id, sequence_number) 15 | ); 16 | 17 | CREATE UNIQUE INDEX journal_ordering_idx ON public.journal (ordering); 18 | 19 | 20 | CREATE TABLE IF NOT EXISTS public.tag_definition 21 | ( 22 | orders INT, 23 | tag VARCHAR(255) DEFAULT NULL, 24 | 
PRIMARY KEY (orders) 25 | ); 26 | -- tagSeparator = "," 27 | INSERT INTO public.tag_definition(orders, tag) 28 | VALUES (0, ''), 29 | (1, 'firstEvent'), 30 | (2, 'longtag'), 31 | (3, 'multiT1,multiT2'), 32 | (4, 'firstUnique'), 33 | (5, 'tag'), 34 | (6, 'expected'), 35 | (7, 'multi,companion'), 36 | (8, 'companion,multiT1,T3,T4'), 37 | (9, 'xxx'), 38 | (10, 'ended'), 39 | (11, 'expected'); 40 | 41 | INSERT INTO public.journal(persistence_id, sequence_number, deleted, tags, message) 42 | select 'pp-1', i, false, tag, '\x0a0708141203612d3110011a03702d316a2461313164393136332d633365322d343136322d386630362d39623233396663386635383070a8ccefd2dd5c' 43 | from generate_series(1, 1000000) s(i) 44 | JOIN public.tag_definition on orders = mod(i, 12); 45 | 46 | select nextval('journal_ordering_seq'::regclass); 47 | 48 | INSERT INTO public.journal(persistence_id, sequence_number, deleted, tags, message) 49 | select 'pp-2', i, false, tag, '\x0a0708141203612d3110011a03702d316a2461313164393136332d633365322d343136322d386630362d39623233396663386635383070a8ccefd2dd5c' 50 | from generate_series(1, 100000) s(i) 51 | JOIN public.tag_definition on orders = mod(i, 12); 52 | 53 | select nextval('journal_ordering_seq'::regclass); 54 | 55 | INSERT INTO public.journal(persistence_id, sequence_number, deleted, tags, message) 56 | select 'pp-3', i, false, tag, '\x0a0708141203612d3110011a03702d316a2461313164393136332d633365322d343136322d386630362d39623233396663386635383070a8ccefd2dd5c' 57 | from generate_series(1, 100000) s(i) 58 | JOIN public.tag_definition on orders = mod(i, 12); 59 | 60 | select nextval('journal_ordering_seq'::regclass); 61 | 62 | INSERT INTO public.journal(persistence_id, sequence_number, deleted, tags, message) 63 | select 'pp-4', i, false, tag, '\x0a0708141203612d3110011a03702d316a2461313164393136332d633365322d343136322d386630362d39623233396663386635383070a8ccefd2dd5c' 64 | from generate_series(1, 100000) s(i) 65 | JOIN public.tag_definition on orders = mod(i, 12); 66 | 67 | select nextval('journal_ordering_seq'::regclass); 68 | 69 | INSERT INTO public.journal(persistence_id, sequence_number, deleted, tags, message) 70 | select 'pp-5', i, false, tag, '\x0a0708141203612d3110011a03702d316a2461313164393136332d633365322d343136322d386630362d39623233396663386635383070a8ccefd2dd5c' 71 | from generate_series(1, 99999) s(i) 72 | JOIN public.tag_definition on orders = mod(i, 12); 73 | -------------------------------------------------------------------------------- /scripts/psql-cli.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | echo "================== Help for psql =========================" 3 | echo "\l or \list : shows all databases" 4 | echo "\d : shows all tables, views and sequences" 5 | echo "\dn : shows all schemas" 6 | echo "\d table_name : describe table, view, sequence, or index" 7 | echo "\c database_name : connect to a database" 8 | echo "\q : quit" 9 | echo "\? 
: for more commands" 10 | echo "==================== Extensions ===========================" 11 | echo "create extension pgcrypto; : installs cryptographic functions" 12 | echo "==================== Some SQL =============================" 13 | echo "select gen_random_uuid(); : returns a random uuid (pgcrypto)" 14 | echo "select version(); : return the server version" 15 | echo "select current_date; : returns the current date" 16 | echo "=================================================================" 17 | docker exec -it postgres psql --dbname=docker --username=docker --------------------------------------------------------------------------------