├── .asf.yaml
├── .git-blame-ignore-revs
├── .gitattributes
├── .github
│   ├── actions
│   │   └── sync-nightlies
│   │       └── action.yml
│   └── workflows
│       ├── binary-compatibility-checks.yml
│       ├── build-test.yml
│       ├── dependency-graph.yml
│       ├── format.yml
│       ├── headers.yml
│       ├── link-validator.yml
│       ├── publish-1.0-docs.yml
│       ├── publish-1.1-docs.yml
│       ├── publish-nightly.yml
│       └── scala-steward.yml
├── .gitignore
├── .jvmopts
├── .jvmopts-ci
├── .scala-steward.conf
├── .scalafmt.conf
├── CHANGELOG.md
├── CONTRIBUTING.md
├── LICENSE
├── NOTICE
├── README.md
├── build.sbt
├── core
│   └── src
│       ├── main
│       │   ├── resources
│       │   │   └── reference.conf
│       │   └── scala
│       │       └── org
│       │           └── apache
│       │               └── pekko
│       │                   └── persistence
│       │                       └── r2dbc
│       │                           ├── ConnectionFactoryProvider.scala
│       │                           ├── R2dbcSettings.scala
│       │                           ├── internal
│       │                           │   ├── BySliceQuery.scala
│       │                           │   ├── ContinuousQuery.scala
│       │                           │   ├── MonoToFuture.scala
│       │                           │   ├── PubSub.scala
│       │                           │   ├── R2dbcExecutor.scala
│       │                           │   └── Sql.scala
│       │                           ├── journal
│       │                           │   ├── JournalDao.scala
│       │                           │   ├── R2dbcJournal.scala
│       │                           │   └── mysql
│       │                           │       └── MySQLJournalDao.scala
│       │                           ├── query
│       │                           │   ├── R2dbcReadJournalProvider.scala
│       │                           │   ├── javadsl
│       │                           │   │   └── R2dbcReadJournal.scala
│       │                           │   └── scaladsl
│       │                           │       ├── QueryDao.scala
│       │                           │       ├── R2dbcReadJournal.scala
│       │                           │       └── mysql
│       │                           │           └── MySQLQueryDao.scala
│       │                           ├── snapshot
│       │                           │   ├── R2dbcSnapshotStore.scala
│       │                           │   ├── SnapshotDao.scala
│       │                           │   └── mysql
│       │                           │       └── MySQLSnapshotDao.scala
│       │                           └── state
│       │                               ├── R2dbcDurableStateStoreProvider.scala
│       │                               ├── javadsl
│       │                               │   └── R2dbcDurableStateStore.scala
│       │                               └── scaladsl
│       │                                   ├── DurableStateDao.scala
│       │                                   ├── DurableStateExceptionSupport.scala
│       │                                   ├── R2dbcDurableStateStore.scala
│       │                                   └── mysql
│       │                                       └── MySQLDurableStateDao.scala
│       └── test
│           ├── resources
│           │   └── logback-test.xml
│           └── scala
│               └── org
│                   └── apache
│                       └── pekko
│                           └── persistence
│                               └── r2dbc
│                                   ├── CborSerializable.scala
│                                   ├── ConnectionFactoryOptionsCustomizerSpec.scala
│                                   ├── R2dbcSettingsSpec.scala
│                                   ├── TestActors.scala
│                                   ├── TestConfig.scala
│                                   ├── TestData.scala
│                                   ├── TestDbLifecycle.scala
│                                   ├── internal
│                                   │   ├── BySliceQueryBucketsSpec.scala
│                                   │   ├── ContinuousQuerySpec.scala
│                                   │   ├── MonoToFutureSpec.scala
│                                   │   └── SqlSpec.scala
│                                   ├── journal
│                                   │   ├── PersistTagsSpec.scala
│                                   │   ├── PersistTimestampSpec.scala
│                                   │   ├── R2dbcJournalPerfManyActorsSpec.scala
│                                   │   ├── R2dbcJournalPerfSpec.scala
│                                   │   ├── R2dbcJournalSpec.scala
│                                   │   └── TestDataGenerator.scala
│                                   ├── query
│                                   │   ├── CurrentPersistenceIdsQuerySpec.scala
│                                   │   ├── EventsByPersistenceIdSpec.scala
│                                   │   ├── EventsBySliceBacktrackingSpec.scala
│                                   │   ├── EventsBySlicePerfSpec.scala
│                                   │   ├── EventsBySlicePubSubSpec.scala
│                                   │   └── EventsBySliceSpec.scala
│                                   ├── snapshot
│                                   │   └── R2dbcSnapshotStoreSpec.scala
│                                   └── state
│                                       ├── CurrentPersistenceIdsQuerySpec.scala
│                                       ├── DurableStateBySliceSpec.scala
│                                       ├── DurableStateStoreSpec.scala
│                                       └── TestDataGenerator.scala
├── ddl-scripts
│   ├── create_tables_mysql.sql
│   ├── create_tables_postgres.sql
│   ├── create_tables_yugabyte.sql
│   ├── drop_tables_mysql.sql
│   └── drop_tables_postgres.sql
├── design-notes.md
├── docker
│   ├── docker-compose-mysql.yml
│   ├── docker-compose-postgres.yml
│   └── docker-compose-yugabyte.yml
├── docs
│   └── src
│       ├── main
│       │   └── paradox
│       │       ├── connection-config.md
│       │       ├── contributing.md
│       │       ├── durable-state-store.md
│       │       ├── getting-started.md
│       │       ├── index.md
│       │       ├── journal.md
│       │       ├── migration-guides.md
│       │       ├── migration.md
│       │       ├── overview.md
│       │       ├── projection.md
│       │       ├── query.md
│       │       ├── release-notes
│       │       │   ├── index.md
│       │       │   ├── releases-1.0.md
│       │       │   └── releases-1.1.md
│       │       └── snapshots.md
│       └── test
│           ├── java
│           │   └── jdocs
│           │       └── home
│           │           ├── projection
│           │           │   └── R2dbcProjectionDocExample.java
│           │           └── query
│           │               └── QueryDocCompileOnly.java
│           ├── resources
│           │   ├── application-mysql.conf
│           │   ├── application-postgres.conf
│           │   └── application-yugabyte.conf
│           └── scala
│               └── docs
│                   └── home
│                       ├── CborSerializable.scala
│                       ├── projection
│                       │   └── R2dbcProjectionDocExample.scala
│                       └── query
│                           └── QueryDocCompileOnly.scala
├── migration
│   └── src
│       ├── main
│       │   ├── resources
│       │   │   └── reference.conf
│       │   └── scala
│       │       └── org
│       │           └── apache
│       │               └── pekko
│       │                   └── persistence
│       │                       └── r2dbc
│       │                           └── migration
│       │                               ├── MigrationTool.scala
│       │                               └── MigrationToolDao.scala
│       └── test
│           ├── resources
│           │   ├── application.conf
│           │   └── logback-main.xml
│           └── scala
│               └── org
│                   └── apache
│                       └── pekko
│                           └── persistence
│                               └── r2dbc
│                                   └── migration
│                                       └── MigrationToolSpec.scala
├── project
│   ├── AutomaticModuleName.scala
│   ├── CommonSettings.scala
│   ├── CopyrightHeader.scala
│   ├── Dependencies.scala
│   ├── LicenseReport.scala
│   ├── MetaInfLicenseNoticeCopy.scala
│   ├── PekkoCoreDependency.scala
│   ├── PekkoPersistenceJdbcDependency.scala
│   ├── PekkoProjectionDependency.scala
│   ├── build.properties
│   ├── plugins.sbt
│   └── project-info.conf
├── projection
│   └── src
│       ├── main
│       │   ├── mima-filters
│       │   │   └── 1.0.x.backwards.excludes
│       │   │       ├── r2dbcprojectionsettings.excludes
│       │   │       └── r2dbcsession.excludes
│       │   ├── resources
│       │   │   └── reference.conf
│       │   └── scala
│       │       └── org
│       │           └── apache
│       │               └── pekko
│       │                   └── projection
│       │                       └── r2dbc
│       │                           ├── R2dbcProjectionSettings.scala
│       │                           ├── internal
│       │                           │   ├── BySliceSourceProviderAdapter.scala
│       │                           │   ├── R2dbcHandlerAdapter.scala
│       │                           │   ├── R2dbcOffsetStore.scala
│       │                           │   ├── R2dbcProjectionImpl.scala
│       │                           │   └── mysql
│       │                           │       └── MySQLR2dbcOffsetStore.scala
│       │                           ├── javadsl
│       │                           │   ├── R2dbcHandler.scala
│       │                           │   ├── R2dbcProjection.scala
│       │                           │   └── R2dbcSession.scala
│       │                           └── scaladsl
│       │                               ├── R2dbcHandler.scala
│       │                               ├── R2dbcProjection.scala
│       │                               └── R2dbcSession.scala
│       └── test
│           ├── resources
│           │   └── logback-test.xml
│           └── scala
│               └── org
│                   └── apache
│                       └── pekko
│                           └── projection
│                               ├── TestStatusObserver.scala
│                               └── r2dbc
│                                   ├── DurableStateEndToEndSpec.scala
│                                   ├── EventSourcedChaosSpec.scala
│                                   ├── EventSourcedEndToEndSpec.scala
│                                   ├── EventSourcedPubSubSpec.scala
│                                   ├── R2dbcOffsetStoreSpec.scala
│                                   ├── R2dbcOffsetStoreStateSpec.scala
│                                   ├── R2dbcProjectionSpec.scala
│                                   ├── R2dbcTimestampOffsetProjectionSpec.scala
│                                   ├── R2dbcTimestampOffsetStoreSpec.scala
│                                   ├── TestClock.scala
│                                   ├── TestConfig.scala
│                                   ├── TestData.scala
│                                   ├── TestDbLifecycle.scala
│                                   └── TestSourceProviderWithInput.scala
└── scripts
    └── link-validator.conf
/.asf.yaml:
--------------------------------------------------------------------------------
1 | # https://cwiki.apache.org/confluence/display/INFRA/Git+-+.asf.yaml+features
2 |
3 | github:
4 | description: "Asynchronously writes journal and snapshot entries to configured R2DBC databases so that Apache Pekko Actors can recover state"
5 | homepage: https://pekko.apache.org/
6 | labels:
7 | - pekko
8 | - pekko-persistence
9 | - r2dbc
10 | - journal
11 |
12 | protected_tags:
13 | - "v*.*.*"
14 |
15 | dependabot_alerts: true
16 | dependabot_updates: true
17 |
18 | features:
19 | # Enable wiki for documentation
20 | wiki: false
21 | # Enable issue management
22 | issues: true
23 | # Enable projects for project management boards
24 | projects: false
25 | # Enable github discussions
26 | discussions: true
27 |
28 | enabled_merge_buttons:
29 | squash: true
30 | merge: false
31 | rebase: true
32 |
33 | protected_branches:
34 | main:
35 | required_status_checks:
36 | # strict means "Require branches to be up to date before merging".
37 | strict: false
38 | # contexts are the names of checks that must pass
39 | contexts:
40 | - Code is formatted
41 | - Check headers
42 | required_pull_request_reviews:
43 | dismiss_stale_reviews: false
44 | require_code_owner_reviews: false
45 | required_approving_review_count: 1
46 | 1.0.x:
47 | required_status_checks:
48 | # strict means "Require branches to be up to date before merging".
49 | strict: false
50 | # contexts are the names of checks that must pass
51 | contexts:
52 | - Code is formatted
53 | - Check headers
54 | required_pull_request_reviews:
55 | dismiss_stale_reviews: false
56 | require_code_owner_reviews: false
57 | required_approving_review_count: 1
58 |
59 | notifications:
60 | commits: commits@pekko.apache.org
61 | issues: notifications@pekko.apache.org
62 | pullrequests: notifications@pekko.apache.org
63 | discussions: notifications@pekko.apache.org
64 | jobs: notifications@pekko.apache.org
65 |
--------------------------------------------------------------------------------
/.git-blame-ignore-revs:
--------------------------------------------------------------------------------
1 | # scalafmt
2 | 97a7214a3ec0aa1a2ee6debcd0fce9ab6aa67492
3 |
4 | # manual
5 | 0dd31ce2c4f9673bd1a8c4b9ef4233559a188af4
6 |
7 | # Scala Steward: Reformat with scalafmt 3.8.2
8 | a141610a37f23c8843cc34213b9813a89b850b31
9 |
10 | # Scala Steward: Reformat with scalafmt 3.8.5
11 | 356e00197c017085fea10db78c825631f24bb1c2
12 |
--------------------------------------------------------------------------------
/.gitattributes:
--------------------------------------------------------------------------------
1 | # Activate line ending normalization, setting eol will make the behavior match core.autocrlf = input
2 | * text=auto eol=lf
3 | # Force batch scripts to always use CRLF line endings
4 | *.{cmd,[cC][mM][dD]} text eol=crlf
5 | *.{bat,[bB][aA][tT]} text eol=crlf
6 |
--------------------------------------------------------------------------------
/.github/actions/sync-nightlies/action.yml:
--------------------------------------------------------------------------------
1 | # Licensed to the Apache Software Foundation (ASF) under one
2 | # or more contributor license agreements. See the NOTICE file
3 | # distributed with this work for additional information
4 | # regarding copyright ownership. The ASF licenses this file
5 | # to you under the Apache License, Version 2.0 (the
6 | # "License"); you may not use this file except in compliance
7 | # with the License. You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing,
12 | # software distributed under the License is distributed on an
13 | # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | # KIND, either express or implied. See the License for the
15 | # specific language governing permissions and limitations
16 | # under the License.
17 |
18 | # Based on Apache Arrow's sync-nightlies action
19 | # https://github.com/apache/arrow/blob/master/.github/actions/sync-nightlies/action.yml
20 | name: 'Sync Nightlies'
21 | description: 'Sync files to and from nightlies.apache.org'
22 | inputs:
23 | upload:
24 | description: 'Sync from local to remote'
25 | default: false
26 | required: false
27 | switches:
28 | description: 'see rsync --help'
29 | required: true
30 | local_path:
31 | description: 'The relative local path within $GITHUB_WORKSPACE'
32 | required: true
33 | remote_path:
34 | description: 'The remote path incl. sub dirs e.g. {{secrets.path}}/arrow/r'
35 | required: true
36 | remote_host:
37 | description: 'The remote host'
38 | required: true
39 | remote_port:
40 | description: 'The remote port'
41 | required: false
42 | default: 22
43 | remote_user:
44 | description: 'The remote user'
45 | required: true
46 | remote_key:
47 | description: 'The remote key'
48 | required: true
49 |
50 | runs:
51 | using: "composite"
52 | steps:
53 | - name: Sync files
54 | shell: bash
55 | env:
56 | SWITCHES: "${{ inputs.switches }}"
57 | LOCAL_PATH: "${{ github.workspace }}/${{ inputs.local_path }}"
58 |
59 | SSH_KEY: "${{ inputs.remote_key }}"
60 | PORT: "${{ inputs.remote_port }}"
61 | USER: "${{ inputs.remote_user }}"
62 | HOST: "${{ inputs.remote_host }}"
63 | REMOTE_PATH: "${{ inputs.remote_path }}"
64 | run: |
65 | # Make SSH key available and add remote to known hosts
66 | eval "$(ssh-agent)" > /dev/null
67 | echo "$SSH_KEY" | tr -d '\r' | ssh-add - >/dev/null
68 | mkdir -p .ssh
69 | chmod go-rwx .ssh
70 | echo "$HOST_KEY" >> .ssh/known_hosts
71 | # strict errors
72 | set -eu
73 | # We have to use a custom RSH to supply the port
74 | RSH="ssh -o StrictHostKeyChecking=no -p $PORT"
75 | DSN="$USER@$HOST"
76 | # It is important to append '/' to the source path otherwise
77 | # the entire source dir will be created as a sub dir in the destination
78 | if [ "${{ inputs.upload }}" = true ]
79 | then
80 | SOURCE=$LOCAL_PATH/
81 | DEST=$DSN:$REMOTE_PATH
82 | else
83 | SOURCE=$DSN:$REMOTE_PATH/
84 | DEST=$LOCAL_PATH
85 | fi
86 | rsync $SWITCHES --rsh="$RSH" $SOURCE $DEST
87 |
--------------------------------------------------------------------------------
/.github/workflows/binary-compatibility-checks.yml:
--------------------------------------------------------------------------------
1 | name: Binary Compatibility
2 |
3 | on:
4 | pull_request:
5 | push:
6 | branches:
7 | - main
8 |
9 | permissions: {}
10 |
11 | jobs:
12 | check-binary-compatibility:
13 | name: Check / Binary Compatibility
14 | runs-on: ubuntu-22.04
15 | if: github.repository == 'apache/pekko-persistence-r2dbc'
16 | steps:
17 | - name: Checkout
18 | uses: actions/checkout@v4
19 | with:
20 | fetch-depth: 0
21 | fetch-tags: 0
22 |
23 | - name: Setup Java 11
24 | uses: actions/setup-java@v4
25 | with:
26 | distribution: temurin
27 | java-version: 11
28 |
29 | - name: Install sbt
30 | uses: sbt/setup-sbt@v1
31 |
32 | - name: Cache Coursier cache
33 | uses: coursier/cache-action@v6
34 |
35 | - name: Enable jvm-opts
36 | run: cp .jvmopts-ci .jvmopts
37 |
38 | - name: Compile code
39 | run: sbt +compile
40 |
41 | - name: Report MiMa Binary Issues
42 | run: |-
43 | sbt +mimaReportBinaryIssues
44 |
45 | - name: Check correct MiMa filter directories
46 | run: |
47 | sbt checkMimaFilterDirectories
48 |
--------------------------------------------------------------------------------
/.github/workflows/dependency-graph.yml:
--------------------------------------------------------------------------------
1 | name: Update Dependency Graph
2 | on:
3 | push:
4 | branches:
5 | - main # default branch of the project
6 | jobs:
7 | dependency-graph:
8 | name: Update Dependency Graph
9 | runs-on: ubuntu-latest
10 | if: github.repository == 'apache/pekko-persistence-r2dbc'
11 | steps:
12 | - uses: actions/checkout@v4
13 | - name: Install sbt
14 | uses: sbt/setup-sbt@v1
15 | - uses: scalacenter/sbt-dependency-submission@v2
16 |
--------------------------------------------------------------------------------
/.github/workflows/format.yml:
--------------------------------------------------------------------------------
1 | name: Scalafmt
2 |
3 | permissions: {}
4 |
5 | on:
6 | pull_request:
7 | branches: ['**']
8 |
9 | jobs:
10 | build:
11 | name: Code is formatted
12 | runs-on: ubuntu-latest
13 | steps:
14 | - name: Checkout current branch (full)
15 | uses: actions/checkout@v4
16 | with:
17 | fetch-depth: 0
18 | persist-credentials: false
19 |
20 | - name: Check project is formatted
21 | uses: jrouly/scalafmt-native-action@v4
22 | with:
23 | arguments: '--list --mode diff-ref=origin/main'
24 |
--------------------------------------------------------------------------------
/.github/workflows/headers.yml:
--------------------------------------------------------------------------------
1 | name: Headers
2 |
3 | on:
4 | pull_request:
5 |
6 | permissions: {}
7 |
8 | jobs:
9 | check-headers:
10 | name: Check headers
11 | runs-on: ubuntu-22.04
12 | steps:
13 | - name: Checkout
14 | uses: actions/checkout@v4
15 |
16 | - name: Setup Java 11
17 | uses: actions/setup-java@v4
18 | with:
19 | distribution: temurin
20 | java-version: 11
21 |
22 | - name: Install sbt
23 | uses: sbt/setup-sbt@v1
24 |
25 | - name: Cache Coursier cache
26 | uses: coursier/cache-action@v6
27 |
28 | - name: Check headers
29 | run: |-
30 | sbt \
31 | -Dsbt.override.build.repos=false \
32 | -Dsbt.log.noformat=false \
33 | +headerCheckAll
34 |
--------------------------------------------------------------------------------
/.github/workflows/link-validator.yml:
--------------------------------------------------------------------------------
1 | name: Link Validator
2 |
3 | permissions: {}
4 |
5 | on:
6 | pull_request:
7 | workflow_dispatch:
8 | schedule:
9 | - cron: '0 6 * * 1'
10 |
11 | jobs:
12 | validate-links:
13 | runs-on: ubuntu-22.04
14 | if: github.repository == 'apache/pekko-persistence-r2dbc'
15 | steps:
16 | - name: Checkout
17 | uses: actions/checkout@v4
18 |
19 | - name: Checkout GitHub merge
20 | if: github.event.pull_request
21 | run: |-
22 | git fetch origin pull/${{ github.event.pull_request.number }}/merge:scratch
23 | git checkout scratch
24 |
25 | - name: Setup Java 8
26 | uses: actions/setup-java@v4
27 | with:
28 | distribution: temurin
29 | java-version: 8
30 |
31 | - name: Install sbt
32 | uses: sbt/setup-sbt@v1
33 |
34 | - name: Cache Coursier cache
35 | uses: coursier/cache-action@v6
36 |
37 | - name: Setup Coursier
38 | uses: coursier/setup-action@v1
39 |
40 | - name: Create the Pekko site
41 | run: sbt "unidoc; docs/paradox"
42 |
43 | - name: Run Link Validator
44 | run: cs launch net.runne::site-link-validator:0.2.5 -- scripts/link-validator.conf
45 |
--------------------------------------------------------------------------------
/.github/workflows/publish-1.0-docs.yml:
--------------------------------------------------------------------------------
1 | name: Publish 1.0 docs
2 |
3 | on:
4 | workflow_dispatch:
5 |
6 | jobs:
7 | publish-docs:
8 | if: github.repository == 'apache/pekko-persistence-r2dbc'
9 | name: Publish 1.0 Docs
10 | runs-on: ubuntu-22.04
11 | env:
12 | JAVA_OPTS: -Xms2G -Xmx2G -Xss2M -XX:ReservedCodeCacheSize=256M -Dfile.encoding=UTF-8
13 | steps:
14 | - name: Checkout
15 | uses: actions/checkout@v4
16 | with:
17 | # we don't know what commit the last tag was on; it's safer to fetch the entire repo so previousStableVersion resolves
18 | fetch-depth: 0
19 | fetch-tags: true
20 | ref: 1.0.x
21 |
22 | - name: Setup Java 8
23 | uses: actions/setup-java@v4
24 | with:
25 | distribution: temurin
26 | java-version: 8
27 |
28 | - name: Install sbt
29 | uses: sbt/setup-sbt@v1
30 |
31 | - name: Install Graphviz
32 | run: |-
33 | sudo apt-get install graphviz
34 |
35 | - name: Build 1.0.x Documentation
36 | run: |-
37 | sbt "set ThisBuild / version := \"1.0.0\"; docs/paradox; unidoc"
38 |
39 | # Create directory structure upfront since rsync does not create intermediate directories otherwise
40 | - name: Create 1.0.x directory structure
41 | run: |-
42 | mkdir -p target/nightly-docs/docs/pekko-persistence-r2dbc/1.0.0/
43 | mkdir -p target/nightly-docs/docs/pekko-persistence-r2dbc/1.0/
44 | cp -r docs/target/paradox/site/main/ target/nightly-docs/docs/pekko-persistence-r2dbc/1.0.0/docs
45 | cp -r docs/target/paradox/site/main/ target/nightly-docs/docs/pekko-persistence-r2dbc/1.0/docs
46 | rm -r docs/target/paradox/site/main/
47 | cp -r docs/target/scala-2.13/unidoc target/nightly-docs/docs/pekko-persistence-r2dbc/1.0.0/api
48 | cp -r docs/target/scala-2.13/unidoc target/nightly-docs/docs/pekko-persistence-r2dbc/1.0/api
49 | rm -r docs/target/scala-2.13/unidoc
50 |
51 | - name: Upload 1.0.x docs
52 | uses: ./.github/actions/sync-nightlies
53 | with:
54 | upload: true
55 | switches: --archive --compress --update --delete --progress --relative
56 | local_path: target/nightly-docs/./docs/pekko-persistence-r2dbc/1.0.0 # The intermediate dot is to show `--relative` which paths to operate on
57 | remote_path: ${{ secrets.NIGHTLIES_RSYNC_PATH }}/pekko/
58 | remote_host: ${{ secrets.NIGHTLIES_RSYNC_HOST }}
59 | remote_port: ${{ secrets.NIGHTLIES_RSYNC_PORT }}
60 | remote_user: ${{ secrets.NIGHTLIES_RSYNC_USER }}
61 | remote_key: ${{ secrets.NIGHTLIES_RSYNC_KEY }}
62 |
63 | - name: Upload 1.0 docs
64 | uses: ./.github/actions/sync-nightlies
65 | with:
66 | upload: true
67 | switches: --archive --compress --update --delete --progress --relative
68 | local_path: target/nightly-docs/./docs/pekko-persistence-r2dbc/1.0 # The intermediate dot is to show `--relative` which paths to operate on
69 | remote_path: ${{ secrets.NIGHTLIES_RSYNC_PATH }}/pekko/
70 | remote_host: ${{ secrets.NIGHTLIES_RSYNC_HOST }}
71 | remote_port: ${{ secrets.NIGHTLIES_RSYNC_PORT }}
72 | remote_user: ${{ secrets.NIGHTLIES_RSYNC_USER }}
73 | remote_key: ${{ secrets.NIGHTLIES_RSYNC_KEY }}
74 |
--------------------------------------------------------------------------------
/.github/workflows/publish-1.1-docs.yml:
--------------------------------------------------------------------------------
1 | name: Publish 1.1 docs
2 |
3 | on:
4 | workflow_dispatch:
5 |
6 | jobs:
7 | publish-docs:
8 | if: github.repository == 'apache/pekko-persistence-r2dbc'
9 | name: Publish 1.1 Docs
10 | runs-on: ubuntu-22.04
11 | env:
12 | JAVA_OPTS: -Xms2G -Xmx2G -Xss2M -XX:ReservedCodeCacheSize=256M -Dfile.encoding=UTF-8
13 | steps:
14 | - name: Checkout
15 | uses: actions/checkout@v4
16 | with:
17 | # we don't know what commit the last tag was on; it's safer to fetch the entire repo so previousStableVersion resolves
18 | fetch-depth: 0
19 | fetch-tags: true
20 |
21 | - name: Setup Java 8
22 | uses: actions/setup-java@v4
23 | with:
24 | distribution: temurin
25 | java-version: 8
26 |
27 | - name: Install sbt
28 | uses: sbt/setup-sbt@v1
29 |
30 | - name: Install Graphviz
31 | run: |-
32 | sudo apt-get install graphviz
33 |
34 | - name: Build 1.1.x Documentation
35 | run: |-
36 | sbt "set ThisBuild / version := \"1.1.0-M1\"; docs/paradox; unidoc"
37 |
38 | # Create directory structure upfront since rsync does not create intermediate directories otherwise
39 | - name: Create 1.1.x directory structure
40 | run: |-
41 | mkdir -p target/nightly-docs/docs/pekko-persistence-r2dbc/1.1.0-M1/
42 | mkdir -p target/nightly-docs/docs/pekko-persistence-r2dbc/1.1/
43 | cp -r docs/target/paradox/site/main/ target/nightly-docs/docs/pekko-persistence-r2dbc/1.1.0-M1/docs
44 | cp -r docs/target/paradox/site/main/ target/nightly-docs/docs/pekko-persistence-r2dbc/1.1/docs
45 | rm -r docs/target/paradox/site/main/
46 | cp -r docs/target/scala-2.13/unidoc target/nightly-docs/docs/pekko-persistence-r2dbc/1.1.0-M1/api
47 | cp -r docs/target/scala-2.13/unidoc target/nightly-docs/docs/pekko-persistence-r2dbc/1.1/api
48 | rm -r docs/target/scala-2.13/unidoc
49 |
50 | - name: Upload 1.1.x docs
51 | uses: ./.github/actions/sync-nightlies
52 | with:
53 | upload: true
54 | switches: --archive --compress --update --delete --progress --relative
55 | local_path: target/nightly-docs/./docs/pekko-persistence-r2dbc/1.1.0-M1 # The intermediate dot is to show `--relative` which paths to operate on
56 | remote_path: ${{ secrets.NIGHTLIES_RSYNC_PATH }}/pekko/
57 | remote_host: ${{ secrets.NIGHTLIES_RSYNC_HOST }}
58 | remote_port: ${{ secrets.NIGHTLIES_RSYNC_PORT }}
59 | remote_user: ${{ secrets.NIGHTLIES_RSYNC_USER }}
60 | remote_key: ${{ secrets.NIGHTLIES_RSYNC_KEY }}
61 |
62 | - name: Upload 1.1 docs
63 | uses: ./.github/actions/sync-nightlies
64 | with:
65 | upload: true
66 | switches: --archive --compress --update --delete --progress --relative
67 | local_path: target/nightly-docs/./docs/pekko-persistence-r2dbc/1.1 # The intermediate dot is to show `--relative` which paths to operate on
68 | remote_path: ${{ secrets.NIGHTLIES_RSYNC_PATH }}/pekko/
69 | remote_host: ${{ secrets.NIGHTLIES_RSYNC_HOST }}
70 | remote_port: ${{ secrets.NIGHTLIES_RSYNC_PORT }}
71 | remote_user: ${{ secrets.NIGHTLIES_RSYNC_USER }}
72 | remote_key: ${{ secrets.NIGHTLIES_RSYNC_KEY }}
73 |
--------------------------------------------------------------------------------
/.github/workflows/publish-nightly.yml:
--------------------------------------------------------------------------------
1 | name: Publish Nightly
2 |
3 | on:
4 | workflow_dispatch:
5 | schedule:
6 | - cron: "22 0 * * *"
7 |
8 | jobs:
9 | publish:
10 | # runs on main repo only
11 | if: github.repository == 'apache/pekko-persistence-r2dbc'
12 | name: Publish
13 | runs-on: ubuntu-22.04
14 | env:
15 | JAVA_OPTS: -Xms2G -Xmx2G -Xss2M -XX:ReservedCodeCacheSize=256M -Dfile.encoding=UTF-8
16 | steps:
17 | - name: Checkout
18 | uses: actions/checkout@v4
19 | with:
20 | # we don't know what commit the last tag was on; it's safer to fetch the entire repo so previousStableVersion resolves
21 | fetch-depth: 0
22 | fetch-tags: true
23 |
24 | - name: Setup Java 8
25 | uses: actions/setup-java@v4
26 | with:
27 | distribution: temurin
28 | java-version: 8
29 |
30 | - name: Install sbt
31 | uses: sbt/setup-sbt@v1
32 |
33 | - name: Install Graphviz
34 | run: |-
35 | sudo apt-get install graphviz
36 |
37 | - name: Publish to Apache Maven repo
38 | env:
39 | NEXUS_USER: ${{ secrets.NEXUS_USER }}
40 | NEXUS_PW: ${{ secrets.NEXUS_PW }}
41 | run: sbt +publish
42 |
43 | - name: Build Documentation
44 | run: |-
45 | sbt docs/paradox unidoc
46 |
47 | # Create directory structure upfront since rsync does not create intermediate directories otherwise
48 | - name: Create nightly directory structure
49 | run: |-
50 | mkdir -p target/nightly-docs/docs/pekko-persistence-r2dbc/${{ github.ref_name }}-snapshot/
51 | mv docs/target/paradox/site/main/ target/nightly-docs/docs/pekko-persistence-r2dbc/${{ github.ref_name }}-snapshot/docs
52 | mv docs/target/scala-2.13/unidoc target/nightly-docs/docs/pekko-persistence-r2dbc/${{ github.ref_name }}-snapshot/api
53 |
54 | - name: Upload nightly docs
55 | uses: ./.github/actions/sync-nightlies
56 | with:
57 | upload: true
58 | switches: --archive --compress --update --delete --progress --relative
59 | local_path: target/nightly-docs/./docs/pekko-persistence-r2dbc/${{ github.ref_name }}-snapshot # The intermediate dot is to show `--relative` which paths to operate on
60 | remote_path: ${{ secrets.NIGHTLIES_RSYNC_PATH }}/pekko/
61 | remote_host: ${{ secrets.NIGHTLIES_RSYNC_HOST }}
62 | remote_port: ${{ secrets.NIGHTLIES_RSYNC_PORT }}
63 | remote_user: ${{ secrets.NIGHTLIES_RSYNC_USER }}
64 | remote_key: ${{ secrets.NIGHTLIES_RSYNC_KEY }}
65 |
--------------------------------------------------------------------------------
/.github/workflows/scala-steward.yml:
--------------------------------------------------------------------------------
1 | on:
2 | workflow_dispatch:
3 | schedule:
4 | - cron: '0 0 * * 0'
5 |
6 | name: Launch Scala Steward
7 |
8 | jobs:
9 | scala-steward:
10 | runs-on: ubuntu-22.04
11 | name: Launch Scala Steward
12 | if: github.repository == 'apache/pekko-persistence-r2dbc'
13 | steps:
14 | - name: Launch Scala Steward
15 | uses: scala-steward-org/scala-steward-action@v2
16 | env:
17 | JAVA_OPTS: "-Xms1G -Xmx1G -Xss2M"
18 | with:
19 | github-app-id: ${{ secrets.SCALA_STEWARD_APP_ID }}
20 | github-app-installation-id: ${{ secrets.SCALA_STEWARD_INSTALLATION_ID }}
21 | github-app-key: ${{ secrets.SCALA_STEWARD_PRIVATE_KEY }}
22 | github-app-auth-only: true
23 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .idea*
2 | *.env
3 | *.log
4 | *.iml
5 | target/
6 | /.target/
7 | .DS_Store
8 | .cache*
9 | .classpath
10 | .project
11 | .settings
12 | .tmpBin/
13 | *.sublime-project
14 | /bin/
15 | ext-lib-src/
16 | .classpath_nb
17 | .bsp
18 | metals.sbt
19 |
--------------------------------------------------------------------------------
/.jvmopts:
--------------------------------------------------------------------------------
1 | # This is used to configure the sbt instance for local development
2 |
3 | -Xms2G
4 | -Xmx2G
5 | -Xss2M
6 |
--------------------------------------------------------------------------------
/.jvmopts-ci:
--------------------------------------------------------------------------------
1 | # This is used to configure the sbt instance that CI launches
2 |
3 | -Xms1G
4 | -Xmx1G
5 | -Xss2M
--------------------------------------------------------------------------------
/.scala-steward.conf:
--------------------------------------------------------------------------------
1 | updates.ignore = [
2 | ]
3 |
4 | updates.pin = [
5 |
6 | # Pin logback to v1.3.x because v1.4.x needs JDK11
7 | { groupId = "ch.qos.logback", version="1.3." }
8 |
9 | # Pin sbt-paradox to v0.9.x because 0.10.x needs JDK 11
10 | { groupId = "com.lightbend.paradox", artifactId = "sbt-paradox-project-info", version = "0.9." },
11 | { groupId = "com.lightbend.paradox", artifactId = "sbt-paradox", version = "0.9." }
12 |
13 | # Pin sbt-java-formatter to v0.9.x because 0.10.x needs JDK 11
14 | { groupId = "com.github.sbt", artifactId = "sbt-java-formatter", version = "0.9." }
15 |
16 | # Scala 3.3 is an LTS release
17 | { groupId = "org.scala-lang", artifactId = "scala3-library", version = "3.3." }
18 | ]
19 |
20 | updatePullRequests = "always"
21 |
--------------------------------------------------------------------------------
/.scalafmt.conf:
--------------------------------------------------------------------------------
1 | version = 3.9.3
2 | runner.dialect = scala213
3 | project.git = true
4 | style = defaultWithAlign
5 | docstrings.style = Asterisk
6 | docstrings.wrap = false
7 | indentOperator.preset = spray
8 | maxColumn = 120
9 | lineEndings = preserve
10 | rewrite.rules = [RedundantParens, SortImports, AvoidInfix]
11 | indentOperator.exemptScope = all
12 | align.preset = some
13 | align.tokens."+" = [
14 | {
15 | code = "~>"
16 | owners = [
17 | { regex = "Term.ApplyInfix" }
18 | ]
19 | }
20 | ]
21 | literals.hexDigits = upper
22 | literals.hexPrefix = lower
23 | binPack.unsafeCallSite = always
24 | binPack.unsafeDefnSite = always
25 | binPack.indentCallSiteSingleArg = false
26 | binPack.indentCallSiteOnce = true
27 | newlines.avoidForSimpleOverflow = [slc]
28 | newlines.source = keep
29 | newlines.beforeMultiline = keep
30 | align.openParenDefnSite = false
31 | align.openParenCallSite = false
32 | align.allowOverflow = true
33 | optIn.breakChainOnFirstMethodDot = false
34 | optIn.configStyleArguments = false
35 | danglingParentheses.preset = false
36 | spaces.inImportCurlyBraces = true
37 | rewrite.neverInfix.excludeFilters = [
38 | and
39 | min
40 | max
41 | until
42 | to
43 | by
44 | eq
45 | ne
46 | "should.*"
47 | "contain.*"
48 | "must.*"
49 | in
50 | ignore
51 | be
52 | taggedAs
53 | thrownBy
54 | synchronized
55 | have
56 | when
57 | size
58 | only
59 | noneOf
60 | oneElementOf
61 | noElementsOf
62 | atLeastOneElementOf
63 | atMostOneElementOf
64 | allElementsOf
65 | inOrderElementsOf
66 | theSameElementsAs
67 | theSameElementsInOrderAs
68 | ]
69 | rewriteTokens = {
70 | "⇒": "=>"
71 | "→": "->"
72 | "←": "<-"
73 | }
74 | project.excludeFilters = [
75 | "scripts/authors.scala"
76 | ]
77 | project.layout = StandardConvention
78 |
--------------------------------------------------------------------------------
/CHANGELOG.md:
--------------------------------------------------------------------------------
1 | # Apache Pekko Persistence R2DBC Releases
2 |
3 | The [Release Notes](https://pekko.apache.org/docs/pekko-persistence-r2dbc/current/release-notes.html) are available on the Apache Pekko web site.
4 |
--------------------------------------------------------------------------------
/NOTICE:
--------------------------------------------------------------------------------
1 | Apache Pekko Persistence R2DBC
2 | Copyright 2022-2025 The Apache Software Foundation
3 |
4 | This product includes software developed at
5 | The Apache Software Foundation (https://www.apache.org/).
6 |
7 | This product contains significant parts that were originally based on software from Lightbend (Akka).
8 | Copyright (C) 2021-2022 Lightbend Inc.
9 |
10 | Apache Pekko Persistence R2DBC is derived from Akka Persistence R2DBC 0.7.x, the last version that was distributed under the
11 | Apache License, Version 2.0.
12 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # R2DBC Plugin for Apache Pekko Persistence
2 |
3 | Pekko Persistence journal and snapshot
4 | store for SQL databases with R2DBC connectivity.
5 |
6 | ## Project status
7 |
8 | Version 1.0.0 has been released. Any contributions are welcome. Please observe the [Code of Conduct](https://github.com/apache/pekko-persistence-r2dbc?tab=coc-ov-file#readme).
9 |
10 | ## Documentation
11 |
12 | * [Current Apache Pekko Persistence R2DBC documentation](https://pekko.apache.org/docs/pekko-persistence-r2dbc/current/)
13 |
14 | ## Community
15 |
16 | There are several ways to interact with the Pekko community:
17 |
18 | - [GitHub discussions](https://github.com/apache/pekko-persistence-r2dbc/discussions): for questions and general discussion.
19 | - [Pekko dev mailing list](https://lists.apache.org/list.html?dev@pekko.apache.org): for Pekko development discussions.
20 | - [Pekko users mailing list](https://lists.apache.org/list.html?users@pekko.apache.org): for Pekko user discussions.
21 | - [GitHub issues](https://github.com/apache/pekko-persistence-r2dbc/issues): for bug reports and feature requests. Please search the existing issues before creating new ones. If you are unsure whether you have found a bug, consider asking in GitHub discussions or the mailing list first.
22 |
23 | ## License
24 |
25 | Pekko Persistence R2DBC is Open Source and available under the Apache License 2.0.
26 |
--------------------------------------------------------------------------------
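For orientation, below is a minimal sbt sketch for pulling in the core module described in the README above. The coordinates are assumed to follow the usual Apache Pekko naming and the 1.0.0 version mentioned under project status; check the linked documentation for the current release before copying this.

// Hypothetical build.sbt fragment (coordinates and version assumed, not taken
// from this repository's build definition).
libraryDependencies += "org.apache.pekko" %% "pekko-persistence-r2dbc" % "1.0.0"
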
/core/src/main/scala/org/apache/pekko/persistence/r2dbc/internal/MonoToFuture.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Licensed to the Apache Software Foundation (ASF) under one or more
3 | * contributor license agreements. See the NOTICE file distributed with
4 | * this work for additional information regarding copyright ownership.
5 | * The ASF licenses this file to You under the Apache License, Version 2.0
6 | * (the "License"); you may not use this file except in compliance with
7 | * the License. You may obtain a copy of the License at
8 | *
9 | * http://www.apache.org/licenses/LICENSE-2.0
10 | *
11 | * Unless required by applicable law or agreed to in writing, software
12 | * distributed under the License is distributed on an "AS IS" BASIS,
13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | * See the License for the specific language governing permissions and
15 | * limitations under the License.
16 | */
17 |
18 | package org.apache.pekko.persistence.r2dbc.internal
19 |
20 | import org.apache.pekko.annotation.InternalApi
21 | import org.reactivestreams.Subscription
22 | import reactor.core.CoreSubscriber
23 | import reactor.core.publisher.Operators
24 |
25 | import java.util.concurrent.atomic.AtomicReference
26 | import scala.concurrent.{ Future, Promise }
27 |
28 | /**
29 | * INTERNAL API
30 | */
31 | @InternalApi
32 | private[r2dbc] final class MonoToFuture[T] extends AtomicReference[Subscription] with CoreSubscriber[T] {
33 | private final val promise = Promise[T]()
34 |
35 | override def onSubscribe(s: Subscription): Unit = {
36 | if (Operators.validate(getAndSet(s), s)) {
37 | s.request(1) // we just need 1 value.
38 | } else {
39 | s.cancel()
40 | }
41 | }
42 |
43 | override def onNext(t: T): Unit = {
44 | val currentSubscription = getAndSet(null)
45 | if (currentSubscription ne null) {
46 | promise.success(t)
47 | // NOTE: We should not call cancel here when subscribing to a Mono
48 | // https://github.com/reactor/reactor-core/issues/2070
49 | } else Operators.onNextDropped(t, currentContext())
50 | }
51 |
52 | override def onError(t: Throwable): Unit = {
53 | if (getAndSet(null) ne null) {
54 | promise.failure(t)
55 | } else Operators.onErrorDropped(t, currentContext())
56 | }
57 |
58 | override def onComplete(): Unit = {
59 | if (getAndSet(null) ne null) {
60 | promise.success(null.asInstanceOf[T])
61 | }
62 | }
63 |
64 | def future: Future[T] = promise.future
65 | }
66 |
--------------------------------------------------------------------------------
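As a rough illustration of how the adapter above is meant to be used: it subscribes to a Reactor Mono, requests exactly one element, and completes a Scala Promise with that element, with the error, or with null when the Mono completes empty. The sketch below is illustrative only and pretends to live in the same internal package, since MonoToFuture is private[r2dbc]; the object and method names are made up for the example.

package org.apache.pekko.persistence.r2dbc.internal

import scala.concurrent.Future

import reactor.core.publisher.Mono

object MonoToFutureSketch {
  // Adapt a single-element Mono to a Scala Future by subscribing the adapter.
  def toFuture[T](mono: Mono[T]): Future[T] = {
    val adapter = new MonoToFuture[T]
    mono.subscribe(adapter) // onSubscribe requests exactly one element
    adapter.future // completed by onNext, onError or onComplete
  }

  def main(args: Array[String]): Unit =
    // Mono.just completes synchronously, so the future is already resolved here.
    println(toFuture(Mono.just("ok")))
}
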
/core/src/main/scala/org/apache/pekko/persistence/r2dbc/internal/PubSub.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Licensed to the Apache Software Foundation (ASF) under one or more
3 | * license agreements; and to You under the Apache License, version 2.0:
4 | *
5 | * https://www.apache.org/licenses/LICENSE-2.0
6 | *
7 | * This file is part of the Apache Pekko project, which was derived from Akka.
8 | */
9 |
10 | /*
11 | * Copyright (C) 2021 Lightbend Inc.
12 | */
13 |
14 | package org.apache.pekko.persistence.r2dbc.internal
15 |
16 | import java.net.URLEncoder
17 | import java.nio.charset.StandardCharsets
18 | import java.time.Instant
19 | import java.util.concurrent.ConcurrentHashMap
20 |
21 | import org.apache.pekko
22 | import pekko.actor.typed.ActorRef
23 | import pekko.actor.typed.ActorSystem
24 | import pekko.actor.typed.Extension
25 | import pekko.actor.typed.ExtensionId
26 | import pekko.actor.typed.pubsub.Topic
27 | import pekko.annotation.InternalApi
28 | import pekko.persistence.Persistence
29 | import pekko.persistence.PersistentRepr
30 | import pekko.persistence.journal.Tagged
31 | import pekko.persistence.query.TimestampOffset
32 | import pekko.persistence.query.typed.EventEnvelope
33 | import pekko.persistence.typed.PersistenceId
34 |
35 | /**
36 | * INTERNAL API
37 | */
38 | @InternalApi private[pekko] object PubSub extends ExtensionId[PubSub] {
39 | def createExtension(system: ActorSystem[_]): PubSub = new PubSub(system)
40 |
41 | // Java API
42 | def get(system: ActorSystem[_]): PubSub = apply(system)
43 |
44 | }
45 |
46 | /**
47 | * INTERNAL API
48 | */
49 | @InternalApi private[pekko] class PubSub(system: ActorSystem[_]) extends Extension {
50 | private val topics = new ConcurrentHashMap[String, ActorRef[Any]]
51 | private val persistenceExt = Persistence(system)
52 |
53 | def eventTopic[Event](entityType: String, slice: Int): ActorRef[Topic.Command[EventEnvelope[Event]]] = {
54 | val name = topicName(entityType, slice)
55 | topics
56 | .computeIfAbsent(name, _ => system.systemActorOf(Topic[EventEnvelope[Event]](name), name).unsafeUpcast[Any])
57 | .narrow[Topic.Command[EventEnvelope[Event]]]
58 | }
59 |
60 | private def topicName(entityType: String, slice: Int): String =
61 | URLEncoder.encode(s"r2dbc-$entityType-$slice", StandardCharsets.UTF_8.name())
62 |
63 | def publish(pr: PersistentRepr, timestamp: Instant): Unit = {
64 | val pid = pr.persistenceId
65 | val entityType = PersistenceId.extractEntityType(pid)
66 | val slice = persistenceExt.sliceForPersistenceId(pid)
67 |
68 | val offset = TimestampOffset(timestamp, timestamp, Map(pid -> pr.sequenceNr))
69 | val payload =
70 | pr.payload match {
71 | case Tagged(payload, _) =>
72 | // eventsByTag not implemented (see issue #82), but events can still be tagged, so we unwrap this tagged event.
73 | payload
74 |
75 | case other => other
76 | }
77 |
78 | val envelope = new EventEnvelope(
79 | offset,
80 | pid,
81 | pr.sequenceNr,
82 | Option(payload),
83 | timestamp.toEpochMilli,
84 | pr.metadata,
85 | entityType,
86 | slice)
87 | eventTopic(entityType, slice) ! Topic.Publish(envelope)
88 | }
89 | }
90 |
--------------------------------------------------------------------------------
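The envelopes that PubSub.publish sends end up on a topic per entity type and slice. A hedged sketch of the consuming side follows (illustrative only; PubSub is internal API and the read journal is what normally subscribes, so the object and method below are made up):

package org.apache.pekko.persistence.r2dbc.internal

import org.apache.pekko
import pekko.actor.typed.{ ActorRef, ActorSystem }
import pekko.actor.typed.pubsub.Topic
import pekko.persistence.query.typed.EventEnvelope

object PubSubSketch {
  // Subscribe an actor to the topic that PubSub.publish targets for the given
  // entity type and slice.
  def subscribe[Event](
      system: ActorSystem[_],
      entityType: String,
      slice: Int,
      subscriber: ActorRef[EventEnvelope[Event]]): Unit =
    PubSub(system).eventTopic[Event](entityType, slice) ! Topic.Subscribe(subscriber)
}
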
/core/src/main/scala/org/apache/pekko/persistence/r2dbc/internal/Sql.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Licensed to the Apache Software Foundation (ASF) under one or more
3 | * license agreements; and to You under the Apache License, version 2.0:
4 | *
5 | * https://www.apache.org/licenses/LICENSE-2.0
6 | *
7 | * This file is part of the Apache Pekko project, which was derived from Akka.
8 | */
9 |
10 | /*
11 | * Copyright (C) 2021 Lightbend Inc.
12 | */
13 |
14 | package org.apache.pekko.persistence.r2dbc.internal
15 |
16 | import scala.annotation.varargs
17 | import org.apache.pekko
18 | import pekko.annotation.InternalApi
19 | import pekko.annotation.InternalStableApi
20 | import pekko.persistence.r2dbc.Dialect
21 |
22 | /**
23 | * INTERNAL API: Utility to format SQL strings. Replaces `?` with numbered `\$1`, `\$2` for bind parameters. Trims
24 | * whitespace, including line breaks.
25 | */
26 | @InternalStableApi
27 | object Sql {
28 |
29 | /**
30 | * INTERNAL API
31 | */
32 | @InternalApi
33 | private[r2dbc] implicit class DialectOps(dialect: Dialect) {
34 | def replaceParameters(sql: String): String = {
35 | dialect match {
36 | case Dialect.Postgres | Dialect.Yugabyte =>
37 | fillInParameterNumbers(sql)
38 | case Dialect.MySQL =>
39 | sql
40 | }
41 | }
42 | }
43 |
44 | /**
45 | * Scala string interpolation with `sql` prefix. Replaces `?` with numbered `\$1`, `\$2` for bind parameters. Trims
46 | * whitespace, including line breaks. Standard string interpolation arguments `$` can be used.
47 | */
48 | implicit class Interpolation(val sc: StringContext) extends AnyVal {
49 | def sql(args: Any*): String =
50 | fillInParameterNumbers(trimLineBreaks(sc.s(args: _*)))
51 | }
52 |
53 | /**
54 | * INTERNAL API
55 | */
56 | @InternalApi
57 | private[pekko] implicit class DialectInterpolation(val sc: StringContext) extends AnyVal {
58 | def sql(args: Any*)(implicit dialect: Dialect): String =
59 | dialect.replaceParameters(trimLineBreaks(sc.s(args: _*)))
60 | }
61 |
62 | /**
63 | * Java API: Replaces `?` with numbered `\$1`, `\$2` for bind parameters. Trims whitespace, including line breaks. The
64 | * arguments are used like in [[java.lang.String.format]].
65 | */
66 | @varargs
67 | def format(sql: String, args: AnyRef*): String =
68 | fillInParameterNumbers(trimLineBreaks(sql.format(args: _*)))
69 |
70 | private def fillInParameterNumbers(sql: String): String = {
71 | if (sql.indexOf('?') == -1) {
72 | sql
73 | } else {
74 | val sb = new java.lang.StringBuilder(sql.length + 10)
75 | var n = 0
76 | var i = 0
77 | while (i < sql.length) {
78 | val c = sql.charAt(i)
79 | if (c == '?') {
80 | n += 1
81 | sb.append('$').append(n)
82 | } else {
83 | sb.append(c)
84 | }
85 | i += 1
86 | }
87 | sb.toString
88 | }
89 | }
90 |
91 | private def trimLineBreaks(sql: String): String = {
92 | if (sql.indexOf('\n') == -1) {
93 | sql.trim
94 | } else {
95 | sql.trim.split('\n').map(_.trim).mkString(" ")
96 | }
97 | }
98 |
99 | }
100 |
--------------------------------------------------------------------------------
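To make the interpolator's behaviour concrete, here is a small sketch of the `sql` string interpolation documented above: each `?` becomes a numbered bind parameter and line breaks are collapsed to single spaces (with the dialect-aware variant, the MySQL dialect keeps plain `?` placeholders). The table and column names are only illustrative.

import org.apache.pekko.persistence.r2dbc.internal.Sql.Interpolation

object SqlInterpolationSketch {
  // `?` placeholders are rewritten to numbered bind parameters and the
  // multi-line literal is trimmed to a single line.
  val selectEvents: String =
    sql"""
      SELECT event_payload
      FROM event_journal
      WHERE persistence_id = ? AND seq_nr >= ?"""
  // yields: "SELECT event_payload FROM event_journal WHERE persistence_id = $1 AND seq_nr >= $2"

  def main(args: Array[String]): Unit = println(selectEvents)
}
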
/core/src/main/scala/org/apache/pekko/persistence/r2dbc/journal/mysql/MySQLJournalDao.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Licensed to the Apache Software Foundation (ASF) under one
3 | * or more contributor license agreements. See the NOTICE file
4 | * distributed with this work for additional information
5 | * regarding copyright ownership. The ASF licenses this file
6 | * to you under the Apache License, Version 2.0 (the
7 | * "License"); you may not use this file except in compliance
8 | * with the License. You may obtain a copy of the License at
9 | *
10 | * http://www.apache.org/licenses/LICENSE-2.0
11 | *
12 | * Unless required by applicable law or agreed to in writing,
13 | * software distributed under the License is distributed on an
14 | * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
15 | * KIND, either express or implied. See the License for the
16 | * specific language governing permissions and limitations
17 | * under the License.
18 | */
19 |
20 | package org.apache.pekko.persistence.r2dbc.journal.mysql
21 |
22 | import scala.concurrent.ExecutionContext
23 | import io.r2dbc.spi.ConnectionFactory
24 | import org.apache.pekko
25 | import pekko.actor.typed.ActorSystem
26 | import pekko.annotation.InternalApi
27 | import pekko.persistence.r2dbc.R2dbcSettings
28 | import pekko.persistence.r2dbc.internal.Sql.DialectInterpolation
29 | import pekko.persistence.r2dbc.journal.JournalDao
30 |
31 | /**
32 | * INTERNAL API
33 | */
34 | @InternalApi
35 | private[r2dbc] object MySQLJournalDao {
36 | def settingRequirements(journalSettings: R2dbcSettings): Unit = {
37 | // Application timestamps are used because MySQL does not have transaction_timestamp like Postgres. In future releases
38 | // emulation could be attempted, but the benefits are questionable - no matter where the timestamps are generated,
39 | // the risk of clock skew remains.
40 | require(journalSettings.useAppTimestamp,
41 | "use-app-timestamp config must be on for MySQL support")
42 | // Supporting non-monotonically increasing timestamps by incrementing the timestamp in the insert queries, based on the
43 | // latest row in the database, seems to cause deadlocks when running tests like PersistTimestampSpec. Possibly this could
44 | // be fixed.
45 | require(journalSettings.dbTimestampMonotonicIncreasing,
46 | "db-timestamp-monotonic-increasing config must be on for MySQL support")
47 | // Also, the missing RETURNING support makes retrieving the database-generated timestamp less efficient - this
48 | // applies to both of the requirements above.
49 | }
50 | }
51 |
52 | /**
53 | * INTERNAL API
54 | */
55 | @InternalApi
56 | private[r2dbc] class MySQLJournalDao(
57 | journalSettings: R2dbcSettings,
58 | connectionFactory: ConnectionFactory)(
59 | implicit ec: ExecutionContext, system: ActorSystem[_]
60 | ) extends JournalDao(journalSettings, connectionFactory) {
61 | MySQLJournalDao.settingRequirements(journalSettings)
62 |
63 | override lazy val timestampSql: String = "NOW(6)"
64 |
65 | override val insertEventWithParameterTimestampSql: String =
66 | sql"INSERT INTO $journalTable " +
67 | "(slice, entity_type, persistence_id, seq_nr, writer, adapter_manifest, event_ser_id, event_ser_manifest, " +
68 | "event_payload, tags, meta_ser_id, meta_ser_manifest, meta_payload, db_timestamp) " +
69 | s"VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"
70 | }
71 |
--------------------------------------------------------------------------------
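The settingRequirements checks above mean that the MySQL dialect can only be used with application-generated, monotonically increasing timestamps. Below is a hedged configuration sketch; the exact config paths are assumed for illustration and should be verified against the core module's reference.conf.

import com.typesafe.config.{ Config, ConfigFactory }

object MySQLJournalConfigSketch {
  // Assumed keys mirroring the requirement messages above (use-app-timestamp,
  // db-timestamp-monotonic-increasing); consult reference.conf for the
  // authoritative paths and defaults.
  val overrides: Config = ConfigFactory.parseString("""
    pekko.persistence.r2dbc {
      dialect = "mysql"
      use-app-timestamp = on
      db-timestamp-monotonic-increasing = on
    }
    """)
}
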
/core/src/main/scala/org/apache/pekko/persistence/r2dbc/query/R2dbcReadJournalProvider.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Licensed to the Apache Software Foundation (ASF) under one or more
3 | * license agreements; and to You under the Apache License, version 2.0:
4 | *
5 | * https://www.apache.org/licenses/LICENSE-2.0
6 | *
7 | * This file is part of the Apache Pekko project, which was derived from Akka.
8 | */
9 |
10 | /*
11 | * Copyright (C) 2021 Lightbend Inc.
12 | */
13 |
14 | package org.apache.pekko.persistence.r2dbc.query
15 |
16 | import org.apache.pekko
17 | import pekko.actor.ExtendedActorSystem
18 | import pekko.persistence.query.ReadJournalProvider
19 | import com.typesafe.config.Config
20 |
21 | final class R2dbcReadJournalProvider(system: ExtendedActorSystem, config: Config, cfgPath: String)
22 | extends ReadJournalProvider {
23 | private val readJournalScala: scaladsl.R2dbcReadJournal =
24 | new scaladsl.R2dbcReadJournal(system, config, cfgPath)
25 |
26 | private val readJournalJava: javadsl.R2dbcReadJournal = new javadsl.R2dbcReadJournal(readJournalScala)
27 |
28 | override def scaladslReadJournal(): scaladsl.R2dbcReadJournal = readJournalScala
29 |
30 | override def javadslReadJournal(): javadsl.R2dbcReadJournal = readJournalJava
31 | }
32 |
--------------------------------------------------------------------------------
/core/src/main/scala/org/apache/pekko/persistence/r2dbc/query/javadsl/R2dbcReadJournal.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Licensed to the Apache Software Foundation (ASF) under one or more
3 | * license agreements; and to You under the Apache License, version 2.0:
4 | *
5 | * https://www.apache.org/licenses/LICENSE-2.0
6 | *
7 | * This file is part of the Apache Pekko project, which was derived from Akka.
8 | */
9 |
10 | /*
11 | * Copyright (C) 2021 Lightbend Inc.
12 | */
13 |
14 | package org.apache.pekko.persistence.r2dbc.query.javadsl
15 |
16 | import java.time.Instant
17 | import java.util
18 | import java.util.Optional
19 | import java.util.concurrent.CompletionStage
20 |
21 | import org.apache.pekko
22 | import pekko.NotUsed
23 | import pekko.dispatch.ExecutionContexts
24 | import pekko.japi.Pair
25 | import pekko.persistence.query.{ EventEnvelope => ClassicEventEnvelope }
26 | import pekko.persistence.query.Offset
27 | import pekko.persistence.query.javadsl._
28 | import pekko.persistence.query.typed.EventEnvelope
29 | import pekko.persistence.query.typed.javadsl.CurrentEventsBySliceQuery
30 | import pekko.persistence.query.typed.javadsl.EventTimestampQuery
31 | import pekko.persistence.query.typed.javadsl.EventsBySliceQuery
32 | import pekko.persistence.query.typed.javadsl.LoadEventQuery
33 | import pekko.persistence.r2dbc.query.scaladsl
34 | import pekko.stream.javadsl.Source
35 | import pekko.util.OptionConverters._
36 | import pekko.util.FutureConverters._
37 |
38 | object R2dbcReadJournal {
39 | val Identifier: String = scaladsl.R2dbcReadJournal.Identifier
40 | }
41 |
42 | final class R2dbcReadJournal(delegate: scaladsl.R2dbcReadJournal)
43 | extends ReadJournal
44 | with CurrentEventsBySliceQuery
45 | with EventsBySliceQuery
46 | with EventTimestampQuery
47 | with LoadEventQuery
48 | with CurrentEventsByPersistenceIdQuery
49 | with EventsByPersistenceIdQuery
50 | with CurrentPersistenceIdsQuery
51 | with PagedPersistenceIdsQuery {
52 |
53 | override def sliceForPersistenceId(persistenceId: String): Int =
54 | delegate.sliceForPersistenceId(persistenceId)
55 |
56 | override def currentEventsBySlices[Event](
57 | entityType: String,
58 | minSlice: Int,
59 | maxSlice: Int,
60 | offset: Offset): Source[EventEnvelope[Event], NotUsed] =
61 | delegate.currentEventsBySlices(entityType, minSlice, maxSlice, offset).asJava
62 |
63 | override def eventsBySlices[Event](
64 | entityType: String,
65 | minSlice: Int,
66 | maxSlice: Int,
67 | offset: Offset): Source[EventEnvelope[Event], NotUsed] =
68 | delegate.eventsBySlices(entityType, minSlice, maxSlice, offset).asJava
69 |
70 | override def sliceRanges(numberOfRanges: Int): util.List[Pair[Integer, Integer]] = {
71 | import pekko.util.ccompat.JavaConverters._
72 | delegate
73 | .sliceRanges(numberOfRanges)
74 | .map(range => Pair(Integer.valueOf(range.min), Integer.valueOf(range.max)))
75 | .asJava
76 | }
77 |
78 | override def currentEventsByPersistenceId(
79 | persistenceId: String,
80 | fromSequenceNr: Long,
81 | toSequenceNr: Long): Source[ClassicEventEnvelope, NotUsed] =
82 | delegate.currentEventsByPersistenceId(persistenceId, fromSequenceNr, toSequenceNr).asJava
83 |
84 | override def eventsByPersistenceId(
85 | persistenceId: String,
86 | fromSequenceNr: Long,
87 | toSequenceNr: Long): Source[ClassicEventEnvelope, NotUsed] =
88 | delegate.eventsByPersistenceId(persistenceId, fromSequenceNr, toSequenceNr).asJava
89 |
90 | override def currentPersistenceIds(): Source[String, NotUsed] =
91 | delegate.currentPersistenceIds().asJava
92 |
93 | override def currentPersistenceIds(afterId: Optional[String], limit: Long): Source[String, NotUsed] =
94 | delegate.currentPersistenceIds(afterId.toScala, limit).asJava
95 |
96 | override def timestampOf(persistenceId: String, sequenceNr: Long): CompletionStage[Optional[Instant]] =
97 | delegate.timestampOf(persistenceId, sequenceNr).map(_.toJava)(ExecutionContexts.parasitic).asJava
98 |
99 | override def loadEnvelope[Event](persistenceId: String, sequenceNr: Long): CompletionStage[EventEnvelope[Event]] =
100 | delegate.loadEnvelope[Event](persistenceId, sequenceNr).asJava
101 |
102 | }
103 |
--------------------------------------------------------------------------------
/core/src/main/scala/org/apache/pekko/persistence/r2dbc/query/scaladsl/mysql/MySQLQueryDao.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Licensed to the Apache Software Foundation (ASF) under one
3 | * or more contributor license agreements. See the NOTICE file
4 | * distributed with this work for additional information
5 | * regarding copyright ownership. The ASF licenses this file
6 | * to you under the Apache License, Version 2.0 (the
7 | * "License"); you may not use this file except in compliance
8 | * with the License. You may obtain a copy of the License at
9 | *
10 | * http://www.apache.org/licenses/LICENSE-2.0
11 | *
12 | * Unless required by applicable law or agreed to in writing,
13 | * software distributed under the License is distributed on an
14 | * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
15 | * KIND, either express or implied. See the License for the
16 | * specific language governing permissions and limitations
17 | * under the License.
18 | */
19 |
20 | package org.apache.pekko.persistence.r2dbc.query.scaladsl.mysql
21 |
22 | import java.time.Instant
23 |
24 | import scala.concurrent.ExecutionContext
25 | import scala.concurrent.Future
26 | import scala.concurrent.duration.Duration
27 | import scala.concurrent.duration.FiniteDuration
28 | import io.r2dbc.spi.ConnectionFactory
29 | import org.apache.pekko
30 | import pekko.actor.typed.ActorSystem
31 | import pekko.annotation.InternalApi
32 | import pekko.persistence.r2dbc.R2dbcSettings
33 | import pekko.persistence.r2dbc.internal.Sql.DialectInterpolation
34 | import pekko.persistence.r2dbc.query.scaladsl.QueryDao
35 |
36 | /**
37 | * INTERNAL API
38 | */
39 | @InternalApi
40 | private[r2dbc] class MySQLQueryDao(
41 | journalSettings: R2dbcSettings,
42 | connectionFactory: ConnectionFactory
43 | )(implicit ec: ExecutionContext, system: ActorSystem[_]) extends QueryDao(journalSettings, connectionFactory) {
44 |
45 | override lazy val statementTimestampSql: String = "NOW(6)"
46 |
47 | override def eventsBySlicesRangeSql(
48 | toDbTimestampParam: Boolean,
49 | behindCurrentTime: FiniteDuration,
50 | backtracking: Boolean,
51 | minSlice: Int,
52 | maxSlice: Int): String = {
53 |
54 | def toDbTimestampParamCondition =
55 | if (toDbTimestampParam) "AND db_timestamp <= ?" else ""
56 |
57 | def behindCurrentTimeIntervalCondition =
58 | if (behindCurrentTime > Duration.Zero)
59 | s"AND db_timestamp < DATE_SUB($statementTimestampSql, INTERVAL '${behindCurrentTime.toMicros}' MICROSECOND)"
60 | else ""
61 |
62 | val selectColumns = {
63 | if (backtracking)
64 | s"SELECT slice, persistence_id, seq_nr, db_timestamp, $statementTimestampSql AS read_db_timestamp "
65 | else
66 | s"SELECT slice, persistence_id, seq_nr, db_timestamp, $statementTimestampSql AS read_db_timestamp, event_ser_id, event_ser_manifest, event_payload, meta_ser_id, meta_ser_manifest, meta_payload "
67 | }
68 |
69 | sql"""
70 | $selectColumns
71 | FROM $journalTable
72 | WHERE entity_type = ?
73 | AND slice BETWEEN $minSlice AND $maxSlice
74 | AND db_timestamp >= ? $toDbTimestampParamCondition $behindCurrentTimeIntervalCondition
75 | AND deleted = false
76 | ORDER BY db_timestamp, seq_nr
77 | LIMIT ?"""
78 | }
79 |
80 | override def selectBucketsSql(minSlice: Int, maxSlice: Int): String = {
81 | sql"""
82 | SELECT CAST(UNIX_TIMESTAMP(db_timestamp) AS SIGNED) / 10 AS bucket, count(*) AS count
83 | FROM $journalTable
84 | WHERE entity_type = ?
85 | AND slice BETWEEN $minSlice AND $maxSlice
86 | AND db_timestamp >= ? AND db_timestamp <= ?
87 | AND deleted = false
88 | GROUP BY bucket ORDER BY bucket LIMIT ?
89 | """
90 | }
91 |
92 | override def currentDbTimestamp(): Future[Instant] = Future.successful(Instant.now())
93 | }
94 |
--------------------------------------------------------------------------------
/core/src/main/scala/org/apache/pekko/persistence/r2dbc/snapshot/mysql/MySQLSnapshotDao.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Licensed to the Apache Software Foundation (ASF) under one
3 | * or more contributor license agreements. See the NOTICE file
4 | * distributed with this work for additional information
5 | * regarding copyright ownership. The ASF licenses this file
6 | * to you under the Apache License, Version 2.0 (the
7 | * "License"); you may not use this file except in compliance
8 | * with the License. You may obtain a copy of the License at
9 | *
10 | * http://www.apache.org/licenses/LICENSE-2.0
11 | *
12 | * Unless required by applicable law or agreed to in writing,
13 | * software distributed under the License is distributed on an
14 | * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
15 | * KIND, either express or implied. See the License for the
16 | * specific language governing permissions and limitations
17 | * under the License.
18 | */
19 |
20 | package org.apache.pekko.persistence.r2dbc.snapshot.mysql
21 |
22 | import scala.concurrent.ExecutionContext
23 | import io.r2dbc.spi.ConnectionFactory
24 | import org.apache.pekko
25 | import pekko.actor.typed.ActorSystem
26 | import pekko.annotation.InternalApi
27 | import pekko.persistence.r2dbc.R2dbcSettings
28 | import pekko.persistence.r2dbc.internal.Sql.DialectInterpolation
29 | import pekko.persistence.r2dbc.snapshot.SnapshotDao
30 |
31 | /**
32 | * INTERNAL API
33 | */
34 | @InternalApi
35 | private[r2dbc] class MySQLSnapshotDao(
36 | settings: R2dbcSettings, connectionFactory: ConnectionFactory
37 | )(implicit ec: ExecutionContext, system: ActorSystem[_]) extends SnapshotDao(settings, connectionFactory) {
38 |
39 | override val upsertSql = sql"""
40 | INSERT INTO $snapshotTable
41 | (slice, entity_type, persistence_id, seq_nr, write_timestamp, snapshot, ser_id, ser_manifest, meta_payload, meta_ser_id, meta_ser_manifest)
42 | VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?) AS excluded
43 | ON DUPLICATE KEY UPDATE
44 | seq_nr = excluded.seq_nr,
45 | write_timestamp = excluded.write_timestamp,
46 | snapshot = excluded.snapshot,
47 | ser_id = excluded.ser_id,
48 | ser_manifest = excluded.ser_manifest,
49 | meta_payload = excluded.meta_payload,
50 | meta_ser_id = excluded.meta_ser_id,
51 | meta_ser_manifest = excluded.meta_ser_manifest"""
52 | }
53 |
--------------------------------------------------------------------------------
/core/src/main/scala/org/apache/pekko/persistence/r2dbc/state/R2dbcDurableStateStoreProvider.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Licensed to the Apache Software Foundation (ASF) under one or more
3 | * license agreements; and to You under the Apache License, version 2.0:
4 | *
5 | * https://www.apache.org/licenses/LICENSE-2.0
6 | *
7 | * This file is part of the Apache Pekko project, which was derived from Akka.
8 | */
9 |
10 | /*
11 | * Copyright (C) 2021 Lightbend Inc.
12 | */
13 |
14 | package org.apache.pekko.persistence.r2dbc.state
15 |
16 | import org.apache.pekko
17 | import pekko.actor.ExtendedActorSystem
18 | import pekko.persistence.state.DurableStateStoreProvider
19 | import com.typesafe.config.Config
20 | import pekko.persistence.state.javadsl.{ DurableStateStore => JDurableStateStore }
21 | import pekko.persistence.state.scaladsl.DurableStateStore
22 |
23 | class R2dbcDurableStateStoreProvider[A](system: ExtendedActorSystem, config: Config, cfgPath: String)
24 | extends DurableStateStoreProvider {
25 |
26 | override def scaladslDurableStateStore(): DurableStateStore[Any] =
27 | new scaladsl.R2dbcDurableStateStore(system, config, cfgPath)
28 |
29 | override def javadslDurableStateStore(): JDurableStateStore[AnyRef] =
30 | new javadsl.R2dbcDurableStateStore[AnyRef](new scaladsl.R2dbcDurableStateStore(system, config, cfgPath))(
31 | system.dispatcher)
32 | }
33 |
--------------------------------------------------------------------------------
/core/src/main/scala/org/apache/pekko/persistence/r2dbc/state/javadsl/R2dbcDurableStateStore.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Licensed to the Apache Software Foundation (ASF) under one or more
3 | * license agreements; and to You under the Apache License, version 2.0:
4 | *
5 | * https://www.apache.org/licenses/LICENSE-2.0
6 | *
7 | * This file is part of the Apache Pekko project, which was derived from Akka.
8 | */
9 |
10 | /*
11 | * Copyright (C) 2021 Lightbend Inc.
12 | */
13 |
14 | package org.apache.pekko.persistence.r2dbc.state.javadsl
15 |
16 | import java.util
17 | import java.util.Optional
18 | import java.util.concurrent.CompletionStage
19 |
20 | import scala.concurrent.ExecutionContext
21 |
22 | import org.apache.pekko
23 | import pekko.Done
24 | import pekko.NotUsed
25 | import pekko.japi.Pair
26 | import pekko.persistence.query.DurableStateChange
27 | import pekko.persistence.query.Offset
28 | import pekko.persistence.query.javadsl.DurableStateStorePagedPersistenceIdsQuery
29 | import pekko.persistence.query.typed.javadsl.DurableStateStoreBySliceQuery
30 | import pekko.persistence.r2dbc.state.scaladsl.{ R2dbcDurableStateStore => ScalaR2dbcDurableStateStore }
31 | import pekko.persistence.state.javadsl.DurableStateUpdateStore
32 | import pekko.persistence.state.javadsl.GetObjectResult
33 | import pekko.stream.javadsl.Source
34 | import pekko.util.FutureConverters._
35 |
36 | object R2dbcDurableStateStore {
37 | val Identifier: String = ScalaR2dbcDurableStateStore.Identifier
38 | }
39 |
40 | class R2dbcDurableStateStore[A](scalaStore: ScalaR2dbcDurableStateStore[A])(implicit ec: ExecutionContext)
41 | extends DurableStateUpdateStore[A]
42 | with DurableStateStoreBySliceQuery[A]
43 | with DurableStateStorePagedPersistenceIdsQuery[A] {
44 |
45 | override def getObject(persistenceId: String): CompletionStage[GetObjectResult[A]] =
46 | scalaStore
47 | .getObject(persistenceId)
48 | .map(x => GetObjectResult(Optional.ofNullable(x.value.getOrElse(null.asInstanceOf[A])), x.revision))
49 | .asJava
50 |
51 | override def upsertObject(persistenceId: String, revision: Long, value: A, tag: String): CompletionStage[Done] =
52 | scalaStore.upsertObject(persistenceId, revision, value, tag).asJava
53 |
54 | override def deleteObject(persistenceId: String): CompletionStage[Done] =
55 | scalaStore.deleteObject(persistenceId).asJava
56 |
57 | override def deleteObject(persistenceId: String, revision: Long): CompletionStage[Done] =
58 | scalaStore.deleteObject(persistenceId, revision).asJava
59 |
60 | override def currentChangesBySlices(
61 | entityType: String,
62 | minSlice: Int,
63 | maxSlice: Int,
64 | offset: Offset): Source[DurableStateChange[A], NotUsed] =
65 | scalaStore.currentChangesBySlices(entityType, minSlice, maxSlice, offset).asJava
66 |
67 | override def changesBySlices(
68 | entityType: String,
69 | minSlice: Int,
70 | maxSlice: Int,
71 | offset: Offset): Source[DurableStateChange[A], NotUsed] =
72 | scalaStore.changesBySlices(entityType, minSlice, maxSlice, offset).asJava
73 |
74 | override def sliceForPersistenceId(persistenceId: String): Int =
75 | scalaStore.sliceForPersistenceId(persistenceId)
76 |
77 | override def sliceRanges(numberOfRanges: Int): util.List[Pair[Integer, Integer]] = {
78 | import pekko.util.ccompat.JavaConverters._
79 | scalaStore
80 | .sliceRanges(numberOfRanges)
81 | .map(range => Pair(Integer.valueOf(range.min), Integer.valueOf(range.max)))
82 | .asJava
83 | }
84 |
85 | override def currentPersistenceIds(afterId: Optional[String], limit: Long): Source[String, NotUsed] = {
86 | import pekko.util.OptionConverters._
87 | scalaStore.currentPersistenceIds(afterId.toScala, limit).asJava
88 | }
89 |
90 | def currentPersistenceIds(): Source[String, NotUsed] =
91 | scalaStore.currentPersistenceIds().asJava
92 | }
93 |
--------------------------------------------------------------------------------
/core/src/main/scala/org/apache/pekko/persistence/r2dbc/state/scaladsl/DurableStateExceptionSupport.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Licensed to the Apache Software Foundation (ASF) under one or more
3 | * contributor license agreements. See the NOTICE file distributed with
4 | * this work for additional information regarding copyright ownership.
5 | * The ASF licenses this file to You under the Apache License, Version 2.0
6 | * (the "License"); you may not use this file except in compliance with
7 | * the License. You may obtain a copy of the License at
8 | *
9 | * http://www.apache.org/licenses/LICENSE-2.0
10 | *
11 | * Unless required by applicable law or agreed to in writing, software
12 | * distributed under the License is distributed on an "AS IS" BASIS,
13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | * See the License for the specific language governing permissions and
15 | * limitations under the License.
16 | */
17 |
18 | package org.apache.pekko.persistence.r2dbc.state.scaladsl
19 |
20 | import java.lang.invoke.{ MethodHandles, MethodType }
21 |
22 | import scala.util.Try
23 |
24 | /**
25 | * INTERNAL API
26 | *
27 |  * Support for creating a `DeleteRevisionException` if the class is
28 | * available on the classpath. Pekko 1.0 does not have this class, but
29 | * it is added in Pekko 1.1.
30 | */
31 | private[state] object DurableStateExceptionSupport {
32 | val DeleteRevisionExceptionClass =
33 | "org.apache.pekko.persistence.state.exception.DeleteRevisionException"
34 |
35 | private def exceptionClassOpt: Option[Class[_]] =
36 | Try(Class.forName(DeleteRevisionExceptionClass)).toOption
37 |
38 | private val constructorOpt = exceptionClassOpt.map { clz =>
39 | val mt = MethodType.methodType(classOf[Unit], classOf[String])
40 | MethodHandles.publicLookup().findConstructor(clz, mt)
41 | }
42 |
43 | def createDeleteRevisionExceptionIfSupported(message: String): Option[Exception] =
44 | constructorOpt.map { constructor =>
45 | constructor.invoke(message).asInstanceOf[Exception]
46 | }
47 |
48 | }
49 |
--------------------------------------------------------------------------------
/core/src/main/scala/org/apache/pekko/persistence/r2dbc/state/scaladsl/mysql/MySQLDurableStateDao.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Licensed to the Apache Software Foundation (ASF) under one
3 | * or more contributor license agreements. See the NOTICE file
4 | * distributed with this work for additional information
5 | * regarding copyright ownership. The ASF licenses this file
6 | * to you under the Apache License, Version 2.0 (the
7 | * "License"); you may not use this file except in compliance
8 | * with the License. You may obtain a copy of the License at
9 | *
10 | * http://www.apache.org/licenses/LICENSE-2.0
11 | *
12 | * Unless required by applicable law or agreed to in writing,
13 | * software distributed under the License is distributed on an
14 | * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
15 | * KIND, either express or implied. See the License for the
16 | * specific language governing permissions and limitations
17 | * under the License.
18 | */
19 |
20 | package org.apache.pekko.persistence.r2dbc.state.scaladsl.mysql
21 |
22 | import java.time.Instant
23 |
24 | import scala.concurrent.ExecutionContext
25 | import scala.concurrent.Future
26 | import scala.concurrent.duration.Duration
27 | import scala.concurrent.duration.FiniteDuration
28 | import io.r2dbc.spi.ConnectionFactory
29 | import org.apache.pekko
30 | import pekko.actor.typed.ActorSystem
31 | import pekko.annotation.InternalApi
32 | import pekko.persistence.r2dbc.R2dbcSettings
33 | import pekko.persistence.r2dbc.internal.Sql.DialectInterpolation
34 | import pekko.persistence.r2dbc.journal.mysql.MySQLJournalDao
35 | import pekko.persistence.r2dbc.state.scaladsl.DurableStateDao
36 |
37 | /**
38 | * INTERNAL API
39 | */
40 | @InternalApi
41 | private[r2dbc] class MySQLDurableStateDao(
42 | settings: R2dbcSettings,
43 | connectionFactory: ConnectionFactory
44 | )(implicit ec: ExecutionContext, system: ActorSystem[_]) extends DurableStateDao(settings, connectionFactory) {
45 | MySQLJournalDao.settingRequirements(settings)
46 |
47 | override lazy val transactionTimestampSql: String = "NOW(6)"
48 |
49 | override def selectBucketsSql(minSlice: Int, maxSlice: Int): String = {
50 | sql"""
51 | SELECT CAST(UNIX_TIMESTAMP(db_timestamp) AS SIGNED) / 10 AS bucket, count(*) AS count
52 | FROM $stateTable
53 | WHERE entity_type = ?
54 | AND slice BETWEEN $minSlice AND $maxSlice
55 | AND db_timestamp >= ? AND db_timestamp <= ?
56 | GROUP BY bucket ORDER BY bucket LIMIT ?
57 | """
58 | }
59 |
60 | override def stateBySlicesRangeSql(
61 | maxDbTimestampParam: Boolean,
62 | behindCurrentTime: FiniteDuration,
63 | backtracking: Boolean,
64 | minSlice: Int,
65 | maxSlice: Int): String = {
66 |
67 | def maxDbTimestampParamCondition =
68 | if (maxDbTimestampParam) s"AND db_timestamp < ?" else ""
69 |
70 | def behindCurrentTimeIntervalCondition =
71 | if (behindCurrentTime > Duration.Zero)
72 | s"AND db_timestamp < DATE_SUB(NOW(6), INTERVAL '${behindCurrentTime.toMicros}' MICROSECOND)"
73 | else ""
74 |
75 | val selectColumns =
76 | if (backtracking)
77 | "SELECT persistence_id, revision, db_timestamp, NOW(6) AS read_db_timestamp "
78 | else
79 | "SELECT persistence_id, revision, db_timestamp, NOW(6) AS read_db_timestamp, state_ser_id, state_ser_manifest, state_payload "
80 |
81 | sql"""
82 | $selectColumns
83 | FROM $stateTable
84 | WHERE entity_type = ?
85 | AND slice BETWEEN $minSlice AND $maxSlice
86 | AND db_timestamp >= ? $maxDbTimestampParamCondition $behindCurrentTimeIntervalCondition
87 | ORDER BY db_timestamp, revision
88 | LIMIT ?"""
89 | }
90 |
91 | override def currentDbTimestamp(): Future[Instant] = Future.successful(Instant.now())
92 | }
93 |
--------------------------------------------------------------------------------
/core/src/test/resources/logback-test.xml:
--------------------------------------------------------------------------------
 1 | <?xml version="1.0" encoding="UTF-8"?>
 2 | <configuration>
 3 |     <appender name="STDOUT" target="System.out" class="ch.qos.logback.core.ConsoleAppender">
 4 |         <encoder>
 5 |             <pattern>[%date{ISO8601}] [%level] [%logger] [%X{pekkoAddress}] [%marker] [%thread] - %msg%n</pattern>
 6 |         </encoder>
 7 |     </appender>
 8 |
 9 |     <!-- capturing appender used by the LogCapturing trait in the tests -->
10 |     <appender name="CapturingAppender" class="org.apache.pekko.actor.testkit.typed.internal.CapturingAppender" />
11 |
12 |     <logger name="org.apache.pekko.actor.testkit.typed.internal.CapturingAppenderDelegate">
13 |         <appender-ref ref="STDOUT"/>
14 |     </logger>
15 |
16 |     <root level="DEBUG">
17 |         <appender-ref ref="CapturingAppender"/>
18 |     </root>
19 | </configuration>
--------------------------------------------------------------------------------
/core/src/test/scala/org/apache/pekko/persistence/r2dbc/CborSerializable.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Licensed to the Apache Software Foundation (ASF) under one or more
3 | * license agreements; and to You under the Apache License, version 2.0:
4 | *
5 | * https://www.apache.org/licenses/LICENSE-2.0
6 | *
7 | * This file is part of the Apache Pekko project, which was derived from Akka.
8 | */
9 |
10 | /*
11 | * Copyright (C) 2021 Lightbend Inc.
12 | */
13 |
14 | package org.apache.pekko.persistence.r2dbc
15 |
16 | trait CborSerializable
17 |
--------------------------------------------------------------------------------
/core/src/test/scala/org/apache/pekko/persistence/r2dbc/ConnectionFactoryOptionsCustomizerSpec.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Licensed to the Apache Software Foundation (ASF) under one or more
3 | * contributor license agreements. See the NOTICE file distributed with
4 | * this work for additional information regarding copyright ownership.
5 | * The ASF licenses this file to You under the Apache License, Version 2.0
6 | * (the "License"); you may not use this file except in compliance with
7 | * the License. You may obtain a copy of the License at
8 | *
9 | * http://www.apache.org/licenses/LICENSE-2.0
10 | *
11 | * Unless required by applicable law or agreed to in writing, software
12 | * distributed under the License is distributed on an "AS IS" BASIS,
13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | * See the License for the specific language governing permissions and
15 | * limitations under the License.
16 | */
17 |
18 | package org.apache.pekko.persistence.r2dbc
19 |
20 | import com.typesafe.config.{ Config, ConfigFactory }
21 | import io.r2dbc.spi.ConnectionFactoryOptions
22 | import org.apache.pekko.actor.testkit.typed.scaladsl.{ ScalaTestWithActorTestKit, TestProbe }
23 | import org.apache.pekko.actor.typed.ActorSystem
24 | import org.apache.pekko.actor.typed.eventstream.EventStream
25 | import org.apache.pekko.persistence.r2dbc.ConnectionFactoryOptionsCustomizerSpec.{ config, CustomizerCalled }
26 | import org.apache.pekko.persistence.r2dbc.ConnectionFactoryProvider.ConnectionFactoryOptionsCustomizer
27 | import org.scalatest.wordspec.AnyWordSpecLike
28 |
29 | class ConnectionFactoryOptionsCustomizerSpec extends ScalaTestWithActorTestKit(config) with AnyWordSpecLike {
30 | "ConnectionFactoryProvider" should {
31 | "instantiate and apply a custom ConnectionFactoryOptionsCustomizer when connection-factory-options-customizer settings is set" in {
32 | val probe = TestProbe[CustomizerCalled.type]()
33 | system.eventStream.tell(EventStream.Subscribe(probe.ref))
34 |
35 | ConnectionFactoryProvider(system).connectionFactoryFor("pekko.persistence.r2dbc.connection-factory")
36 | probe.expectMessage(CustomizerCalled)
37 | }
38 | }
39 | }
40 |
41 | object ConnectionFactoryOptionsCustomizerSpec {
42 | object CustomizerCalled
43 |
44 | class Customizer(system: ActorSystem[_]) extends ConnectionFactoryOptionsCustomizer {
45 | override def apply(builder: ConnectionFactoryOptions.Builder, config: Config): ConnectionFactoryOptions.Builder = {
46 | system.eventStream.tell(EventStream.Publish(CustomizerCalled))
47 | builder
48 | }
49 | }
50 |
51 | val config: Config = ConfigFactory.parseString("""
52 | pekko.persistence.r2dbc.connection-factory {
53 | connection-factory-options-customizer = "org.apache.pekko.persistence.r2dbc.ConnectionFactoryOptionsCustomizerSpec$Customizer"
54 | }
55 | """).withFallback(TestConfig.config)
56 | }
57 |
--------------------------------------------------------------------------------
/core/src/test/scala/org/apache/pekko/persistence/r2dbc/R2dbcSettingsSpec.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Licensed to the Apache Software Foundation (ASF) under one or more
3 | * license agreements; and to You under the Apache License, version 2.0:
4 | *
5 | * https://www.apache.org/licenses/LICENSE-2.0
6 | *
7 | * This file is part of the Apache Pekko project, which was derived from Akka.
8 | */
9 |
10 | /*
11 | * Copyright (C) 2021 Lightbend Inc.
12 | */
13 |
14 | package org.apache.pekko.persistence.r2dbc
15 |
16 | import com.typesafe.config.ConfigFactory
17 | import io.r2dbc.postgresql.client.SSLMode
18 | import org.scalatest.TestSuite
19 | import org.scalatest.matchers.should.Matchers
20 | import org.scalatest.wordspec.AnyWordSpec
21 |
22 | class R2dbcSettingsSpec extends AnyWordSpec with TestSuite with Matchers {
23 |
24 | "Settings" should {
25 | "have table names with schema" in {
26 | val config = ConfigFactory.parseString("pekko.persistence.r2dbc.schema=s1").withFallback(ConfigFactory.load())
27 | val settings = R2dbcSettings(config.getConfig("pekko.persistence.r2dbc"))
28 | settings.journalTableWithSchema shouldBe "s1.event_journal"
29 | settings.snapshotsTableWithSchema shouldBe "s1.snapshot"
30 | settings.durableStateTableWithSchema shouldBe "s1.durable_state"
31 |
32 | // by default connection is configured with options
33 | settings.connectionFactorySettings shouldBe a[ConnectionFactorySettings]
34 | settings.connectionFactorySettings.urlOption should not be defined
35 | }
36 |
37 | "support connection settings build from url" in {
38 | val config =
39 | ConfigFactory
40 | .parseString("pekko.persistence.r2dbc.connection-factory.url=whatever-url")
41 | .withFallback(ConfigFactory.load())
42 |
43 | val settings = R2dbcSettings(config.getConfig("pekko.persistence.r2dbc"))
44 | settings.connectionFactorySettings shouldBe a[ConnectionFactorySettings]
45 | settings.connectionFactorySettings.urlOption shouldBe defined
46 | }
47 |
48 | "support ssl-mode as enum name" in {
49 | val config = ConfigFactory
50 | .parseString("pekko.persistence.r2dbc.connection-factory.ssl.mode=VERIFY_FULL")
51 | .withFallback(ConfigFactory.load())
52 | val settings = R2dbcSettings(config.getConfig("pekko.persistence.r2dbc"))
53 | settings.connectionFactorySettings.sslMode shouldBe "VERIFY_FULL"
54 | SSLMode.fromValue(settings.connectionFactorySettings.sslMode) shouldBe SSLMode.VERIFY_FULL
55 | }
56 |
57 | "support ssl-mode values in lower and dashes" in {
58 | val config = ConfigFactory
59 | .parseString("pekko.persistence.r2dbc.connection-factory.ssl.mode=verify-full")
60 | .withFallback(ConfigFactory.load())
61 | val settings = R2dbcSettings(config.getConfig("pekko.persistence.r2dbc"))
62 | settings.connectionFactorySettings.sslMode shouldBe "verify-full"
63 | SSLMode.fromValue(settings.connectionFactorySettings.sslMode) shouldBe SSLMode.VERIFY_FULL
64 | }
65 |
66 | "allow to specify ConnectionFactoryOptions customizer" in {
67 | val config = ConfigFactory
68 | .parseString("pekko.persistence.r2dbc.connection-factory.connection-factory-options-customizer=fqcn")
69 | .withFallback(ConfigFactory.load())
70 | val settings = R2dbcSettings(config.getConfig("pekko.persistence.r2dbc"))
71 | settings.connectionFactorySettings.connectionFactoryOptionsCustomizer shouldBe Some("fqcn")
72 | }
73 | }
74 | }
75 |
--------------------------------------------------------------------------------
/core/src/test/scala/org/apache/pekko/persistence/r2dbc/TestConfig.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Licensed to the Apache Software Foundation (ASF) under one or more
3 | * license agreements; and to You under the Apache License, version 2.0:
4 | *
5 | * https://www.apache.org/licenses/LICENSE-2.0
6 | *
7 | * This file is part of the Apache Pekko project, which was derived from Akka.
8 | */
9 |
10 | /*
11 | * Copyright (C) 2021 Lightbend Inc.
12 | */
13 |
14 | package org.apache.pekko.persistence.r2dbc
15 |
16 | import com.typesafe.config.Config
17 | import com.typesafe.config.ConfigFactory
18 |
19 | object TestConfig {
20 | lazy val config: Config = {
21 | val defaultConfig = ConfigFactory.load()
22 | val dialect = defaultConfig.getString("pekko.persistence.r2dbc.dialect")
23 |
24 | val dialectConfig = dialect match {
25 | case "postgres" =>
26 | ConfigFactory.parseString("""
27 | pekko.persistence.r2dbc.connection-factory {
28 | driver = "postgres"
29 | host = "localhost"
30 | port = 5432
31 | user = "postgres"
32 | password = "postgres"
33 | database = "postgres"
34 | }
35 | """)
36 | case "yugabyte" =>
37 | ConfigFactory.parseString("""
38 | pekko.persistence.r2dbc.connection-factory {
39 | driver = "postgres"
40 | host = "localhost"
41 | port = 5433
42 | user = "yugabyte"
43 | password = "yugabyte"
44 | database = "yugabyte"
45 | }
46 | """)
47 | case "mysql" =>
48 | ConfigFactory.parseString("""
49 | pekko.persistence.r2dbc{
50 | connection-factory {
51 | driver = "mysql"
52 | host = "localhost"
53 | port = 3306
54 | user = "root"
55 | password = "root"
56 | database = "mysql"
57 | }
58 | db-timestamp-monotonic-increasing = on
59 | use-app-timestamp = on
60 | }
61 | """)
62 | }
63 |
64 | // using load here so that connection-factory can be overridden
65 | ConfigFactory.load(dialectConfig.withFallback(ConfigFactory.parseString("""
66 | pekko.loglevel = DEBUG
67 | pekko.persistence.journal.plugin = "pekko.persistence.r2dbc.journal"
68 | pekko.persistence.snapshot-store.plugin = "pekko.persistence.r2dbc.snapshot"
69 | pekko.persistence.state.plugin = "pekko.persistence.r2dbc.state"
70 | pekko.persistence.r2dbc {
71 | query {
72 | refresh-interval = 1s
73 | }
74 | }
75 | pekko.actor {
76 | serialization-bindings {
77 | "org.apache.pekko.persistence.r2dbc.CborSerializable" = jackson-cbor
78 | }
79 | }
80 | pekko.actor.testkit.typed.default-timeout = 10s
81 | """)))
82 | }
83 |
84 | val backtrackingDisabledConfig: Config =
85 | ConfigFactory.parseString("pekko.persistence.r2dbc.query.backtracking.enabled = off")
86 | }
87 |
--------------------------------------------------------------------------------
/core/src/test/scala/org/apache/pekko/persistence/r2dbc/TestData.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Licensed to the Apache Software Foundation (ASF) under one or more
3 | * license agreements; and to You under the Apache License, version 2.0:
4 | *
5 | * https://www.apache.org/licenses/LICENSE-2.0
6 | *
7 | * This file is part of the Apache Pekko project, which was derived from Akka.
8 | */
9 |
10 | /*
11 | * Copyright (C) 2021 Lightbend Inc.
12 | */
13 |
14 | package org.apache.pekko.persistence.r2dbc
15 |
16 | import java.util.concurrent.atomic.AtomicLong
17 |
18 | object TestData {
19 | private val start = 0L // could be something more unique, like currentTimeMillis
20 | private val pidCounter = new AtomicLong(start)
21 | private val entityTypeCounter = new AtomicLong(start)
22 | }
23 |
24 | trait TestData {
25 | import TestData.pidCounter
26 | import TestData.entityTypeCounter
27 |
28 | def nextPid() = s"p-${pidCounter.incrementAndGet()}"
29 | // FIXME return PersistenceId instead
30 | def nextPid(entityType: String) = s"$entityType|p-${pidCounter.incrementAndGet()}"
31 |
32 | def nextEntityType() = s"TestEntity-${entityTypeCounter.incrementAndGet()}"
33 |
34 | }
35 |
--------------------------------------------------------------------------------
/core/src/test/scala/org/apache/pekko/persistence/r2dbc/TestDbLifecycle.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Licensed to the Apache Software Foundation (ASF) under one or more
3 | * license agreements; and to You under the Apache License, version 2.0:
4 | *
5 | * https://www.apache.org/licenses/LICENSE-2.0
6 | *
7 | * This file is part of the Apache Pekko project, which was derived from Akka.
8 | */
9 |
10 | /*
11 | * Copyright (C) 2021 Lightbend Inc.
12 | */
13 |
14 | package org.apache.pekko.persistence.r2dbc
15 |
16 | import scala.concurrent.Await
17 | import scala.concurrent.duration._
18 |
19 | import org.apache.pekko
20 | import pekko.actor.typed.ActorSystem
21 | import pekko.persistence.Persistence
22 | import pekko.persistence.r2dbc.internal.R2dbcExecutor
23 | import org.scalatest.BeforeAndAfterAll
24 | import org.scalatest.Suite
25 | import org.slf4j.LoggerFactory
26 |
27 | trait TestDbLifecycle extends BeforeAndAfterAll { this: Suite =>
28 |
29 | def typedSystem: ActorSystem[_]
30 |
31 | def testConfigPath: String = "pekko.persistence.r2dbc"
32 |
33 | lazy val r2dbcSettings: R2dbcSettings =
34 | new R2dbcSettings(typedSystem.settings.config.getConfig(testConfigPath))
35 |
36 | lazy val r2dbcExecutor: R2dbcExecutor = {
37 | new R2dbcExecutor(
38 | ConnectionFactoryProvider(typedSystem).connectionFactoryFor(testConfigPath + ".connection-factory"),
39 | LoggerFactory.getLogger(getClass),
40 | r2dbcSettings.logDbCallsExceeding)(typedSystem.executionContext, typedSystem)
41 | }
42 |
43 | lazy val persistenceExt: Persistence = Persistence(typedSystem)
44 |
45 | override protected def beforeAll(): Unit = {
46 | Await.result(
47 | r2dbcExecutor.updateOne("beforeAll delete")(
48 | _.createStatement(s"delete from ${r2dbcSettings.journalTableWithSchema}")),
49 | 10.seconds)
50 | Await.result(
51 | r2dbcExecutor.updateOne("beforeAll delete")(
52 | _.createStatement(s"delete from ${r2dbcSettings.snapshotsTableWithSchema}")),
53 | 10.seconds)
54 | Await.result(
55 | r2dbcExecutor.updateOne("beforeAll delete")(
56 | _.createStatement(s"delete from ${r2dbcSettings.durableStateTableWithSchema}")),
57 | 10.seconds)
58 | super.beforeAll()
59 | }
60 |
61 | }
62 |
--------------------------------------------------------------------------------
/core/src/test/scala/org/apache/pekko/persistence/r2dbc/internal/BySliceQueryBucketsSpec.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Licensed to the Apache Software Foundation (ASF) under one or more
3 | * license agreements; and to You under the Apache License, version 2.0:
4 | *
5 | * https://www.apache.org/licenses/LICENSE-2.0
6 | *
7 | * This file is part of the Apache Pekko project, which was derived from Akka.
8 | */
9 |
10 | /*
11 | * Copyright (C) 2021 Lightbend Inc.
12 | */
13 |
14 | package org.apache.pekko.persistence.r2dbc.internal
15 |
16 | import java.time.Instant
17 |
18 | import org.apache.pekko
19 | import pekko.persistence.r2dbc.internal.BySliceQuery.Buckets
20 | import pekko.persistence.r2dbc.internal.BySliceQuery.Buckets.Bucket
21 | import pekko.persistence.r2dbc.internal.BySliceQuery.Buckets.BucketDurationSeconds
22 | import org.scalatest.TestSuite
23 | import org.scalatest.matchers.should.Matchers
24 | import org.scalatest.wordspec.AnyWordSpec
25 |
26 | class BySliceQueryBucketsSpec extends AnyWordSpec with TestSuite with Matchers {
27 |
28 | private val startTime = Instant.now()
29 | private val firstBucketStartTime = startTime.plusSeconds(60)
30 | private val firstBucketStartEpochSeconds = firstBucketStartTime.toEpochMilli / 1000
31 |
32 | private def bucketStartEpochSeconds(bucketIndex: Int): Long =
33 | firstBucketStartEpochSeconds + BucketDurationSeconds * bucketIndex
34 |
35 | private def bucketEndEpochSeconds(bucketIndex: Int): Long =
36 | bucketStartEpochSeconds(bucketIndex) + BucketDurationSeconds
37 |
38 | private def bucketEndTime(bucketIndex: Int): Instant =
39 | Instant.ofEpochSecond(bucketEndEpochSeconds(bucketIndex))
40 |
41 | private val buckets = {
42 | Buckets.empty
43 | .add(
44 | List(
45 | Bucket(bucketStartEpochSeconds(0), 101),
46 | Bucket(bucketStartEpochSeconds(1), 202),
47 | Bucket(bucketStartEpochSeconds(2), 303),
48 | Bucket(bucketStartEpochSeconds(3), 304),
49 | Bucket(bucketStartEpochSeconds(4), 305),
50 | Bucket(bucketStartEpochSeconds(5), 306)))
51 | }
52 |
53 | "BySliceQuery.Buckets" should {
54 | "find time for events limit" in {
55 | buckets.findTimeForLimit(startTime, 100) shouldBe Some(bucketEndTime(0))
56 |
57 | // not including the bucket that includes the `from` time
58 | buckets.findTimeForLimit(firstBucketStartTime, 100) shouldBe Some(bucketEndTime(1))
59 | buckets.findTimeForLimit(firstBucketStartTime.plusSeconds(9), 100) shouldBe Some(bucketEndTime(1))
60 | buckets.findTimeForLimit(firstBucketStartTime.plusSeconds(10), 100) shouldBe Some(bucketEndTime(2))
61 | buckets.findTimeForLimit(firstBucketStartTime.plusSeconds(11), 100) shouldBe Some(bucketEndTime(2))
62 |
63 | // 202 + 303 >= 500
64 | buckets.findTimeForLimit(firstBucketStartTime.plusSeconds(3), 500) shouldBe Some(bucketEndTime(2))
65 | // 202 + 303 >= 505
66 | buckets.findTimeForLimit(firstBucketStartTime.plusSeconds(3), 505) shouldBe Some(bucketEndTime(2))
67 | // 202 + 303 + 304 >= 506
68 | buckets.findTimeForLimit(firstBucketStartTime.plusSeconds(3), 506) shouldBe Some(bucketEndTime(3))
69 |
70 | buckets.findTimeForLimit(firstBucketStartTime.plusSeconds(3), 1000) shouldBe Some(bucketEndTime(4))
71 | buckets.findTimeForLimit(firstBucketStartTime.plusSeconds(3), 1400) shouldBe Some(bucketEndTime(5))
72 | buckets.findTimeForLimit(firstBucketStartTime.plusSeconds(3), 1500) shouldBe None
73 | }
74 |
75 | "clear until time" in {
76 | buckets.clearUntil(startTime).size shouldBe buckets.size
77 | buckets.clearUntil(firstBucketStartTime).size shouldBe buckets.size
78 | buckets.clearUntil(firstBucketStartTime.plusSeconds(9)).size shouldBe buckets.size
79 |
80 | buckets.clearUntil(firstBucketStartTime.plusSeconds(10)).size shouldBe buckets.size - 1
81 | buckets.clearUntil(firstBucketStartTime.plusSeconds(11)).size shouldBe buckets.size - 1
82 | buckets.clearUntil(firstBucketStartTime.plusSeconds(19)).size shouldBe buckets.size - 1
83 |
84 | buckets.clearUntil(firstBucketStartTime.plusSeconds(31)).size shouldBe buckets.size - 3
85 | buckets.clearUntil(firstBucketStartTime.plusSeconds(100)).size shouldBe 1 // keep last
86 | }
87 |
88 | }
89 |
90 | }
91 |
--------------------------------------------------------------------------------
/core/src/test/scala/org/apache/pekko/persistence/r2dbc/internal/MonoToFutureSpec.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Licensed to the Apache Software Foundation (ASF) under one or more
3 | * license agreements; and to You under the Apache License, version 2.0:
4 | *
5 | * https://www.apache.org/licenses/LICENSE-2.0
6 | *
7 | * This file is part of the Apache Pekko project, which was derived from Akka.
8 | */
9 |
10 | package org.apache.pekko.persistence.r2dbc.internal
11 |
12 | import org.scalatest.TestSuite
13 | import org.scalatest.concurrent.ScalaFutures
14 | import org.scalatest.matchers.should.Matchers
15 | import org.scalatest.wordspec.AnyWordSpec
16 | import reactor.core.publisher.Mono
17 |
18 | import java.util.concurrent.atomic.AtomicBoolean
19 |
20 | class MonoToFutureSpec extends AnyWordSpec with ScalaFutures with TestSuite with Matchers {
21 | "MonoToFutureSpec" should {
22 | "convert a Mono to a Future in happy path" in {
23 | val r = Mono.just("pekko")
24 | .subscribeWith(new MonoToFuture[String]())
25 | .future
26 | r.futureValue shouldBe "pekko"
27 | }
28 |
29 | "convert a failed Mono to a failed Future" in {
30 | val r = Mono.error(new RuntimeException("pekko"))
31 | .subscribeWith(new MonoToFuture[String]())
32 | .future
33 | r.failed.futureValue.getMessage shouldBe "pekko"
34 | }
35 |
36 | "convert an empty Mono to a Future with null" in {
37 | val r = Mono.empty[String]
38 | .subscribeWith(new MonoToFuture[String]())
39 | .future
40 | r.futureValue shouldBe null
41 | }
42 |
43 | "will not cancel the origin Mono after Future completes" in {
44 | val canceledFlag = new AtomicBoolean(false)
45 | val r = Mono.just("pekko")
46 | .doOnCancel(() => canceledFlag.set(true))
47 | .subscribeWith(new MonoToFuture[String]())
48 | .future
49 | r.futureValue shouldBe "pekko"
50 | canceledFlag.get() shouldBe false
51 | }
52 |
53 | }
54 | }
55 |
--------------------------------------------------------------------------------
/core/src/test/scala/org/apache/pekko/persistence/r2dbc/internal/SqlSpec.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Licensed to the Apache Software Foundation (ASF) under one or more
3 | * license agreements; and to You under the Apache License, version 2.0:
4 | *
5 | * https://www.apache.org/licenses/LICENSE-2.0
6 | *
7 | * This file is part of the Apache Pekko project, which was derived from Akka.
8 | */
9 |
10 | /*
11 | * Copyright (C) 2021 Lightbend Inc.
12 | */
13 |
14 | package org.apache.pekko.persistence.r2dbc.internal
15 |
16 | import org.scalatest.TestSuite
17 | import org.scalatest.matchers.should.Matchers
18 | import org.scalatest.wordspec.AnyWordSpec
19 |
20 | class SqlSpec extends AnyWordSpec with TestSuite with Matchers {
21 | import Sql.Interpolation
22 |
23 | "SQL string interpolation" should {
24 | "replace ? bind parameters with numbered $" in {
25 | sql"select * from bar where a = ?" shouldBe "select * from bar where a = $1"
26 | sql"select * from bar where a = ? and b = ? and c = ?" shouldBe "select * from bar where a = $1 and b = $2 and c = $3"
27 | sql"select * from bar" shouldBe "select * from bar"
28 | }
29 |
30 | "work together with standard string interpolation" in {
31 | val table = "foo"
32 | sql"select * from $table where a = ?" shouldBe "select * from foo where a = $1"
33 | }
34 |
35 | "replace bind parameters after standard string interpolation" in {
36 | val where = "where a = ? and b = ?"
37 | sql"select * from foo $where" shouldBe "select * from foo where a = $1 and b = $2"
38 | }
39 |
40 | "trim line breaks" in {
41 | val table = "foo"
42 | sql"""
43 | select * from $table where
44 | a = ? and
45 | b = ?
46 | """ shouldBe "select * from foo where a = $1 and b = $2"
47 | }
48 | }
49 |
50 | }
51 |
--------------------------------------------------------------------------------
/core/src/test/scala/org/apache/pekko/persistence/r2dbc/journal/PersistTagsSpec.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Licensed to the Apache Software Foundation (ASF) under one or more
3 | * license agreements; and to You under the Apache License, version 2.0:
4 | *
5 | * https://www.apache.org/licenses/LICENSE-2.0
6 | *
7 | * This file is part of the Apache Pekko project, which was derived from Akka.
8 | */
9 |
10 | /*
11 | * Copyright (C) 2021 Lightbend Inc.
12 | */
13 |
14 | package org.apache.pekko.persistence.r2dbc.journal
15 |
16 | import scala.concurrent.duration._
17 |
18 | import org.apache.pekko
19 | import pekko.Done
20 | import pekko.actor.testkit.typed.scaladsl.LogCapturing
21 | import pekko.actor.testkit.typed.scaladsl.ScalaTestWithActorTestKit
22 | import pekko.actor.typed.ActorSystem
23 | import pekko.persistence.r2dbc.R2dbcSettings
24 | import pekko.persistence.r2dbc.TestActors.Persister
25 | import pekko.persistence.r2dbc.TestConfig
26 | import pekko.persistence.r2dbc.TestData
27 | import pekko.persistence.r2dbc.TestDbLifecycle
28 | import pekko.persistence.typed.PersistenceId
29 | import org.scalatest.wordspec.AnyWordSpecLike
30 |
31 | class PersistTagsSpec
32 | extends ScalaTestWithActorTestKit(TestConfig.config)
33 | with AnyWordSpecLike
34 | with TestDbLifecycle
35 | with TestData
36 | with LogCapturing {
37 |
38 | override def typedSystem: ActorSystem[_] = system
39 | private val settings = new R2dbcSettings(system.settings.config.getConfig("pekko.persistence.r2dbc"))
40 |
41 | case class Row(pid: String, seqNr: Long, tags: Set[String])
42 |
43 | private lazy val dialect = system.settings.config.getString("pekko.persistence.r2dbc.dialect")
44 |
45 | private lazy val testEnabled: Boolean = {
46 | // tags are not implemented for MySQL
47 | dialect != "mysql"
48 | }
49 |
50 | "Persist tags" should {
51 | if (!testEnabled) {
52 | info(s"PersistTagsSpec not enabled for $dialect")
53 | pending
54 | }
55 |
56 | "be the same for events stored in same transaction" in {
57 | val numberOfEntities = 9
58 | val entityType = nextEntityType()
59 |
60 | val entities = (0 until numberOfEntities).map { n =>
61 | val persistenceId = PersistenceId(entityType, s"p$n")
62 | val tags = Set(entityType, s"tag-p$n")
63 | spawn(Persister(persistenceId, tags), s"p$n")
64 | }
65 |
66 | entities.foreach { ref =>
67 | ref ! Persister.Persist("e1")
68 | }
69 |
70 | val pingProbe = createTestProbe[Done]()
71 | entities.foreach { ref =>
72 | ref ! Persister.Ping(pingProbe.ref)
73 | }
74 | pingProbe.receiveMessages(entities.size, 20.seconds)
75 |
76 | val rows =
77 | r2dbcExecutor
78 | .select[Row]("test")(
79 | connection => connection.createStatement(s"select * from ${settings.journalTableWithSchema}"),
80 | row => {
81 | val tags = row.get("tags", classOf[Array[String]]) match {
82 | case null => Set.empty[String]
83 | case tagsArray => tagsArray.toSet
84 | }
85 | Row(
86 | pid = row.get("persistence_id", classOf[String]),
87 | seqNr = row.get[java.lang.Long]("seq_nr", classOf[java.lang.Long]),
88 | tags)
89 | })
90 | .futureValue
91 |
92 | rows.foreach { case Row(pid, _, tags) =>
93 | withClue(s"pid [$pid}]: ") {
94 | tags shouldBe Set(PersistenceId.extractEntityType(pid), s"tag-${PersistenceId.extractEntityId(pid)}")
95 | }
96 | }
97 | }
98 |
99 | }
100 | }
101 |
--------------------------------------------------------------------------------
/core/src/test/scala/org/apache/pekko/persistence/r2dbc/journal/PersistTimestampSpec.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Licensed to the Apache Software Foundation (ASF) under one or more
3 | * license agreements; and to You under the Apache License, version 2.0:
4 | *
5 | * https://www.apache.org/licenses/LICENSE-2.0
6 | *
7 | * This file is part of the Apache Pekko project, which was derived from Akka.
8 | */
9 |
10 | /*
11 | * Copyright (C) 2021 Lightbend Inc.
12 | */
13 |
14 | package org.apache.pekko.persistence.r2dbc.journal
15 |
16 | import java.time.Instant
17 |
18 | import scala.concurrent.duration._
19 |
20 | import org.apache.pekko
21 | import pekko.Done
22 | import pekko.actor.testkit.typed.scaladsl.LogCapturing
23 | import pekko.actor.testkit.typed.scaladsl.ScalaTestWithActorTestKit
24 | import pekko.actor.typed.ActorSystem
25 | import pekko.persistence.r2dbc.R2dbcSettings
26 | import pekko.persistence.r2dbc.TestActors.Persister
27 | import pekko.persistence.r2dbc.TestConfig
28 | import pekko.persistence.r2dbc.TestData
29 | import pekko.persistence.r2dbc.TestDbLifecycle
30 | import pekko.persistence.typed.PersistenceId
31 | import pekko.serialization.SerializationExtension
32 | import org.scalatest.wordspec.AnyWordSpecLike
33 |
34 | class PersistTimestampSpec
35 | extends ScalaTestWithActorTestKit(TestConfig.config)
36 | with AnyWordSpecLike
37 | with TestDbLifecycle
38 | with TestData
39 | with LogCapturing {
40 |
41 | override def typedSystem: ActorSystem[_] = system
42 | private val settings = new R2dbcSettings(system.settings.config.getConfig("pekko.persistence.r2dbc"))
43 | private val serialization = SerializationExtension(system)
44 |
45 | case class Row(pid: String, seqNr: Long, dbTimestamp: Instant, event: String)
46 |
47 | "Persist timestamp" should {
48 |
49 | "be the same for events stored in same transaction" in {
50 | val numberOfEntities = 20
51 | val entityType = nextEntityType()
52 |
53 | val entities = (0 until numberOfEntities).map { n =>
54 | val persistenceId = PersistenceId(entityType, s"p$n")
55 | spawn(Persister(persistenceId), s"p$n")
56 | }
57 |
58 | (1 to 100).foreach { n =>
59 | val p = n % numberOfEntities
60 | // mix some persist 1 and persist 3 events
61 | if (n % 5 == 0) {
62 | // same event stored 3 times
63 | val event = s"e$p-$n"
64 | entities(p) ! Persister.PersistAll((0 until 3).map(_ => event).toList)
65 | } else {
66 | entities(p) ! Persister.Persist(s"e$p-$n")
67 | }
68 | }
69 |
70 | val pingProbe = createTestProbe[Done]()
71 | entities.foreach { ref =>
72 | ref ! Persister.Ping(pingProbe.ref)
73 | }
74 | pingProbe.receiveMessages(entities.size, 20.seconds)
75 |
76 | val rows =
77 | r2dbcExecutor
78 | .select[Row]("test")(
79 | connection => connection.createStatement(s"select * from ${settings.journalTableWithSchema}"),
80 | row => {
81 | val event = serialization
82 | .deserialize(
83 | row.get("event_payload", classOf[Array[Byte]]),
84 | row.get[Integer]("event_ser_id", classOf[Integer]),
85 | row.get("event_ser_manifest", classOf[String]))
86 | .get
87 | .asInstanceOf[String]
88 | Row(
89 | pid = row.get("persistence_id", classOf[String]),
90 | seqNr = row.get[java.lang.Long]("seq_nr", classOf[java.lang.Long]),
91 | dbTimestamp = row.get("db_timestamp", classOf[Instant]),
92 | event)
93 | })
94 | .futureValue
95 |
96 | rows.groupBy(_.event).foreach { case (_, rowsByUniqueEvent) =>
97 | withClue(s"pid [${rowsByUniqueEvent.head.pid}]: ") {
98 | rowsByUniqueEvent.map(_.dbTimestamp).toSet shouldBe Set(rowsByUniqueEvent.head.dbTimestamp)
99 | }
100 | }
101 |
102 | val rowOrdering: Ordering[Row] = Ordering.fromLessThan[Row] { (a, b) =>
103 | if (a eq b) false
104 | else if (a.dbTimestamp != b.dbTimestamp) a.dbTimestamp.compareTo(b.dbTimestamp) < 0
105 | else a.seqNr.compareTo(b.seqNr) < 0
106 | }
107 |
108 | rows.groupBy(_.pid).foreach { case (_, rowsByPid) =>
109 | withClue(s"pid [${rowsByPid.head.pid}]: ") {
110 | rowsByPid.sortBy(_.seqNr) shouldBe rowsByPid.sorted(rowOrdering)
111 | }
112 | }
113 | }
114 |
115 | }
116 | }
117 |
--------------------------------------------------------------------------------
/core/src/test/scala/org/apache/pekko/persistence/r2dbc/journal/R2dbcJournalPerfManyActorsSpec.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Licensed to the Apache Software Foundation (ASF) under one or more
3 | * license agreements; and to You under the Apache License, version 2.0:
4 | *
5 | * https://www.apache.org/licenses/LICENSE-2.0
6 | *
7 | * This file is part of the Apache Pekko project, which was derived from Akka.
8 | */
9 |
10 | /*
11 | * Copyright (C) 2021 Lightbend Inc.
12 | */
13 |
14 | package org.apache.pekko.persistence.r2dbc.journal
15 |
16 | import scala.concurrent.duration._
17 |
18 | import org.apache.pekko
19 | import pekko.actor.Props
20 | import pekko.actor.typed.ActorSystem
21 | import pekko.actor.typed.scaladsl.adapter._
22 | import pekko.persistence.CapabilityFlag
23 | import pekko.persistence.journal.JournalPerfSpec
24 | import pekko.persistence.journal.JournalPerfSpec.BenchActor
25 | import pekko.persistence.journal.JournalPerfSpec.Cmd
26 | import pekko.persistence.journal.JournalPerfSpec.ResetCounter
27 | import pekko.persistence.r2dbc.TestDbLifecycle
28 | import pekko.testkit.TestProbe
29 |
30 | class R2dbcJournalPerfManyActorsSpec extends JournalPerfSpec(R2dbcJournalPerfSpec.config) with TestDbLifecycle {
31 | override def eventsCount: Int = 10
32 |
33 | override def measurementIterations: Int = 2 // increase when testing for real
34 |
35 | override def awaitDurationMillis: Long = 60.seconds.toMillis
36 |
37 | override protected def supportsRejectingNonSerializableObjects: CapabilityFlag = CapabilityFlag.off()
38 |
39 | override def typedSystem: ActorSystem[_] = system.toTyped
40 |
41 | def actorCount = 20 // increase when testing for real
42 |
43 | private val commands = Vector(1 to eventsCount: _*)
44 |
45 | "A PersistentActor's performance" must {
46 | s"measure: persist()-ing $eventsCount events for $actorCount actors" in {
47 | val testProbe = TestProbe()
48 | val replyAfter = eventsCount
49 | def createBenchActor(actorNumber: Int) =
50 | system.actorOf(Props(classOf[BenchActor], s"$pid-$actorNumber", testProbe.ref, replyAfter))
51 | val actors = 1.to(actorCount).map(createBenchActor)
52 |
53 | measure(d => s"Persist()-ing $eventsCount * $actorCount took ${d.toMillis} ms") {
54 | for (cmd <- commands; actor <- actors) {
55 | actor ! Cmd("p", cmd)
56 | }
57 | for (_ <- actors) {
58 | testProbe.expectMsg(awaitDurationMillis.millis, commands.last)
59 | }
60 | for (actor <- actors) {
61 | actor ! ResetCounter
62 | }
63 | }
64 | }
65 | }
66 | }
67 |
--------------------------------------------------------------------------------
/core/src/test/scala/org/apache/pekko/persistence/r2dbc/journal/R2dbcJournalPerfSpec.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Licensed to the Apache Software Foundation (ASF) under one or more
3 | * license agreements; and to You under the Apache License, version 2.0:
4 | *
5 | * https://www.apache.org/licenses/LICENSE-2.0
6 | *
7 | * This file is part of the Apache Pekko project, which was derived from Akka.
8 | */
9 |
10 | /*
11 | * Copyright (C) 2021 Lightbend Inc.
12 | */
13 |
14 | package org.apache.pekko.persistence.r2dbc.journal
15 |
16 | import scala.concurrent.duration._
17 |
18 | import org.apache.pekko
19 | import pekko.actor.typed.ActorSystem
20 | import pekko.actor.typed.scaladsl.adapter._
21 | import pekko.persistence.CapabilityFlag
22 | import pekko.persistence.journal.JournalPerfSpec
23 | import pekko.persistence.r2dbc.TestDbLifecycle
24 |
25 | object R2dbcJournalPerfSpec {
26 | val config = R2dbcJournalSpec.testConfig()
27 | }
28 |
29 | class R2dbcJournalPerfSpec extends JournalPerfSpec(R2dbcJournalPerfSpec.config) with TestDbLifecycle {
30 | override def eventsCount: Int = 200
31 |
32 | override def measurementIterations: Int = 2 // increase when testing for real
33 |
34 | override def awaitDurationMillis: Long = 60.seconds.toMillis
35 |
36 | override protected def supportsRejectingNonSerializableObjects: CapabilityFlag = CapabilityFlag.off()
37 |
38 | override def typedSystem: ActorSystem[_] = system.toTyped
39 | }
40 |
--------------------------------------------------------------------------------
/core/src/test/scala/org/apache/pekko/persistence/r2dbc/journal/R2dbcJournalSpec.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Licensed to the Apache Software Foundation (ASF) under one or more
3 | * license agreements; and to You under the Apache License, version 2.0:
4 | *
5 | * https://www.apache.org/licenses/LICENSE-2.0
6 | *
7 | * This file is part of the Apache Pekko project, which was derived from Akka.
8 | */
9 |
10 | /*
11 | * Copyright (C) 2021 Lightbend Inc.
12 | */
13 |
14 | package org.apache.pekko.persistence.r2dbc.journal
15 |
16 | import org.apache.pekko
17 | import pekko.actor.typed.ActorSystem
18 | import pekko.actor.typed.scaladsl.adapter._
19 | import pekko.persistence.CapabilityFlag
20 | import pekko.persistence.journal.JournalSpec
21 | import pekko.persistence.r2dbc.TestConfig
22 | import pekko.persistence.r2dbc.TestDbLifecycle
23 | import com.typesafe.config.Config
24 | import com.typesafe.config.ConfigFactory
25 |
26 | object R2dbcJournalSpec {
27 | val config = R2dbcJournalSpec.testConfig()
28 |
29 | def configWithMeta =
30 | ConfigFactory
31 | .parseString("""pekko.persistence.r2dbc.with-meta = true""")
32 | .withFallback(R2dbcJournalSpec.testConfig())
33 |
34 | def testConfig(): Config = {
35 | ConfigFactory
36 | .parseString(s"""
37 | # allow java serialization when testing
38 | pekko.actor.allow-java-serialization = on
39 | pekko.actor.warn-about-java-serializer-usage = off
40 | """)
41 | .withFallback(TestConfig.config)
42 | }
43 | }
44 |
45 | class R2dbcJournalSpec extends JournalSpec(R2dbcJournalSpec.config) with TestDbLifecycle {
46 | override protected def supportsRejectingNonSerializableObjects: CapabilityFlag = CapabilityFlag.off()
47 | override def typedSystem: ActorSystem[_] = system.toTyped
48 | }
49 |
50 | class R2dbcJournalWithMetaSpec extends JournalSpec(R2dbcJournalSpec.configWithMeta) with TestDbLifecycle {
51 | override protected def supportsRejectingNonSerializableObjects: CapabilityFlag = CapabilityFlag.off()
52 | protected override def supportsMetadata: CapabilityFlag = CapabilityFlag.on()
53 | override def typedSystem: ActorSystem[_] = system.toTyped
54 | }
55 |
--------------------------------------------------------------------------------
/core/src/test/scala/org/apache/pekko/persistence/r2dbc/query/CurrentPersistenceIdsQuerySpec.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Licensed to the Apache Software Foundation (ASF) under one or more
3 | * license agreements; and to You under the Apache License, version 2.0:
4 | *
5 | * https://www.apache.org/licenses/LICENSE-2.0
6 | *
7 | * This file is part of the Apache Pekko project, which was derived from Akka.
8 | */
9 |
10 | /*
11 | * Copyright (C) 2021 Lightbend Inc.
12 | */
13 |
14 | package org.apache.pekko.persistence.r2dbc.query
15 |
16 | import scala.concurrent.duration._
17 |
18 | import org.apache.pekko
19 | import pekko.Done
20 | import pekko.actor.testkit.typed.scaladsl.LogCapturing
21 | import pekko.actor.testkit.typed.scaladsl.ScalaTestWithActorTestKit
22 | import pekko.actor.typed.ActorSystem
23 | import pekko.persistence.query.PersistenceQuery
24 | import pekko.persistence.r2dbc.TestActors
25 | import pekko.persistence.r2dbc.TestActors.Persister
26 | import pekko.persistence.r2dbc.TestConfig
27 | import pekko.persistence.r2dbc.TestData
28 | import pekko.persistence.r2dbc.TestDbLifecycle
29 | import pekko.persistence.r2dbc.query.scaladsl.R2dbcReadJournal
30 | import pekko.persistence.typed.PersistenceId
31 | import pekko.stream.scaladsl.Sink
32 | import com.typesafe.config.ConfigFactory
33 | import org.scalatest.wordspec.AnyWordSpecLike
34 |
35 | class CurrentPersistenceIdsQuerySpec
36 | extends ScalaTestWithActorTestKit(
37 | ConfigFactory
38 | .parseString("""
39 | pekko.persistence.r2dbc.query.persistence-ids.buffer-size = 20
40 | """)
41 | .withFallback(TestConfig.config))
42 | with AnyWordSpecLike
43 | with TestDbLifecycle
44 | with TestData
45 | with LogCapturing {
46 |
47 | override def typedSystem: ActorSystem[_] = system
48 |
49 | private val query = PersistenceQuery(testKit.system).readJournalFor[R2dbcReadJournal](R2dbcReadJournal.Identifier)
50 |
51 | private val zeros = "0000"
52 | private val entityType = nextEntityType()
53 | private val numberOfPids = 100
54 | private val pids =
55 | (1 to numberOfPids).map(n => PersistenceId(entityType, "p" + zeros.drop(n.toString.length) + n))
56 |
57 | override protected def beforeAll(): Unit = {
58 | super.beforeAll()
59 |
60 | val probe = createTestProbe[Done]()
61 | pids.foreach { pid =>
62 | val persister = spawn(TestActors.Persister(pid))
63 | persister ! Persister.PersistWithAck("e-1", probe.ref)
64 | persister ! Persister.PersistWithAck("e-2", probe.ref)
65 | persister ! Persister.Stop(probe.ref)
66 | }
67 |
68 | probe.receiveMessages(numberOfPids * 3, 30.seconds) // 2 acks + stop done
69 | }
70 |
71 | "Event Sourced currentPersistenceIds" should {
72 | "retrieve all ids" in {
73 | val result = query.currentPersistenceIds().runWith(Sink.seq).futureValue
74 | result shouldBe pids.map(_.id)
75 | }
76 |
77 | "retrieve ids afterId" in {
78 | val result = query.currentPersistenceIds(afterId = Some(pids(9).id), limit = 7).runWith(Sink.seq).futureValue
79 | result shouldBe pids.slice(10, 17).map(_.id)
80 | }
81 |
82 | }
83 |
84 | }
85 |
--------------------------------------------------------------------------------
/core/src/test/scala/org/apache/pekko/persistence/r2dbc/snapshot/R2dbcSnapshotStoreSpec.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Licensed to the Apache Software Foundation (ASF) under one or more
3 | * license agreements; and to You under the Apache License, version 2.0:
4 | *
5 | * https://www.apache.org/licenses/LICENSE-2.0
6 | *
7 | * This file is part of the Apache Pekko project, which was derived from Akka.
8 | */
9 |
10 | /*
11 | * Copyright (C) 2021 Lightbend Inc.
12 | */
13 |
14 | package org.apache.pekko.persistence.r2dbc.snapshot
15 |
16 | import org.apache.pekko
17 | import pekko.actor.typed.ActorSystem
18 | import pekko.actor.typed.scaladsl.adapter._
19 | import pekko.persistence.CapabilityFlag
20 | import pekko.persistence.DeleteSnapshotSuccess
21 | import pekko.persistence.SnapshotMetadata
22 | import pekko.persistence.SnapshotProtocol.DeleteSnapshot
23 | import pekko.persistence.SnapshotProtocol.LoadSnapshot
24 | import pekko.persistence.SnapshotProtocol.LoadSnapshotResult
25 | import pekko.persistence.SnapshotSelectionCriteria
26 | import pekko.persistence.r2dbc.{ TestConfig, TestDbLifecycle }
27 | import pekko.persistence.snapshot.SnapshotStoreSpec
28 | import pekko.testkit.TestProbe
29 | import org.scalatest.Outcome
30 | import org.scalatest.Pending
31 |
32 | class R2dbcSnapshotStoreSpec extends SnapshotStoreSpec(TestConfig.config) with TestDbLifecycle {
33 | def typedSystem: ActorSystem[_] = system.toTyped
34 |
35 | val ignoreTests = Set(
36 |     // All of these expect multiple snapshots for the same pid, either as the core test
37 |     // or as verification that there are still snapshots in the db after some specific delete
38 | "A snapshot store must load the most recent snapshot matching an upper sequence number bound",
39 | "A snapshot store must load the most recent snapshot matching upper sequence number and timestamp bounds",
40 | "A snapshot store must delete a single snapshot identified by sequenceNr in snapshot metadata",
41 | "A snapshot store must delete all snapshots matching upper sequence number and timestamp bounds",
42 | "A snapshot store must not delete snapshots with non-matching upper timestamp bounds")
43 |
44 | override protected def withFixture(test: NoArgTest): Outcome =
45 | if (ignoreTests(test.name)) {
46 | Pending // No Ignored/Skipped available so Pending will have to do
47 | } else {
48 | super.withFixture(test)
49 | }
50 |
51 | protected override def supportsMetadata: CapabilityFlag = true
52 |
53 |   // Note: these depend on populating the db with snapshots in SnapshotStoreSpec.beforeEach.
54 |   // They mostly cover the important bits of the skipped tests, but for an upsert snapshot store.
55 |   "An update-in-place snapshot store" must {
56 | "not find any other snapshots than the latest with upper sequence number bound" in {
57 | // SnapshotStoreSpec saves snapshots with sequence nr 10-15
58 | val senderProbe = TestProbe()
59 | snapshotStore.tell(
60 | LoadSnapshot(pid, SnapshotSelectionCriteria(maxSequenceNr = 13), Long.MaxValue),
61 | senderProbe.ref)
62 | senderProbe.expectMsg(LoadSnapshotResult(None, Long.MaxValue))
63 | snapshotStore.tell(LoadSnapshot(pid, SnapshotSelectionCriteria.Latest, toSequenceNr = 13), senderProbe.ref)
64 | senderProbe.expectMsg(LoadSnapshotResult(None, 13))
65 | snapshotStore.tell(LoadSnapshot(pid, SnapshotSelectionCriteria.Latest, toSequenceNr = 15), senderProbe.ref)
66 |
67 |       // no access to SnapshotStoreSpec.metadata with timestamps, so we can't compare directly (timestamps differ)
68 | val result = senderProbe.expectMsgType[LoadSnapshotResult]
69 | result.snapshot shouldBe defined
70 | result.snapshot.get.snapshot should ===("s-5")
71 | }
72 | "delete the single snapshot for a pid identified by sequenceNr in snapshot metadata" in {
73 | val md =
74 | SnapshotMetadata(pid, sequenceNr = 2, timestamp = 0) // don't care about timestamp for delete of single snap
75 | val cmd = DeleteSnapshot(md)
76 | val sub = TestProbe()
77 |
78 | val senderProbe = TestProbe()
79 | subscribe[DeleteSnapshot](sub.ref)
80 | snapshotStore.tell(cmd, senderProbe.ref)
81 | sub.expectMsg(cmd)
82 | senderProbe.expectMsg(DeleteSnapshotSuccess(md))
83 |
84 | snapshotStore.tell(LoadSnapshot(pid, SnapshotSelectionCriteria(md.sequenceNr), Long.MaxValue), senderProbe.ref)
85 | senderProbe.expectMsg(LoadSnapshotResult(None, Long.MaxValue))
86 | }
87 | }
88 | }
89 |
--------------------------------------------------------------------------------
/core/src/test/scala/org/apache/pekko/persistence/r2dbc/state/CurrentPersistenceIdsQuerySpec.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Licensed to the Apache Software Foundation (ASF) under one or more
3 | * license agreements; and to You under the Apache License, version 2.0:
4 | *
5 | * https://www.apache.org/licenses/LICENSE-2.0
6 | *
7 | * This file is part of the Apache Pekko project, which was derived from Akka.
8 | */
9 |
10 | /*
11 | * Copyright (C) 2021 Lightbend Inc.
12 | */
13 |
14 | package org.apache.pekko.persistence.r2dbc.state
15 |
16 | import scala.concurrent.duration._
17 |
18 | import org.apache.pekko
19 | import pekko.Done
20 | import pekko.actor.testkit.typed.scaladsl.LogCapturing
21 | import pekko.actor.testkit.typed.scaladsl.ScalaTestWithActorTestKit
22 | import pekko.actor.typed.ActorSystem
23 | import pekko.persistence.r2dbc.TestActors
24 | import pekko.persistence.r2dbc.TestActors.DurableStatePersister
25 | import pekko.persistence.r2dbc.TestConfig
26 | import pekko.persistence.r2dbc.TestData
27 | import pekko.persistence.r2dbc.TestDbLifecycle
28 | import pekko.persistence.r2dbc.state.scaladsl.R2dbcDurableStateStore
29 | import pekko.persistence.state.DurableStateStoreRegistry
30 | import pekko.persistence.typed.PersistenceId
31 | import pekko.stream.scaladsl.Sink
32 | import com.typesafe.config.ConfigFactory
33 | import org.scalatest.wordspec.AnyWordSpecLike
34 |
35 | class CurrentPersistenceIdsQuerySpec
36 | extends ScalaTestWithActorTestKit(
37 | ConfigFactory
38 | .parseString("""
39 | pekko.persistence.r2dbc.query.persistence-ids.buffer-size = 20
40 | """)
41 | .withFallback(TestConfig.config))
42 | with AnyWordSpecLike
43 | with TestDbLifecycle
44 | with TestData
45 | with LogCapturing {
46 |
47 | override def typedSystem: ActorSystem[_] = system
48 |
49 | private val store = DurableStateStoreRegistry(testKit.system)
50 | .durableStateStoreFor[R2dbcDurableStateStore[String]](R2dbcDurableStateStore.Identifier)
51 |
52 | private val zeros = "0000"
53 | private val entityType = nextEntityType()
54 | private val numberOfPids = 100
55 | private val pids =
56 | (1 to numberOfPids).map(n => PersistenceId(entityType, "p" + zeros.drop(n.toString.length) + n))
57 |
58 | override protected def beforeAll(): Unit = {
59 | super.beforeAll()
60 |
61 | val probe = createTestProbe[Done]()
62 | pids.foreach { pid =>
63 | val persister = spawn(TestActors.DurableStatePersister(pid))
64 | persister ! DurableStatePersister.PersistWithAck("s-1", probe.ref)
65 | persister ! DurableStatePersister.Stop(probe.ref)
66 | }
67 |
68 | probe.receiveMessages(numberOfPids * 2, 30.seconds) // ack + stop done
69 | }
70 |
71 | "Durable State persistenceIds" should {
72 | "retrieve all ids" in {
73 | val result = store.currentPersistenceIds().runWith(Sink.seq).futureValue
74 | result shouldBe pids.map(_.id)
75 | }
76 |
77 | "retrieve ids afterId" in {
78 | val result = store.currentPersistenceIds(afterId = Some(pids(9).id), limit = 7).runWith(Sink.seq).futureValue
79 | result shouldBe pids.slice(10, 17).map(_.id)
80 | }
81 |
82 | }
83 |
84 | }
85 |
--------------------------------------------------------------------------------
/ddl-scripts/create_tables_mysql.sql:
--------------------------------------------------------------------------------
1 | -- Licensed to the Apache Software Foundation (ASF) under one
2 | -- or more contributor license agreements. See the NOTICE file
3 | -- distributed with this work for additional information
4 | -- regarding copyright ownership. The ASF licenses this file
5 | -- to you under the Apache License, Version 2.0 (the
6 | -- "License"); you may not use this file except in compliance
7 | -- with the License. You may obtain a copy of the License at
8 | --
9 | -- http://www.apache.org/licenses/LICENSE-2.0
10 | --
11 | -- Unless required by applicable law or agreed to in writing,
12 | -- software distributed under the License is distributed on an
13 | -- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | -- KIND, either express or implied. See the License for the
15 | -- specific language governing permissions and limitations
16 | -- under the License.
17 |
18 | CREATE TABLE IF NOT EXISTS event_journal(
19 | slice INT NOT NULL,
20 | entity_type VARCHAR(255) NOT NULL,
21 | persistence_id VARCHAR(255) NOT NULL,
22 | seq_nr BIGINT NOT NULL,
23 | db_timestamp TIMESTAMP(6) NOT NULL,
24 |
25 | event_ser_id INTEGER NOT NULL,
26 | event_ser_manifest VARCHAR(255) NOT NULL,
27 | event_payload BLOB NOT NULL,
28 |
29 | deleted BOOLEAN DEFAULT FALSE NOT NULL,
30 | writer VARCHAR(255) NOT NULL,
31 | adapter_manifest VARCHAR(255),
32 | tags TEXT, -- FIXME no array type, is this the best option?
33 |
34 | meta_ser_id INTEGER,
35 | meta_ser_manifest VARCHAR(255),
36 | meta_payload BLOB,
37 |
38 | PRIMARY KEY(persistence_id, seq_nr)
39 | );
40 |
41 | -- `event_journal_slice_idx` is only needed if the slice based queries are used
42 | CREATE INDEX event_journal_slice_idx ON event_journal(slice, entity_type, db_timestamp, seq_nr);
43 |
44 | CREATE TABLE IF NOT EXISTS snapshot(
45 | slice INT NOT NULL,
46 | entity_type VARCHAR(255) NOT NULL,
47 | persistence_id VARCHAR(255) NOT NULL,
48 | seq_nr BIGINT NOT NULL,
49 | write_timestamp BIGINT NOT NULL,
50 | ser_id INTEGER NOT NULL,
51 | ser_manifest VARCHAR(255) NOT NULL,
52 | snapshot BLOB NOT NULL,
53 | meta_ser_id INTEGER,
54 | meta_ser_manifest VARCHAR(255),
55 | meta_payload BLOB,
56 |
57 | PRIMARY KEY(persistence_id)
58 | );
59 |
60 | CREATE TABLE IF NOT EXISTS durable_state (
61 | slice INT NOT NULL,
62 | entity_type VARCHAR(255) NOT NULL,
63 | persistence_id VARCHAR(255) NOT NULL,
64 | revision BIGINT NOT NULL,
65 | db_timestamp TIMESTAMP(6) NOT NULL,
66 |
67 | state_ser_id INTEGER NOT NULL,
68 | state_ser_manifest VARCHAR(255),
69 | state_payload BLOB NOT NULL,
70 | tags TEXT, -- FIXME no array type, is this the best option?
71 |
72 | PRIMARY KEY(persistence_id, revision)
73 | );
74 |
75 | -- `durable_state_slice_idx` is only needed if the slice based queries are used
76 | CREATE INDEX durable_state_slice_idx ON durable_state(slice, entity_type, db_timestamp, revision);
77 |
78 | -- Primitive offset types are stored in this table.
79 | -- If only timestamp based offsets are used this table is optional.
80 | -- Configure pekko.projection.r2dbc.offset-store.offset-table="" if the table is not created.
81 | CREATE TABLE IF NOT EXISTS projection_offset_store (
82 | projection_name VARCHAR(255) NOT NULL,
83 | projection_key VARCHAR(255) NOT NULL,
84 | current_offset VARCHAR(255) NOT NULL,
85 | manifest VARCHAR(32) NOT NULL,
86 | mergeable BOOLEAN NOT NULL,
87 | last_updated BIGINT NOT NULL,
88 | PRIMARY KEY(projection_name, projection_key)
89 | );
90 |
91 | -- Timestamp based offsets are stored in this table.
92 | CREATE TABLE IF NOT EXISTS projection_timestamp_offset_store (
93 | projection_name VARCHAR(255) NOT NULL,
94 | projection_key VARCHAR(255) NOT NULL,
95 | slice INT NOT NULL,
96 | persistence_id VARCHAR(255) NOT NULL,
97 | seq_nr BIGINT NOT NULL,
98 | -- timestamp_offset is the db_timestamp of the original event
99 | timestamp_offset TIMESTAMP(6) NOT NULL,
100 | -- timestamp_consumed is when the offset was stored
101 | -- the consumer lag is timestamp_consumed - timestamp_offset
102 | timestamp_consumed TIMESTAMP(6) NOT NULL,
103 | PRIMARY KEY(slice, projection_name, timestamp_offset, persistence_id, seq_nr)
104 | );
105 |
106 | CREATE TABLE IF NOT EXISTS projection_management (
107 | projection_name VARCHAR(255) NOT NULL,
108 | projection_key VARCHAR(255) NOT NULL,
109 | paused BOOLEAN NOT NULL,
110 | last_updated BIGINT NOT NULL,
111 | PRIMARY KEY(projection_name, projection_key)
112 | );
113 |
--------------------------------------------------------------------------------
/ddl-scripts/create_tables_postgres.sql:
--------------------------------------------------------------------------------
1 | CREATE TABLE IF NOT EXISTS event_journal(
2 | slice INT NOT NULL,
3 | entity_type VARCHAR(255) NOT NULL,
4 | persistence_id VARCHAR(255) NOT NULL,
5 | seq_nr BIGINT NOT NULL,
6 | db_timestamp timestamp with time zone NOT NULL,
7 |
8 | event_ser_id INTEGER NOT NULL,
9 | event_ser_manifest VARCHAR(255) NOT NULL,
10 | event_payload BYTEA NOT NULL,
11 |
12 | deleted BOOLEAN DEFAULT FALSE NOT NULL,
13 | writer VARCHAR(255) NOT NULL,
14 | adapter_manifest VARCHAR(255),
15 | tags TEXT ARRAY,
16 |
17 | meta_ser_id INTEGER,
18 | meta_ser_manifest VARCHAR(255),
19 | meta_payload BYTEA,
20 |
21 | PRIMARY KEY(persistence_id, seq_nr)
22 | );
23 |
24 | -- `event_journal_slice_idx` is only needed if the slice based queries are used
25 | CREATE INDEX IF NOT EXISTS event_journal_slice_idx ON event_journal(slice, entity_type, db_timestamp, seq_nr);
26 |
27 | CREATE TABLE IF NOT EXISTS snapshot(
28 | slice INT NOT NULL,
29 | entity_type VARCHAR(255) NOT NULL,
30 | persistence_id VARCHAR(255) NOT NULL,
31 | seq_nr BIGINT NOT NULL,
32 | write_timestamp BIGINT NOT NULL,
33 | ser_id INTEGER NOT NULL,
34 | ser_manifest VARCHAR(255) NOT NULL,
35 | snapshot BYTEA NOT NULL,
36 | meta_ser_id INTEGER,
37 | meta_ser_manifest VARCHAR(255),
38 | meta_payload BYTEA,
39 |
40 | PRIMARY KEY(persistence_id)
41 | );
42 |
43 | CREATE TABLE IF NOT EXISTS durable_state (
44 | slice INT NOT NULL,
45 | entity_type VARCHAR(255) NOT NULL,
46 | persistence_id VARCHAR(255) NOT NULL,
47 | revision BIGINT NOT NULL,
48 | db_timestamp timestamp with time zone NOT NULL,
49 |
50 | state_ser_id INTEGER NOT NULL,
51 | state_ser_manifest VARCHAR(255),
52 | state_payload BYTEA NOT NULL,
53 | tags TEXT ARRAY,
54 |
55 | PRIMARY KEY(persistence_id, revision)
56 | );
57 |
58 | -- `durable_state_slice_idx` is only needed if the slice based queries are used
59 | CREATE INDEX IF NOT EXISTS durable_state_slice_idx ON durable_state(slice, entity_type, db_timestamp, revision);
60 |
61 | -- Primitive offset types are stored in this table.
62 | -- If only timestamp based offsets are used this table is optional.
63 | -- Configure pekko.projection.r2dbc.offset-store.offset-table="" if the table is not created.
64 | CREATE TABLE IF NOT EXISTS projection_offset_store (
65 | projection_name VARCHAR(255) NOT NULL,
66 | projection_key VARCHAR(255) NOT NULL,
67 | current_offset VARCHAR(255) NOT NULL,
68 | manifest VARCHAR(32) NOT NULL,
69 | mergeable BOOLEAN NOT NULL,
70 | last_updated BIGINT NOT NULL,
71 | PRIMARY KEY(projection_name, projection_key)
72 | );
73 |
74 | -- Timestamp based offsets are stored in this table.
75 | CREATE TABLE IF NOT EXISTS projection_timestamp_offset_store (
76 | projection_name VARCHAR(255) NOT NULL,
77 | projection_key VARCHAR(255) NOT NULL,
78 | slice INT NOT NULL,
79 | persistence_id VARCHAR(255) NOT NULL,
80 | seq_nr BIGINT NOT NULL,
81 | -- timestamp_offset is the db_timestamp of the original event
82 | timestamp_offset timestamp with time zone NOT NULL,
83 | -- timestamp_consumed is when the offset was stored
84 | -- the consumer lag is timestamp_consumed - timestamp_offset
85 | timestamp_consumed timestamp with time zone NOT NULL,
86 | PRIMARY KEY(slice, projection_name, timestamp_offset, persistence_id, seq_nr)
87 | );
88 |
89 | CREATE TABLE IF NOT EXISTS projection_management (
90 | projection_name VARCHAR(255) NOT NULL,
91 | projection_key VARCHAR(255) NOT NULL,
92 | paused BOOLEAN NOT NULL,
93 | last_updated BIGINT NOT NULL,
94 | PRIMARY KEY(projection_name, projection_key)
95 | );
96 |
--------------------------------------------------------------------------------
/ddl-scripts/create_tables_yugabyte.sql:
--------------------------------------------------------------------------------
1 | CREATE TABLE IF NOT EXISTS event_journal(
2 | slice INT NOT NULL,
3 | entity_type VARCHAR(255) NOT NULL,
4 | persistence_id VARCHAR(255) NOT NULL,
5 | seq_nr BIGINT NOT NULL,
6 | db_timestamp timestamp with time zone NOT NULL,
7 |
8 | event_ser_id INTEGER NOT NULL,
9 | event_ser_manifest VARCHAR(255) NOT NULL,
10 | event_payload BYTEA NOT NULL,
11 |
12 | deleted BOOLEAN DEFAULT FALSE NOT NULL,
13 | writer VARCHAR(255) NOT NULL,
14 | adapter_manifest VARCHAR(255),
15 | tags TEXT ARRAY,
16 |
17 | meta_ser_id INTEGER,
18 | meta_ser_manifest VARCHAR(255),
19 | meta_payload BYTEA,
20 |
21 | PRIMARY KEY(persistence_id HASH, seq_nr ASC)
22 | );
23 |
24 | -- `event_journal_slice_idx` is only needed if the slice based queries are used
25 | CREATE INDEX IF NOT EXISTS event_journal_slice_idx ON event_journal(slice ASC, entity_type ASC, db_timestamp ASC, seq_nr ASC, persistence_id, deleted)
26 | SPLIT AT VALUES ((127), (255), (383), (511), (639), (767), (895));
27 |
28 | CREATE TABLE IF NOT EXISTS snapshot(
29 | slice INT NOT NULL,
30 | entity_type VARCHAR(255) NOT NULL,
31 | persistence_id VARCHAR(255) NOT NULL,
32 | seq_nr BIGINT NOT NULL,
33 | write_timestamp BIGINT NOT NULL,
34 | ser_id INTEGER NOT NULL,
35 | ser_manifest VARCHAR(255) NOT NULL,
36 | snapshot BYTEA NOT NULL,
37 | meta_ser_id INTEGER,
38 | meta_ser_manifest VARCHAR(255),
39 | meta_payload BYTEA,
40 |
41 | PRIMARY KEY(persistence_id HASH)
42 | );
43 |
44 | CREATE TABLE IF NOT EXISTS durable_state (
45 | slice INT NOT NULL,
46 | entity_type VARCHAR(255) NOT NULL,
47 | persistence_id VARCHAR(255) NOT NULL,
48 | revision BIGINT NOT NULL,
49 | db_timestamp timestamp with time zone NOT NULL,
50 |
51 | state_ser_id INTEGER NOT NULL,
52 | state_ser_manifest VARCHAR(255),
53 | state_payload BYTEA NOT NULL,
54 | tags TEXT ARRAY,
55 |
56 | PRIMARY KEY(persistence_id HASH, revision ASC)
57 | );
58 |
59 | -- `durable_state_slice_idx` is only needed if the slice based queries are used
60 | CREATE INDEX IF NOT EXISTS durable_state_slice_idx ON durable_state(slice ASC, entity_type ASC, db_timestamp ASC, revision ASC, persistence_id)
61 | SPLIT AT VALUES ((127), (255), (383), (511), (639), (767), (895));
62 |
63 | -- Primitive offset types are stored in this table.
64 | -- If only timestamp based offsets are used this table is optional.
65 | -- Configure pekko.projection.r2dbc.offset-store.offset-table="" if the table is not created.
66 | CREATE TABLE IF NOT EXISTS projection_offset_store (
67 | projection_name VARCHAR(255) NOT NULL,
68 | projection_key VARCHAR(255) NOT NULL,
69 | current_offset VARCHAR(255) NOT NULL,
70 | manifest VARCHAR(32) NOT NULL,
71 | mergeable BOOLEAN NOT NULL,
72 | last_updated BIGINT NOT NULL,
73 | PRIMARY KEY(projection_name, projection_key)
74 | );
75 |
76 | -- Timestamp based offsets are stored in this table.
77 |
78 | CREATE TABLE IF NOT EXISTS projection_timestamp_offset_store (
79 | projection_name VARCHAR(255) NOT NULL,
80 | projection_key VARCHAR(255) NOT NULL,
81 | slice INT NOT NULL,
82 | persistence_id VARCHAR(255) NOT NULL,
83 | seq_nr BIGINT NOT NULL,
84 | -- timestamp_offset is the db_timestamp of the original event
85 | timestamp_offset timestamp with time zone NOT NULL,
86 | -- timestamp_consumed is when the offset was stored
87 | -- the consumer lag is timestamp_consumed - timestamp_offset
88 | timestamp_consumed timestamp with time zone NOT NULL,
89 | PRIMARY KEY(slice ASC, projection_name ASC, timestamp_offset ASC, persistence_id ASC, seq_nr ASC)
90 | ) SPLIT AT VALUES ((127), (255), (383), (511), (639), (767), (895));
91 |
92 | CREATE TABLE IF NOT EXISTS projection_management (
93 | projection_name VARCHAR(255) NOT NULL,
94 | projection_key VARCHAR(255) NOT NULL,
95 | paused BOOLEAN NOT NULL,
96 | last_updated BIGINT NOT NULL,
97 | PRIMARY KEY(projection_name, projection_key)
98 | );
99 |
--------------------------------------------------------------------------------
/ddl-scripts/drop_tables_mysql.sql:
--------------------------------------------------------------------------------
1 | -- Licensed to the Apache Software Foundation (ASF) under one
2 | -- or more contributor license agreements. See the NOTICE file
3 | -- distributed with this work for additional information
4 | -- regarding copyright ownership. The ASF licenses this file
5 | -- to you under the Apache License, Version 2.0 (the
6 | -- "License"); you may not use this file except in compliance
7 | -- with the License. You may obtain a copy of the License at
8 | --
9 | -- http://www.apache.org/licenses/LICENSE-2.0
10 | --
11 | -- Unless required by applicable law or agreed to in writing,
12 | -- software distributed under the License is distributed on an
13 | -- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | -- KIND, either express or implied. See the License for the
15 | -- specific language governing permissions and limitations
16 | -- under the License.
17 |
18 | DROP TABLE IF EXISTS event_journal;
19 | DROP TABLE IF EXISTS snapshot;
20 | DROP TABLE IF EXISTS durable_state;
21 | DROP TABLE IF EXISTS projection_offset_store;
22 | DROP TABLE IF EXISTS projection_timestamp_offset_store;
23 | DROP TABLE IF EXISTS projection_management;
24 |
--------------------------------------------------------------------------------
/ddl-scripts/drop_tables_postgres.sql:
--------------------------------------------------------------------------------
1 | DROP INDEX event_journal_slice_idx;
2 | DROP TABLE IF EXISTS event_journal;
3 | DROP TABLE IF EXISTS snapshot;
4 | DROP TABLE IF EXISTS durable_state;
5 | DROP TABLE IF EXISTS projection_offset_store;
6 | DROP TABLE IF EXISTS projection_timestamp_offset_store;
7 | DROP TABLE IF EXISTS projection_management;
8 |
--------------------------------------------------------------------------------
/design-notes.md:
--------------------------------------------------------------------------------
1 | # Design notes
2 |
3 | One of the primary goals with this Pekko Persistence plugin is to have a design that is efficient for Postgres compatible distributed SQL databases like Yugabyte or Cockroach. It should also work well with ordinary Postgres.
4 |
5 | ## Problems with eventsByTag
6 |
7 | The usage of `eventsByTag` for Projections has the major drawback that the number of tags must be decided up-front and can't easily be changed afterwards. Starting with too many tags means much overhead, since many projection instances would be running on each node in a small Pekko Cluster, with each projection instance polling the database periodically. Starting with too few tags means that it can't be scaled later to more Pekko nodes.
8 |
9 | ## Introducing event slicing
10 |
11 | Instead of tags we can store a slice number derived by hashing the persistence id, for example `math.abs(persistenceId.hashCode % numberOfSlices)`.
12 |
13 | Then the Projection query can be a range query of the slices. For example if using 1024 slices and running 4 Projection instances the slice ranges would be (0 to 255), (256 to 511), (512 to 767) and (768 to 1023). That can easily be split to more Projection instances when needed and still reuse the offsets for the previous range distributions.
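
For illustration, a minimal Scala sketch of the slice computation and range split described above (the names `sliceFor` and `sliceRanges` are only for this example; the plugin exposes equivalent functionality through its query API):

```scala
object SliceExample {
  val numberOfSlices = 1024

  // slice derived by hashing the persistence id, as described above
  def sliceFor(persistenceId: String): Int =
    math.abs(persistenceId.hashCode % numberOfSlices)

  // split all slices into contiguous ranges,
  // e.g. 4 ranges => 0-255, 256-511, 512-767, 768-1023
  def sliceRanges(numberOfRanges: Int): Seq[Range] = {
    val rangeSize = numberOfSlices / numberOfRanges
    (0 until numberOfRanges).map(i => (i * rangeSize) until ((i + 1) * rangeSize))
  }
}
```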
14 |
15 | ## Offset
16 |
17 | What offset shall be used for the Projection queries? A database sequence is not a good fit because it becomes a bottleneck on the write path, and it doesn't give strong guarantees of monotonically increasing values without gaps anyway.
18 |
19 | A rich offset that tracks sequence numbers per persistence id would be very useful for deduplication. Then the offset itself doesn't have to be very exact, since we can scan back in time for potentially missed events. That would also make it easier to have a live feed of the events as Pekko messages directly from the write-side to the Projection, which would reduce the need for frequent polling of the database.
20 |
21 | That rich offset can be stored in a database table with one row per persistence id. It can be capped to a time window. For quick deduplication it would also have a cache in memory of all or part of that time window.
22 |
23 | With such sequence number tracking in place the `eventsBySlices` query can use an ordinary database timestamp as the offset.
24 |
25 | Using `transaction_timestamp()` as this timestamp based offset has a few challenges:
26 |
27 | * The `transaction_timestamp()` is the time when the transaction started, not when it was committed. This means that a "later" event may be visible first and when retrieving events after the previously seen timestamp we may miss some events.
28 | * In distributed SQL databases there can also be clock skews for the database timestamps.
29 | * There can be more than one event per timestamp.
30 |
31 | This means that it would be possible to miss events when tailing the end of the event log, but the query can perform additional backtracking queries to catch missed events, since the deduplication will filter out already processed events.
32 |
33 | ## Secondary index for eventsBySlices
34 |
35 | The range query for `eventsBySlices` would look something like
36 |
37 | ```sql
38 | SELECT * FROM event_journal
39 | WHERE entity_type = $1
40 | AND slice BETWEEN $2 AND $3
41 | AND db_timestamp >= $4
42 | AND db_timestamp < transaction_timestamp() - interval '200 milliseconds'
43 | ORDER BY db_timestamp, seq_nr
44 | ```
45 |
46 | That would need a secondary index like:
47 |
48 | ```sql
49 | CREATE INDEX IF NOT EXISTS event_journal_slice_idx ON event_journal(slice, entity_type, db_timestamp)
50 | ```
51 |
52 | An alternative to `slice BETWEEN` would be `slice IN (0, 1, 2, ..., 31)`.
53 |
54 | ## Changing slice ranges
55 |
56 | When changing the number of Projection instances it is important that a given slice is not processed in more than one place. The rich offset table can include the slice number so that a new range distribution can continue from the offsets of the previous distribution. Once again, the exact deduplication is important.
57 |
--------------------------------------------------------------------------------
/docker/docker-compose-mysql.yml:
--------------------------------------------------------------------------------
1 | # Licensed to the Apache Software Foundation (ASF) under one
2 | # or more contributor license agreements. See the NOTICE file
3 | # distributed with this work for additional information
4 | # regarding copyright ownership. The ASF licenses this file
5 | # to you under the Apache License, Version 2.0 (the
6 | # "License"); you may not use this file except in compliance
7 | # with the License. You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing,
12 | # software distributed under the License is distributed on an
13 | # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | # KIND, either express or implied. See the License for the
15 | # specific language governing permissions and limitations
16 | # under the License.
17 |
18 | services:
19 | mysql-db:
20 | image: mysql:9.1.0
21 | container_name: docker-mysql-db-1
22 | ports:
23 | - 3306:3306
24 | environment:
25 | MYSQL_ROOT_PASSWORD: root
26 | healthcheck:
27 | test: [ "CMD", "mysqladmin", "--password=root", "ping", "-h", "127.0.0.1" ]
28 | interval: 1s
29 | timeout: 1s
30 | retries: 60
31 |
--------------------------------------------------------------------------------
/docker/docker-compose-postgres.yml:
--------------------------------------------------------------------------------
1 | services:
2 | postgres-db:
3 | image: postgres:latest
4 | container_name: docker-postgres-db-1
5 | ports:
6 | - 5432:5432
7 | environment:
8 | POSTGRES_USER: postgres
9 | POSTGRES_PASSWORD: postgres
10 |
--------------------------------------------------------------------------------
/docker/docker-compose-yugabyte.yml:
--------------------------------------------------------------------------------
1 | # Local Yugabyte database, see https://docs.yugabyte.com/latest/deploy/docker/docker-compose/
2 |
3 | volumes:
4 | yb-master-data-1:
5 | yb-tserver-data-1:
6 |
7 | services:
8 | yb-master:
9 | image: yugabytedb/yugabyte:2.20.4.0-b50
10 | container_name: yb-master-n1
11 | volumes:
12 | - yb-master-data-1:/mnt/master
13 | command: [ "/home/yugabyte/bin/yb-master",
14 | "--fs_data_dirs=/mnt/master",
15 | "--master_addresses=yb-master-n1:7100",
16 | "--rpc_bind_addresses=yb-master-n1:7100",
17 | "--replication_factor=1"]
18 | ports:
19 | - "7000:7000"
20 | environment:
21 | SERVICE_7000_NAME: yb-master
22 |
23 | yb-tserver:
24 | image: yugabytedb/yugabyte:2.20.4.0-b50
25 | container_name: yb-tserver-n1
26 | shm_size: '512mb'
27 | volumes:
28 | - yb-tserver-data-1:/mnt/tserver
29 | command: [ "/home/yugabyte/bin/yb-tserver",
30 | "--fs_data_dirs=/mnt/tserver",
31 | "--start_pgsql_proxy",
32 | "--rpc_bind_addresses=yb-tserver-n1:9100",
33 | "--tserver_master_addrs=yb-master-n1:7100",
34 | "--ysql_sequence_cache_minval=1",
35 | "--yb_num_shards_per_tserver=1"]
36 | ports:
37 | - "9042:9042"
38 | - "5433:5433"
39 | - "9000:9000"
40 | environment:
41 | SERVICE_5433_NAME: ysql
42 | SERVICE_9042_NAME: ycql
43 | SERVICE_6379_NAME: yedis
44 | SERVICE_9000_NAME: yb-tserver
45 | depends_on:
46 | - yb-master
47 |
--------------------------------------------------------------------------------
/docs/src/main/paradox/connection-config.md:
--------------------------------------------------------------------------------
1 | # Connection configuration
2 |
3 | Shared configuration for the connection pool is located under `pekko.persistence.r2dbc.connection-factory`.
4 | You have to set at least:
5 |
6 | Postgres:
7 | : @@snip [application.conf](/docs/src/test/resources/application-postgres.conf) { #connection-settings }
8 |
9 | Yugabyte:
10 | : @@snip [application.conf](/docs/src/test/resources/application-yugabyte.conf) { #connection-settings }
11 |
12 | MySQL:
13 | : @@snip [application.conf](/docs/src/test/resources/application-mysql.conf) { #connection-settings }
14 |
15 | ## Reference configuration
16 |
17 | The following can be overridden in your `application.conf` for the connection settings:
18 |
19 | @@snip [reference.conf](/core/src/main/resources/reference.conf) {#connection-settings}
20 |
--------------------------------------------------------------------------------
/docs/src/main/paradox/contributing.md:
--------------------------------------------------------------------------------
1 | # Contributing
2 |
3 | Please feel free to contribute to Apache Pekko Persistence R2DBC and the documentation by reporting issues you identify, or by suggesting changes to the code.
4 | Please refer to our [contributing instructions](https://github.com/apache/pekko-persistence-r2dbc/blob/main/CONTRIBUTING.md) to learn how it can be done.
5 |
6 | We want the Apache Pekko community to have a welcoming and open atmosphere and expect all contributors to respect our [code of conduct](https://www.apache.org/foundation/policies/conduct.html).
7 |
--------------------------------------------------------------------------------
/docs/src/main/paradox/durable-state-store.md:
--------------------------------------------------------------------------------
1 | # Durable state store plugin
2 |
3 | The durable state plugin enables storing and loading key-value entries for @extref:[durable state actors](pekko:typed/durable-state/persistence.html).
4 |
5 | ## Schema
6 |
7 | The `durable_state` table and `durable_state_slice_idx` index need to be created in the configured database, see schema definition in @ref:[Creating the schema](getting-started.md#schema).
8 |
9 | The `durable_state_slice_idx` index is only needed if the slice based @ref:[queries](query.md) are used.
10 |
11 | ## Configuration
12 |
13 | To enable the durable state store plugin to be used by default, add the following line to your Pekko `application.conf`:
14 |
15 | ```
16 | pekko.persistence.state.plugin = "pekko.persistence.r2dbc.state"
17 | ```
18 |
19 | It can also be enabled with the `durableStateStorePluginId` for a specific `DurableStateBehavior` and multiple
20 | plugin configurations are supported.
21 |
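For example, selecting this plugin for a single behavior could look like the following minimal sketch (the `MyDurableEntity` object, its `Command` and `MyState` types and the no-op command handler are illustrative only):

```scala
import org.apache.pekko
import pekko.persistence.typed.PersistenceId
import pekko.persistence.typed.state.scaladsl.{ DurableStateBehavior, Effect }

object MyDurableEntity {
  sealed trait Command
  final case class MyState(value: String)

  def apply(entityId: String): DurableStateBehavior[Command, MyState] =
    DurableStateBehavior[Command, MyState](
      persistenceId = PersistenceId("MyEntity", entityId),
      emptyState = MyState(""),
      // no-op command handler, just to keep the sketch compilable
      commandHandler = (_, _) => Effect.none)
      .withDurableStateStorePluginId("pekko.persistence.r2dbc.state")
}
```
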
22 | See also @ref:[Connection configuration](connection-config.md).
23 |
24 | ### Reference configuration
25 |
26 | The following can be overridden in your `application.conf` for the durable state store specific settings:
27 |
28 | @@snip [reference.conf](/core/src/main/resources/reference.conf) {#durable-state-settings}
29 |
30 | ## Deletes
31 |
32 | The durable state store supports deletes through hard deletes, which means the durable state entries are actually deleted from the database.
33 | There is no materialized view with a copy of the state, so make sure not to delete durable states too early if they are used from projections or queries.
34 |
--------------------------------------------------------------------------------
/docs/src/main/paradox/getting-started.md:
--------------------------------------------------------------------------------
1 | # Getting Started
2 |
3 | ## Dependencies
4 |
5 | @@dependency [Maven,sbt,Gradle] {
6 | group=org.apache.pekko
7 | artifact=pekko-persistence-r2dbc_$scala.binary.version$
8 | version=$project.version$
9 | }
10 |
11 | This plugin depends on Pekko $pekko.version$ or later. It is important that all `pekko-*`
12 | dependencies are of the same version, so it is recommended to depend on them explicitly to avoid problems
13 | with transitive dependencies causing an unlucky mix of versions.
14 |
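A sketch of pinning explicit Pekko dependencies to one version in `build.sbt` (the artifact list and `PekkoVersion` value are examples only; use the modules and Pekko version your project actually needs):

```scala
val PekkoVersion = "1.0.2" // example value, align with the Pekko version used by this plugin
libraryDependencies ++= Seq(
  "org.apache.pekko" %% "pekko-persistence-typed" % PekkoVersion,
  "org.apache.pekko" %% "pekko-persistence-query" % PekkoVersion)
```
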
15 | The plugin is published for Scala 2.13 and Scala 3.
16 |
17 | ## Enabling
18 |
19 | To enable the plugins to be used by default, add the following line to your Pekko `application.conf`:
20 |
21 | ```
22 | pekko.persistence.journal.plugin = "pekko.persistence.r2dbc.journal"
23 | pekko.persistence.snapshot-store.plugin = "pekko.persistence.r2dbc.snapshot"
24 | pekko.persistence.state.plugin = "pekko.persistence.r2dbc.state"
25 | ```
26 |
27 | More information in:
28 |
29 | * @ref:[journal](journal.md)
30 | * @ref:[snapshot store](snapshots.md)
31 | * @ref:[durable state store](durable-state-store.md)
32 | * @ref:[queries](query.md)
33 |
34 | ## Local testing
35 |
36 | The database can be run in Docker. Here's a sample docker compose file:
37 |
38 | Postgres:
39 | : @@snip [docker-compose.yml](/docker/docker-compose-postgres.yml)
40 |
41 | Yugabyte:
42 | : @@snip [docker-compose.yml](/docker/docker-compose-yugabyte.yml)
43 |
44 | Start with:
45 |
46 | Postgres:
47 | : ```
48 | docker compose -f docker/docker-compose-postgres.yml up
49 | ```
50 |
51 | Yugabyte:
52 | : ```
53 | docker compose -f docker/docker-compose-yugabyte.yml up
54 | ```
55 |
56 |
57 | ### Creating the schema
58 |
59 | Tables and indexes:
60 |
61 | Postgres:
62 | : @@snip [create_tables.sql](/ddl-scripts/create_tables_postgres.sql)
63 |
64 | Yugabyte:
65 | : @@snip [create_tables.sql](/ddl-scripts/create_tables_yugabyte.sql)
66 |
67 | The ddl script can be run in Docker with:
68 |
69 | Postgres:
70 | : ```
71 | docker exec -i docker-postgres-db-1 psql -U postgres -t < ddl-scripts/create_tables_postgres.sql
72 | ```
73 |
74 | Yugabyte:
75 | : ```
76 | docker exec -i yb-tserver-n1 /home/yugabyte/bin/ysqlsh -h yb-tserver-n1 -t < ddl-scripts/create_tables_yugabyte.sql
77 | ```
78 |
79 | ### Dropping the schema
80 |
81 | Postgres:
82 | : @@snip [drop_tables.sql](/ddl-scripts/drop_tables_postgres.sql)
83 |
84 | Yugabyte:
85 | : @@snip [drop_tables.sql](/ddl-scripts/drop_tables_postgres.sql)
86 |
--------------------------------------------------------------------------------
/docs/src/main/paradox/index.md:
--------------------------------------------------------------------------------
1 | # Pekko Persistence R2DBC Documentation
2 |
3 | The Pekko Persistence R2DBC plugin allows using SQL databases with R2DBC as a backend for Pekko Persistence.
4 |
5 | @@toc { depth=2 }
6 |
7 | @@@ index
8 |
9 | * [overview](overview.md)
10 | * [Getting Started](getting-started.md)
11 | * [Connection Configuration](connection-config.md)
12 | * [License Report](license-report.md)
13 | * [Journal Plugin](journal.md)
14 | * [Snapshot Plugin](snapshots.md)
15 | * [Durable State Plugin](durable-state-store.md)
16 | * [Query Plugin](query.md)
17 | * [Projection](projection.md)
18 | * [Migration Tool](migration.md)
19 | * [Migration Guides](migration-guides.md)
20 | * [Release Notes](release-notes/index.md)
21 | * [Contributing](contributing.md)
22 |
23 | @@@
24 |
25 |
--------------------------------------------------------------------------------
/docs/src/main/paradox/journal.md:
--------------------------------------------------------------------------------
1 | # Journal plugin
2 |
3 | The journal plugin enables storing and loading events for @extref:[event sourced persistent actors](pekko:typed/persistence.html).
4 |
5 | ## Schema
6 |
7 | The `event_journal` table and `event_journal_slice_idx` index need to be created in the configured database, see schema definition in @ref:[Creating the schema](getting-started.md#schema).
8 |
9 | The `event_journal_slice_idx` index is only needed if the slice based @ref:[queries](query.md) are used.
10 |
11 | ## Relation to Pekko JDBC plugin
12 |
13 | Pekko Persistence R2DBC plugin tables are not compatible with the tables of Pekko Persistence JDBC. JDBC data must be migrated using the @ref:[migration tool](migration.md) and a different schema/database must be used (or the table names overridden).
14 |
15 | ## Configuration
16 |
17 | To enable the journal plugin to be used by default, add the following line to your Pekko `application.conf`:
18 |
19 | ```
20 | pekko.persistence.journal.plugin = "pekko.persistence.r2dbc.journal"
21 | ```
22 |
23 | It can also be enabled with the `journalPluginId` for a specific `EventSourcedBehavior` and multiple
24 | plugin configurations are supported.
25 |
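For example, selecting this plugin for a single behavior could look like the following minimal sketch (the `MyEntity` object and its command, event and state types are illustrative only):

```scala
import org.apache.pekko
import pekko.persistence.typed.PersistenceId
import pekko.persistence.typed.scaladsl.{ Effect, EventSourcedBehavior }

object MyEntity {
  sealed trait Command
  final case class Event(data: String)
  final case class State(events: List[Event] = Nil)

  def apply(entityId: String): EventSourcedBehavior[Command, Event, State] =
    EventSourcedBehavior[Command, Event, State](
      persistenceId = PersistenceId("MyEntity", entityId),
      emptyState = State(),
      // no-op command handler, just to keep the sketch compilable
      commandHandler = (_, _) => Effect.none,
      eventHandler = (state, event) => state.copy(events = event :: state.events))
      .withJournalPluginId("pekko.persistence.r2dbc.journal")
}
```
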
26 | See also @ref:[Connection configuration](connection-config.md).
27 |
28 | ### Reference configuration
29 |
30 | The following can be overridden in your `application.conf` for the journal specific settings:
31 |
32 | @@snip [reference.conf](/core/src/main/resources/reference.conf) {#journal-settings}
33 |
34 | ## Deletes
35 |
36 | The journal supports deletes through hard deletes, which means the journal entries are actually deleted from the database.
37 | There is no materialized view with a copy of the event, so make sure not to delete events too early if they are used from projections or queries.
38 |
--------------------------------------------------------------------------------
/docs/src/main/paradox/migration-guides.md:
--------------------------------------------------------------------------------
1 | # Migration Guides
2 |
3 | Apache Pekko Persistence R2DBC 1.0.0 is based on Akka Persistence R2DBC 0.7.7.
4 |
5 | ## Migration to Apache Pekko
6 |
7 | These migration notes are designed for users migrating from Akka Persistence R2DBC 0.7.7 to Apache Pekko Persistence R2DBC 1.0
8 | and assume a basic level of Akka experience. Please feel free to submit an issue or a patch if you feel like the notes can be improved.
9 |
10 | One of the main differences is that the database tables used by `pekko-projection-r2dbc` have been renamed to
11 | remove the `akka` prefixes ([PR71](https://github.com/apache/pekko-persistence-r2dbc/pull/71)).
12 |
13 | The table names that `pekko-projection-r2dbc` expects to find can be changed using [configuration settings](https://github.com/lightbend/config).
14 |
15 | Users migrating from Akka who want to reuse the pre-existing tables could set a config like:
16 |
17 | ```HOCON
18 | pekko.projection.r2dbc.offset-store {
19 | offset-table = "akka_projection_offset_store"
20 | timestamp-offset-table = "akka_projection_timestamp_offset_store"
21 | management-table = "akka_projection_management"
22 | }
23 | ```
24 |
--------------------------------------------------------------------------------
/docs/src/main/paradox/migration.md:
--------------------------------------------------------------------------------
1 | # Migration Tool
2 |
3 | There is a migration tool that is useful if you would like to migrate from another Pekko Persistence plugin
4 | to the R2DBC plugin. It has been tested with Pekko Persistence JDBC as source plugin, but it should work with
5 | any plugin that has support for `CurrentPersistenceIdsQuery` and `CurrentEventsByPersistenceIdQuery`.
6 |
7 | The migration tool can be run while the source system is still active, and it can be run multiple times with
8 | idempotent result. Full rolling update when switching database or Persistence plugin is not supported, but
9 | you can migrate most of the data while the system is online and then have a short full shutdown while
10 | migrating the remaining data that was written after the previous online migration.
11 |
12 | ## Dependencies
13 |
14 | @@dependency [Maven,sbt,Gradle] {
15 | group=org.apache.pekko
16 | artifact=pekko-persistence-r2dbc-migration_$scala.binary.version$
17 | version=$project.version$
18 | }
19 |
20 | ## Progress table
21 |
22 | To speed up processing of subsequent runs, the tool stores migrated persistence ids and sequence
23 | numbers in the table `migration_progress`. In a subsequent run it will only migrate new events and snapshots
24 | compared to what was stored in `migration_progress`. It will also find and migrate new persistence ids in a
25 | subsequent run. You can delete from `migration_progress` if you want to re-run the full migration.
26 |
27 | It's recommended that you create the `migration_progress` table before running the migration tool, but
28 | if it doesn't exist the tool will try to create the table.
29 |
30 | ```sql
31 | CREATE TABLE IF NOT EXISTS migration_progress(
32 | persistence_id VARCHAR(255) NOT NULL,
33 | event_seq_nr BIGINT,
34 | snapshot_seq_nr BIGINT,
35 | PRIMARY KEY(persistence_id))
36 | ```
37 |
38 | ## Configuration
39 |
40 | The migration tool can be run as main class `org.apache.pekko.persistence.r2dbc.migration.MigrationTool` provided by the above
41 | `pekko-persistence-r2dbc-migration` dependency.
42 |
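A minimal launcher sketch that simply delegates to that main class (assuming the migration dependency and the configuration described below are on the classpath):

```scala
import org.apache.pekko.persistence.r2dbc.migration.MigrationTool

object RunMigration {
  // delegates to the migration tool's main class; the source and target plugins
  // are picked up from application.conf
  def main(args: Array[String]): Unit =
    MigrationTool.main(args)
}
```
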
43 | You need to provide configuration for the source persistence plugin and the target R2DBC plugin in your `application.conf`. An example of such configuration for migration from Pekko Persistence JDBC:
44 |
45 | @@snip [application.conf](/migration/src/test/resources/application.conf)
46 |
47 | @@@ note
48 |
49 | Application specific serializers for events and snapshots must also be configured and included in classpath.
50 |
51 | @@@
52 |
53 | ### Reference configuration
54 |
55 | The following can be overridden in your `application.conf` for the migration tool specific settings:
56 |
57 | @@snip [reference.conf](/migration/src/main/resources/reference.conf)
58 |
--------------------------------------------------------------------------------
/docs/src/main/paradox/overview.md:
--------------------------------------------------------------------------------
1 | # Overview
2 |
3 | The Pekko Persistence R2DBC plugin allows using SQL databases with R2DBC as a backend for Pekko Persistence.
4 |
5 | Currently, the R2DBC plugin has support for [PostgreSQL](https://www.postgresql.org), [Yugabyte](https://www.yugabyte.com) and [MySQL](https://www.mysql.com).
6 | It is specifically designed to work well for distributed SQL databases.
7 |
8 | [Create an issue](https://github.com/apache/pekko-persistence-r2dbc/issues) if you would like to @ref[contribute](contributing.md)
9 | support for other databases that have an [R2DBC driver](https://r2dbc.io/drivers/).
10 |
11 | @@@ warning
12 |
13 | The project is currently under development and there are no guarantees for binary compatibility
14 | and the schema may change.
15 |
16 | @@@
17 |
18 | ## Project Info
19 |
20 | @@project-info{ projectId="core" }
21 |
22 | ## Dependencies
23 |
24 | @@dependency [Maven,sbt,Gradle] {
25 | group=org.apache.pekko
26 | artifact=pekko-persistence-r2dbc_$scala.binary.version$
27 | version=$project.version$
28 | }
29 |
30 | This plugin depends on Pekko $pekko.version$ or later. It is important that all `pekko-*`
31 | dependencies are of the same version, so it is recommended to depend on them explicitly to avoid problems
32 | with transitive dependencies causing an unlucky mix of versions.
33 |
34 | @@dependencies{ projectId="core" }
35 |
36 |
37 |
--------------------------------------------------------------------------------
/docs/src/main/paradox/release-notes/index.md:
--------------------------------------------------------------------------------
1 | # Release Notes
2 |
3 | @@toc { depth=1 }
4 |
5 | @@@ index
6 |
7 | * [1.1 Releases](releases-1.1.md)
8 | * [1.0 Releases](releases-1.0.md)
9 |
10 | @@@
11 |
--------------------------------------------------------------------------------
/docs/src/main/paradox/release-notes/releases-1.0.md:
--------------------------------------------------------------------------------
1 | # Release Notes (1.0.x)
2 |
3 | ## 1.0.0
4 |
5 | Apache Pekko Persistence R2DBC 1.0.0 is based on Akka Persistence R2DBC 0.7.7. Pekko came about as a result of Lightbend's
6 | decision to release future Akka versions under the [Business Source License](https://www.lightbend.com/blog/why-we-are-changing-the-license-for-akka),
7 | a license that is not compatible with Open Source usage.
8 |
9 | Apache Pekko has changed the package names, among other changes. Config names have changed to use `pekko` instead
10 | of `akka` in their names. Users switching from Akka to Pekko should read our [Migration Guide](https://pekko.apache.org/docs/pekko/1.0/project/migration-guides.html).
11 |
12 | Generally, we have tried to make it as easy as possible to switch existing Akka based projects over to using Pekko.
13 |
14 | We have gone through the code base and have tried to properly acknowledge all third party source code in the
15 | Apache Pekko code base. If anyone believes that there are any instances of third party source code that is not
16 | properly acknowledged, please get in touch.
17 |
18 | ### Bug Fixes
19 |
20 | We haven't had to fix any significant bugs that were in Akka Persistence R2DBC 0.7.7.
21 |
22 | ### Changes
23 |
24 | * Changed the table names in the DDL schemas to remove the akka/pekko prefixes ([PR71](https://github.com/apache/pekko-persistence-r2dbc/pull/71))
25 |
26 | ### Additions
27 |
28 | * Scala 3 support
29 | * the minimum required version is Scala 3.3.0
30 |
31 | ### Dependency Upgrades
32 | We have tried to limit the changes to third party dependencies that are used in Akka Persistence R2DBC 0.7.7. These are some exceptions:
33 |
34 | * some minor upgrades to r2dbc jars (all still 0.9.x)
35 | * scalatest 3.2.14. Pekko users who have existing tests based on Akka Testkit may need to migrate their tests due to the scalatest upgrade. The [scalatest 3.2 release notes](https://www.scalatest.org/release_notes/3.2.0) have a detailed description of the changes needed.
36 |
--------------------------------------------------------------------------------
/docs/src/main/paradox/release-notes/releases-1.1.md:
--------------------------------------------------------------------------------
1 | # Release Notes (1.1.x)
2 |
3 | Apache Pekko Persistence R2DBC 1.1.x releases support Java 8 and above.
4 |
5 | ## 1.1.0-M1
6 |
7 | Release notes for Apache Pekko Persistence R2DBC 1.1.0-M1. See [GitHub Milestone for 1.1.0-M1](https://github.com/apache/pekko-persistence-r2dbc/milestone/2?closed=1) for a fuller list of changes.
8 | As with all milestone releases, this release is not recommended for production use - it is designed to allow users to try out the changes in a test environment.
9 |
10 | ### Breaking Changes
11 |
12 | * The dependency on org.postgresql:r2dbc-postgresql is no longer added to our dependency pom.xml
13 | * Users need to add their own explicit dependency if they want to use Postgres (version 1.0.7.RELEASE recommended)
14 | * We now support Postgres and MySQL in pekko-persistence-r2dbc and pekko-projection-r2dbc
15 | * MySQL users will need to add their own explicit dependency on io.asyncer:r2dbc-mysql (version 1.3.0 recommended) ([PR175](https://github.com/apache/pekko-persistence-r2dbc/pull/175), [PR177](https://github.com/apache/pekko-persistence-r2dbc/pull/177))
16 | * change R2dbcExecutor functions that work with getRowsUpdated to return Future[Long] ([PR90](https://github.com/apache/pekko-persistence-r2dbc/pull/90))
17 | * Durable State: support revision in deletes ([PR92](https://github.com/apache/pekko-persistence-r2dbc/pull/92))
18 |
19 | ### Changes
20 |
21 | * Add ConnectionFactoryOptionsCustomizer ([PR171](https://github.com/apache/pekko-persistence-r2dbc/pull/171))
22 |
23 | ### Dependency Changes
24 |
25 | * upgrade io.r2dbc dependencies to 1.0.x
26 |
--------------------------------------------------------------------------------
/docs/src/main/paradox/snapshots.md:
--------------------------------------------------------------------------------
1 | # Snapshot store plugin
2 |
3 | The snapshot plugin enables storing and loading snapshots for @extref:[event sourced persistent actors](pekko:typed/persistence.html).
4 |
5 | ## Schema
6 |
7 | The `snapshot` table needs to be created in the configured database, see schema definition in @ref:[Creating the schema](getting-started.md#schema).
8 |
9 | ## Configuration
10 |
11 | To enable the snapshot plugin to be used by default, add the following line to your Pekko `application.conf`:
12 |
13 | ```
14 | pekko.persistence.snapshot-store.plugin = "pekko.persistence.r2dbc.snapshot"
15 | ```
16 |
17 | It can also be enabled with the `snapshotPluginId` for a specific `EventSourcedBehavior` and multiple
18 | plugin configurations are supported.
19 |
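For example, selecting this plugin for a single behavior could look like the following minimal sketch (the `MyCounter` object and its types are illustrative only):

```scala
import org.apache.pekko
import pekko.persistence.typed.PersistenceId
import pekko.persistence.typed.scaladsl.{ Effect, EventSourcedBehavior }

object MyCounter {
  sealed trait Command
  final case class Incremented(delta: Int)
  final case class State(sum: Int = 0)

  def apply(entityId: String): EventSourcedBehavior[Command, Incremented, State] =
    EventSourcedBehavior[Command, Incremented, State](
      persistenceId = PersistenceId("MyCounter", entityId),
      emptyState = State(),
      // no-op command handler, just to keep the sketch compilable
      commandHandler = (_, _) => Effect.none,
      eventHandler = (state, event) => State(state.sum + event.delta))
      .withSnapshotPluginId("pekko.persistence.r2dbc.snapshot")
      // snapshot every 100 events, which is when the snapshot store plugin is used
      .snapshotWhen((_, _, seqNr) => seqNr % 100 == 0)
}
```
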
20 | See also @ref:[Connection configuration](connection-config.md).
21 |
22 | ### Reference configuration
23 |
24 | The following can be overridden in your `application.conf` for the snapshot specific settings:
25 |
26 | @@snip [reference.conf](/core/src/main/resources/reference.conf) {#snapshot-settings}
27 |
28 | ## Usage
29 |
30 | The snapshot plugin is used whenever a snapshot write is triggered through the
31 | @extref:[Pekko Persistence APIs](pekko:typed/persistence-snapshot.html).
32 |
--------------------------------------------------------------------------------
/docs/src/test/resources/application-mysql.conf:
--------------------------------------------------------------------------------
1 | # SPDX-License-Identifier: Apache-2.0
2 |
3 | pekko.persistence.journal.plugin = "pekko.persistence.r2dbc.journal"
4 | pekko.persistence.snapshot-store.plugin = "pekko.persistence.r2dbc.snapshot"
5 | pekko.persistence.state.plugin = "pekko.persistence.r2dbc.state"
6 |
7 | // #connection-settings
8 | pekko.persistence.r2dbc {
9 | dialect = "mysql"
10 | connection-factory {
11 | driver = "mysql"
12 | host = "localhost"
13 | host = ${?DB_HOST}
14 | port = 3306
15 | database = "mysql"
16 | database = ${?DB_NAME}
17 | user = "root"
18 | user = ${?DB_USER}
19 | password = "root"
20 | password = ${?DB_PASSWORD}
21 |
22 | db-timestamp-monotonic-increasing = on
23 | use-app-timestamp = on
24 |
25 | # ssl {
26 | # enabled = on
27 | # mode = "VERIFY_CA"
28 | # root-cert = "/path/db_root.crt"
29 | # }
30 | }
31 | }
32 | // #connection-settings
33 |
--------------------------------------------------------------------------------
/docs/src/test/resources/application-postgres.conf:
--------------------------------------------------------------------------------
1 | # SPDX-License-Identifier: Apache-2.0
2 |
3 | pekko.persistence.journal.plugin = "pekko.persistence.r2dbc.journal"
4 | pekko.persistence.snapshot-store.plugin = "pekko.persistence.r2dbc.snapshot"
5 | pekko.persistence.state.plugin = "pekko.persistence.r2dbc.state"
6 |
7 | // #connection-settings
8 | pekko.persistence.r2dbc {
9 | dialect = "postgres"
10 | connection-factory {
11 | driver = "postgres"
12 | host = "localhost"
13 | host = ${?DB_HOST}
14 | database = "postgres"
15 | database = ${?DB_NAME}
16 | user = "postgres"
17 | user = ${?DB_USER}
18 | password = "postgres"
19 | password = ${?DB_PASSWORD}
20 |
21 | # ssl {
22 | # enabled = on
23 | # mode = "VERIFY_CA"
24 | # root-cert = "/path/db_root.crt"
25 | # }
26 | }
27 | }
28 | // #connection-settings
29 |
--------------------------------------------------------------------------------
/docs/src/test/resources/application-yugabyte.conf:
--------------------------------------------------------------------------------
1 | # SPDX-License-Identifier: Apache-2.0
2 |
3 | pekko.persistence.journal.plugin = "pekko.persistence.r2dbc.journal"
4 | pekko.persistence.snapshot-store.plugin = "pekko.persistence.r2dbc.snapshot"
5 | pekko.persistence.state.plugin = "pekko.persistence.r2dbc.state"
6 |
7 | // #connection-settings
8 | pekko.persistence.r2dbc {
9 | dialect = "yugabyte"
10 | connection-factory {
11 | driver = "postgres"
12 | host = "localhost"
13 | host = ${?DB_HOST}
14 | port = 5433
15 | database = "yugabyte"
16 | database = ${?DB_NAME}
17 | user = "yugabyte"
18 | user = ${?DB_USER}
19 | password = "yugabyte"
20 | password = ${?DB_PASSWORD}
21 |
22 | # ssl {
23 | # enabled = on
24 | # mode = "VERIFY_CA"
25 | # root-cert = "/path/db_root.crt"
26 | # }
27 | }
28 | }
29 | // #connection-settings
30 |
--------------------------------------------------------------------------------
/docs/src/test/scala/docs/home/CborSerializable.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Licensed to the Apache Software Foundation (ASF) under one or more
3 | * license agreements; and to You under the Apache License, version 2.0:
4 | *
5 | * https://www.apache.org/licenses/LICENSE-2.0
6 | *
7 | * This file is part of the Apache Pekko project, which was derived from Akka.
8 | */
9 |
10 | /*
11 | * Copyright (C) 2021 Lightbend Inc.
12 | */
13 |
14 | package docs.home
15 |
16 | trait CborSerializable
17 |
--------------------------------------------------------------------------------
/docs/src/test/scala/docs/home/query/QueryDocCompileOnly.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Licensed to the Apache Software Foundation (ASF) under one or more
3 | * license agreements; and to You under the Apache License, version 2.0:
4 | *
5 | * https://www.apache.org/licenses/LICENSE-2.0
6 | *
7 | * This file is part of the Apache Pekko project, which was derived from Akka.
8 | */
9 |
10 | package docs.home.query
11 |
12 | import org.apache.pekko
13 | import pekko.actor.typed.ActorSystem
14 | import pekko.persistence.query.NoOffset
15 | import pekko.persistence.typed.PersistenceId
16 | import pekko.stream.scaladsl.Sink
17 |
18 | object QueryDocCompileOnly {
19 | implicit val system: ActorSystem[_] = ???
20 | trait MyEvent
21 | trait MyState
22 |
23 | // #readJournalFor
24 | import org.apache.pekko
25 | import pekko.persistence.query.PersistenceQuery
26 | import pekko.persistence.r2dbc.query.scaladsl.R2dbcReadJournal
27 |
28 | val eventQueries = PersistenceQuery(system)
29 | .readJournalFor[R2dbcReadJournal](R2dbcReadJournal.Identifier)
30 | // #readJournalFor
31 |
32 | // #durableStateStoreFor
33 | import org.apache.pekko
34 | import pekko.persistence.state.DurableStateStoreRegistry
35 | import pekko.persistence.r2dbc.state.scaladsl.R2dbcDurableStateStore
36 |
37 | val stateQueries = DurableStateStoreRegistry(system)
38 | .durableStateStoreFor[R2dbcDurableStateStore[MyState]](R2dbcDurableStateStore.Identifier)
39 | // #durableStateStoreFor
40 |
41 | {
42 | // #currentEventsByPersistenceId
43 | val persistenceId = PersistenceId("MyEntity", "id1")
44 | eventQueries
45 | .currentEventsByPersistenceId(persistenceId.id, 1, 101)
46 | .map(envelope => s"event with seqNr ${envelope.sequenceNr}: ${envelope.event}")
47 | .runWith(Sink.foreach(println))
48 | // #currentEventsByPersistenceId
49 | }
50 |
51 | {
52 | // #currentEventsBySlices
53 | import org.apache.pekko.persistence.query.typed.EventEnvelope
54 |
55 | // Split the slices into 4 ranges
56 | val numberOfSliceRanges: Int = 4
57 | val sliceRanges = eventQueries.sliceRanges(numberOfSliceRanges)
58 |
59 | // Example of using the first slice range
60 | val minSlice: Int = sliceRanges.head.min
61 | val maxSlice: Int = sliceRanges.head.max
62 | val entityType: String = "MyEntity"
63 | eventQueries
64 | .currentEventsBySlices[MyEvent](entityType, minSlice, maxSlice, NoOffset.getInstance)
65 | .map(envelope =>
66 | s"event from persistenceId ${envelope.persistenceId} with " +
67 | s"seqNr ${envelope.sequenceNr}: ${envelope.event}")
68 | .runWith(Sink.foreach(println))
69 | // #currentEventsBySlices
70 | }
71 |
72 | {
73 | // #currentChangesBySlices
74 | import org.apache.pekko.persistence.query.UpdatedDurableState
75 |
76 | // Split the slices into 4 ranges
77 | val numberOfSliceRanges: Int = 4
78 | val sliceRanges = stateQueries.sliceRanges(numberOfSliceRanges)
79 |
80 | // Example of using the first slice range
81 | val minSlice: Int = sliceRanges.head.min
82 | val maxSlice: Int = sliceRanges.head.max
83 | val entityType: String = "MyEntity"
84 | stateQueries
85 | .currentChangesBySlices(entityType, minSlice, maxSlice, NoOffset.getInstance)
86 | .collect { case change: UpdatedDurableState[MyState] => change }
87 | .map(change =>
88 | s"state change from persistenceId ${change.persistenceId} with " +
89 | s"revision ${change.revision}: ${change.value}")
90 | .runWith(Sink.foreach(println))
91 | // #currentChangesBySlices
92 | }
93 | }
94 |
--------------------------------------------------------------------------------
/migration/src/main/resources/reference.conf:
--------------------------------------------------------------------------------
1 | # SPDX-License-Identifier: Apache-2.0
2 |
3 | pekko.persistence.r2dbc.migration {
4 |
5 | # Pekko Persistence plugin to migrate from.
6 | # You must also define plugin specific configuration
7 | # and application specific serializers for events and snapshots.
8 | source {
9 | query-plugin-id = "jdbc-read-journal"
10 | snapshot-plugin-id = "jdbc-snapshot-store"
11 | }
12 |
13 | # R2DBC Pekko Persistence plugin to migrate to.
14 | # You must also define pekko-persistence-r2dbc specific configuration.
15 | target {
16 | # this must be a configuration path of pekko-persistence-r2dbc
17 | persistence-plugin-id = "pekko.persistence.r2dbc"
18 |
19 | # Events are stored in batches of this size.
20 | batch = 10
21 | }
22 |
23 | # How many persistence ids to migrate concurrently.
24 | parallelism = 10
25 |
26 | }
27 |
--------------------------------------------------------------------------------
/migration/src/main/scala/org/apache/pekko/persistence/r2dbc/migration/MigrationToolDao.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Licensed to the Apache Software Foundation (ASF) under one or more
3 | * license agreements; and to You under the Apache License, version 2.0:
4 | *
5 | * https://www.apache.org/licenses/LICENSE-2.0
6 | *
7 | * This file is part of the Apache Pekko project, which was derived from Akka.
8 | */
9 |
10 | /*
11 | * Copyright (C) 2021 Lightbend Inc.
12 | */
13 |
14 | package org.apache.pekko.persistence.r2dbc.migration
15 |
16 | import scala.concurrent.ExecutionContext
17 | import scala.concurrent.Future
18 | import scala.concurrent.duration.FiniteDuration
19 |
20 | import org.apache.pekko
21 | import pekko.Done
22 | import pekko.actor.typed.ActorSystem
23 | import pekko.annotation.InternalApi
24 | import pekko.dispatch.ExecutionContexts
25 | import pekko.persistence.r2dbc.internal.Sql.Interpolation
26 | import pekko.persistence.r2dbc.internal.R2dbcExecutor
27 | import pekko.persistence.r2dbc.journal.JournalDao.log
28 | import io.r2dbc.spi.ConnectionFactory
29 |
30 | /**
31 | * INTERNAL API
32 | */
33 | @InternalApi private[r2dbc] object MigrationToolDao {
34 | final case class CurrentProgress(persistenceId: String, eventSeqNr: Long, snapshotSeqNr: Long)
35 | }
36 |
37 | /**
38 | * INTERNAL API
39 | */
40 | @InternalApi private[r2dbc] class MigrationToolDao(
41 | connectionFactory: ConnectionFactory,
42 | logDbCallsExceeding: FiniteDuration)(implicit ec: ExecutionContext, system: ActorSystem[_]) {
43 | import MigrationToolDao.CurrentProgress
44 |
45 | private val r2dbcExecutor = new R2dbcExecutor(connectionFactory, log, logDbCallsExceeding)(ec, system)
46 |
47 | def createProgressTable(): Future[Done] = {
48 | r2dbcExecutor.executeDdl("create migration progress table") { connection =>
49 | connection.createStatement(sql"""
50 | CREATE TABLE IF NOT EXISTS migration_progress(
51 | persistence_id VARCHAR(255) NOT NULL,
52 | event_seq_nr BIGINT,
53 | snapshot_seq_nr BIGINT,
54 | PRIMARY KEY(persistence_id)
55 | )""")
56 | }
57 | }
58 |
59 | def updateEventProgress(persistenceId: String, seqNr: Long): Future[Done] = {
60 | r2dbcExecutor
61 | .updateOne(s"upsert migration progress [$persistenceId]") { connection =>
62 | connection
63 | .createStatement(sql"""
64 | INSERT INTO migration_progress
65 | (persistence_id, event_seq_nr)
66 | VALUES (?, ?)
67 | ON CONFLICT (persistence_id)
68 | DO UPDATE SET
69 | event_seq_nr = excluded.event_seq_nr""")
70 | .bind(0, persistenceId)
71 | .bind(1, seqNr)
72 | }
73 | .map(_ => Done)(ExecutionContexts.parasitic)
74 | }
75 |
76 | def updateSnapshotProgress(persistenceId: String, seqNr: Long): Future[Done] = {
77 | r2dbcExecutor
78 | .updateOne(s"upsert migration progress [$persistenceId]") { connection =>
79 | connection
80 | .createStatement(sql"""
81 | INSERT INTO migration_progress
82 | (persistence_id, snapshot_seq_nr)
83 | VALUES (?, ?)
84 | ON CONFLICT (persistence_id)
85 | DO UPDATE SET
86 | snapshot_seq_nr = excluded.snapshot_seq_nr""")
87 | .bind(0, persistenceId)
88 | .bind(1, seqNr)
89 | }
90 | .map(_ => Done)(ExecutionContexts.parasitic)
91 | }
92 |
93 | def currentProgress(persistenceId: String): Future[Option[CurrentProgress]] = {
94 | r2dbcExecutor.selectOne(s"read migration progress [$persistenceId]")(
95 | _.createStatement(sql"SELECT * FROM migration_progress WHERE persistence_id = ?")
96 | .bind(0, persistenceId),
97 | row =>
98 | CurrentProgress(
99 | persistenceId,
100 | eventSeqNr = zeroIfNull(row.get("event_seq_nr", classOf[java.lang.Long])),
101 | snapshotSeqNr = zeroIfNull(row.get("snapshot_seq_nr", classOf[java.lang.Long]))))
102 | }
103 |
104 | private def zeroIfNull(n: java.lang.Long): Long =
105 | if (n eq null) 0L else n
106 |
107 | }
108 |
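A rough usage sketch of this DAO; note that it is private[r2dbc], so the snippet assumes it lives in the same package tree, and the connection-factory path, timeout and sequence numbers below are assumptions for illustration only:

    import scala.concurrent.{ ExecutionContext, Future }
    import scala.concurrent.duration._
    import org.apache.pekko
    import pekko.Done
    import pekko.actor.typed.ActorSystem
    import pekko.persistence.r2dbc.ConnectionFactoryProvider

    def recordProgress(persistenceId: String)(implicit system: ActorSystem[_]): Future[Done] = {
      implicit val ec: ExecutionContext = system.executionContext
      val connectionFactory =
        ConnectionFactoryProvider(system).connectionFactoryFor("pekko.persistence.r2dbc.connection-factory")
      val dao = new MigrationToolDao(connectionFactory, logDbCallsExceeding = 300.millis)
      for {
        _ <- dao.createProgressTable() // idempotent: CREATE TABLE IF NOT EXISTS
        _ <- dao.updateEventProgress(persistenceId, seqNr = 17L)
        _ <- dao.updateSnapshotProgress(persistenceId, seqNr = 3L)
      } yield Done
    }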
--------------------------------------------------------------------------------
/migration/src/test/resources/application.conf:
--------------------------------------------------------------------------------
1 | # SPDX-License-Identifier: Apache-2.0
2 |
3 | pekko.persistence.r2dbc.migration {
4 | source {
5 | query-plugin-id = "jdbc-read-journal"
6 | snapshot-plugin-id = "jdbc-snapshot-store"
7 | }
8 | }
9 |
10 | pekko.persistence.r2dbc {
11 | # use different table names or schema
12 | journal.table = "event_journal2"
13 | snapshot.table = "snapshot2"
14 | state.table = "durable_state2"
15 | }
16 |
17 | pekko.persistence.r2dbc.connection-factory {
18 | driver = "postgres"
19 | host = "localhost"
20 | port = 5432
21 | user = "postgres"
22 | password = "postgres"
23 | database = "postgres"
24 | }
25 |
26 | pekko-persistence-jdbc {
27 | shared-databases {
28 | default {
29 | profile = "slick.jdbc.PostgresProfile$"
30 | db {
31 | host = "localhost"
32 | url = "jdbc:postgresql://localhost:5432/postgres?reWriteBatchedInserts=true"
33 | user = postgres
34 | password = postgres
35 | driver = "org.postgresql.Driver"
36 | numThreads = 20
37 | maxConnections = 20
38 | minConnections = 5
39 | }
40 | }
41 | }
42 | }
43 |
44 | jdbc-journal {
45 | use-shared-db = "default"
46 | }
47 | jdbc-snapshot-store {
48 | use-shared-db = "default"
49 | }
50 | jdbc-read-journal {
51 | use-shared-db = "default"
52 | }
53 |
54 | # application-specific serializers for events and snapshots
55 | # must also be configured and included in the classpath
56 |
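A hypothetical example of what that serializer configuration could look like; the serializer class and binding names are placeholders and not part of this repository:

    import com.typesafe.config.ConfigFactory

    val serializationConfig = ConfigFactory.parseString(
      """
      pekko.actor {
        serializers {
          app-json = "docs.MyJsonSerializer"
        }
        serialization-bindings {
          "docs.MyEvent" = app-json
        }
      }
      """)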
--------------------------------------------------------------------------------
/migration/src/test/resources/logback-main.xml:
--------------------------------------------------------------------------------
(logback XML markup lost in extraction; only the encoder pattern line is recoverable)
[%date{ISO8601}] [%level] [%logger] [%X{pekkoAddress}] [%marker] [%thread] - %msg%n
--------------------------------------------------------------------------------
/project/AutomaticModuleName.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Licensed to the Apache Software Foundation (ASF) under one or more
3 | * license agreements; and to You under the Apache License, version 2.0:
4 | *
5 | * https://www.apache.org/licenses/LICENSE-2.0
6 | *
7 | * This file is part of the Apache Pekko project, which was derived from Akka.
8 | */
9 |
10 | /**
11 | * Copyright (C) 2009-2018 Lightbend Inc.
12 | */
13 |
14 | import sbt.{ Def, _ }
15 | import sbt.Keys._
16 |
17 | /**
18 | * Helper to set Automatic-Module-Name in projects.
19 | *
20 | * !! DO NOT BE TEMPTED INTO AUTOMATICALLY DERIVING THE NAMES FROM PROJECT NAMES !!
21 | *
22 | * The names carry a lot of implications and DO NOT have to always align 1:1 with the group ids or package names, though
23 |  * there should of course be a strong relationship between them.
24 | */
25 | object AutomaticModuleName {
26 | private val AutomaticModuleName = "Automatic-Module-Name"
27 |
28 | def settings(name: String): Seq[Def.Setting[Task[Seq[PackageOption]]]] = Seq(
29 | Compile / packageBin / packageOptions += Package.ManifestAttributes(AutomaticModuleName -> name))
30 | }
31 |
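A typical call site in build.sbt might look like this; the project and module name are illustrative:

    lazy val core = (project in file("core"))
      .settings(AutomaticModuleName.settings("pekko.persistence.r2dbc"))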
--------------------------------------------------------------------------------
/project/CommonSettings.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Licensed to the Apache Software Foundation (ASF) under one or more
3 | * license agreements; and to You under the Apache License, version 2.0:
4 | *
5 | * https://www.apache.org/licenses/LICENSE-2.0
6 | *
7 | * This file is part of the Apache Pekko project, which was derived from Akka.
8 | */
9 |
10 | import com.lightbend.paradox.projectinfo.ParadoxProjectInfoPlugin.autoImport.projectInfoVersion
11 | import org.mdedetrich.apache.sonatype.ApacheSonatypePlugin
12 | import sbt.Keys._
13 | import sbt.{ AutoPlugin, Compile, CrossVersion, Global, Test, TestFrameworks, Tests }
14 | import sbt.plugins.JvmPlugin
15 | import sbtdynver.DynVerPlugin
16 | import sbtdynver.DynVerPlugin.autoImport.dynverSonatypeSnapshots
17 |
18 | object CommonSettings extends AutoPlugin {
19 | override def trigger = allRequirements
20 |
21 | override def requires = JvmPlugin && ApacheSonatypePlugin && DynVerPlugin
22 |
23 | override lazy val projectSettings = Seq(
24 | crossScalaVersions := Seq(Dependencies.Scala212, Dependencies.Scala213, Dependencies.Scala3),
25 | scalaVersion := Dependencies.Scala213,
26 | crossVersion := CrossVersion.binary,
27 | // Setting javac options in common allows IntelliJ IDEA to import them automatically
28 | Compile / javacOptions ++= Seq("-encoding", "UTF-8", "-source", "1.8", "-target", "1.8"),
29 | Test / logBuffered := false,
30 | Test / parallelExecution := false,
31 | // show full stack traces and test case durations
32 | Test / testOptions += Tests.Argument("-oDF"),
33 | // -v Log "test run started" / "test started" / "test run finished" events on log level "info" instead of "debug".
34 | // -a Show stack traces and exception class name for AssertionErrors.
35 | testOptions += Tests.Argument(TestFrameworks.JUnit, "-v", "-a"),
36 | Test / fork := true, // some non-heap memory is leaking
37 | Test / javaOptions ++= {
38 | import scala.collection.JavaConverters._
39 | // include all passed -Dpekko. properties to the javaOptions for forked tests
40 | // useful to switch DB dialects for example
41 | val pekkoProperties = System.getProperties.stringPropertyNames.asScala.toList.collect {
42 | case key: String if key.startsWith("pekko.") => "-D" + key + "=" + System.getProperty(key)
43 | }
44 | "-Xms1G" :: "-Xmx1G" :: "-XX:MaxDirectMemorySize=256M" :: pekkoProperties
45 | },
46 | projectInfoVersion := (if (isSnapshot.value) "snapshot" else version.value))
47 |
48 | override lazy val globalSettings = Seq(
49 | Global / excludeLintKeys += projectInfoVersion)
50 |
51 | override lazy val buildSettings = Seq(
52 | dynverSonatypeSnapshots := true)
53 | }
54 |
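In practice the -Dpekko. forwarding above means a database dialect can be selected for the forked tests directly from the sbt invocation, for example sbt -Dpekko.persistence.r2dbc.dialect=yugabyte test (the property path comes from this project's reference configuration).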
--------------------------------------------------------------------------------
/project/CopyrightHeader.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Licensed to the Apache Software Foundation (ASF) under one or more
3 | * license agreements; and to You under the Apache License, version 2.0:
4 | *
5 | * https://www.apache.org/licenses/LICENSE-2.0
6 | *
7 | * This file is part of the Apache Pekko project, which was derived from Akka.
8 | */
9 |
10 | import de.heikoseeberger.sbtheader.HeaderPlugin.autoImport._
11 | import de.heikoseeberger.sbtheader.{ CommentCreator, HeaderPlugin, NewLine }
12 | import org.apache.commons.lang3.StringUtils
13 | import sbt.Keys._
14 | import sbt._
15 |
16 | trait CopyrightHeader extends AutoPlugin {
17 |
18 | override def requires: Plugins = HeaderPlugin
19 |
20 | override def trigger: PluginTrigger = allRequirements
21 |
22 | protected def headerMappingSettings: Seq[Def.Setting[_]] =
23 | Seq(Compile, Test).flatMap { config =>
24 | inConfig(config)(
25 | Seq(
26 | headerLicense := Some(HeaderLicense.Custom(apacheHeader)),
27 | headerMappings := headerMappings.value ++ Map(
28 | HeaderFileType.scala -> cStyleComment,
29 | HeaderFileType.java -> cStyleComment,
30 | HeaderFileType.conf -> hashLineComment,
31 | HeaderFileType("template") -> cStyleComment)))
32 | }
33 |
34 | override def projectSettings: Seq[Def.Setting[_]] =
35 | Def.settings(headerMappingSettings, additional)
36 |
37 | def additional: Seq[Def.Setting[_]] =
38 | Def.settings(Compile / compile := {
39 | (Compile / headerCreate).value
40 | (Compile / compile).value
41 | },
42 | Test / compile := {
43 | (Test / headerCreate).value
44 | (Test / compile).value
45 | })
46 |
47 | def apacheHeader: String =
48 | """Licensed to the Apache Software Foundation (ASF) under one or more
49 | |license agreements; and to You under the Apache License, version 2.0:
50 | |
51 | | https://www.apache.org/licenses/LICENSE-2.0
52 | |
53 | |This file is part of the Apache Pekko project, which was derived from Akka.
54 | |""".stripMargin
55 |
56 | val apacheSpdxHeader: String = "SPDX-License-Identifier: Apache-2.0"
57 |
58 | val cStyleComment = HeaderCommentStyle.cStyleBlockComment.copy(commentCreator = new CommentCreator() {
59 |
60 | override def apply(text: String, existingText: Option[String]): String = {
61 | val formatted = existingText match {
62 | case Some(currentText) if isValidCopyrightAnnotated(currentText) =>
63 | currentText
64 | case Some(currentText) if isLightbendCopyrighted(currentText) =>
65 | HeaderCommentStyle.cStyleBlockComment.commentCreator(text, existingText) + NewLine * 2 + currentText
66 | case Some(currentText) =>
67 | throw new IllegalStateException(s"Unable to detect copyright for header:[$currentText]")
68 | case None =>
69 | HeaderCommentStyle.cStyleBlockComment.commentCreator(text, existingText)
70 | }
71 | formatted.trim
72 | }
73 | })
74 |
75 | val hashLineComment = HeaderCommentStyle.hashLineComment.copy(commentCreator = new CommentCreator() {
76 |
77 | // deliberately hardcode use of apacheSpdxHeader and ignore input text
78 | override def apply(text: String, existingText: Option[String]): String = {
79 | val formatted = existingText match {
80 | case Some(currentText) if isApacheCopyrighted(currentText) =>
81 | currentText
82 | case Some(currentText) =>
83 | HeaderCommentStyle.hashLineComment.commentCreator(apacheSpdxHeader, existingText) + NewLine * 2 + currentText
84 | case None =>
85 | HeaderCommentStyle.hashLineComment.commentCreator(apacheSpdxHeader, existingText)
86 | }
87 | formatted.trim
88 | }
89 | })
90 |
91 | private def isApacheCopyrighted(text: String): Boolean =
92 | StringUtils.containsIgnoreCase(text, "licensed to the apache software foundation (asf)") ||
93 | StringUtils.containsIgnoreCase(text, "www.apache.org/licenses/license-2.0") ||
94 | StringUtils.contains(text, "Apache-2.0")
95 |
96 | private def isLightbendCopyrighted(text: String): Boolean =
97 | StringUtils.containsIgnoreCase(text, "lightbend inc.")
98 |
99 | private def isValidCopyrightAnnotated(text: String): Boolean = {
100 | isApacheCopyrighted(text)
101 | }
102 | }
103 |
104 | object CopyrightHeader extends CopyrightHeader
105 |
--------------------------------------------------------------------------------
/project/LicenseReport.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Licensed to the Apache Software Foundation (ASF) under one or more
3 | * contributor license agreements. See the NOTICE file distributed with
4 | * this work for additional information regarding copyright ownership.
5 | * The ASF licenses this file to You under the Apache License, Version 2.0
6 | * (the "License"); you may not use this file except in compliance with
7 | * the License. You may obtain a copy of the License at
8 | *
9 | * http://www.apache.org/licenses/LICENSE-2.0
10 | *
11 | * Unless required by applicable law or agreed to in writing, software
12 | * distributed under the License is distributed on an "AS IS" BASIS,
13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | * See the License for the specific language governing permissions and
15 | * limitations under the License.
16 | */
17 |
18 | import sbt._
19 | import sbtlicensereport.SbtLicenseReport
20 | import sbtlicensereport.SbtLicenseReport.autoImportImpl._
21 | import sbtlicensereport.license.{ DepModuleInfo, MarkDown }
22 |
23 | object LicenseReport extends AutoPlugin {
24 |
25 | override lazy val projectSettings = Seq(
26 | licenseReportTypes := Seq(MarkDown),
27 | licenseReportMakeHeader := (language => language.header1("License Report")),
28 | licenseConfigurations := {
29 | Set("compile", "test", "provided")
30 | },
31 | licenseDepExclusions := {
32 | case DepModuleInfo("org.apache.pekko", _, _) => true // Inter pekko project dependencies are pointless
33 | case DepModuleInfo(_, "scala-library", _) => true // Scala library is part of Scala language
34 | },
35 | licenseReportColumns := Seq(
36 | Column.Category,
37 | Column.License,
38 | Column.Dependency,
39 | Column.OriginatingArtifactName,
40 | Column.Configuration))
41 |
42 | override def requires = plugins.JvmPlugin && SbtLicenseReport
43 |
44 | override def trigger = allRequirements
45 |
46 | }
47 |
--------------------------------------------------------------------------------
/project/MetaInfLicenseNoticeCopy.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Licensed to the Apache Software Foundation (ASF) under one or more
3 | * license agreements; and to You under the Apache License, version 2.0:
4 | *
5 | * https://www.apache.org/licenses/LICENSE-2.0
6 | *
7 | * This file is part of the Apache Pekko project, which was derived from Akka.
8 | */
9 |
10 | import sbt.Keys._
11 | import sbt._
12 | import org.mdedetrich.apache.sonatype.ApacheSonatypePlugin
13 | import org.mdedetrich.apache.sonatype.ApacheSonatypePlugin.autoImport._
14 |
15 | /**
16 | * Copies LICENSE and NOTICE files into jar META-INF dir
17 | */
18 | object MetaInfLicenseNoticeCopy extends AutoPlugin {
19 |
20 | private lazy val baseDir = LocalRootProject / baseDirectory
21 |
22 | override def trigger = allRequirements
23 |
24 | override def requires = ApacheSonatypePlugin
25 | }
26 |
--------------------------------------------------------------------------------
/project/PekkoCoreDependency.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Licensed to the Apache Software Foundation (ASF) under one or more
3 | * contributor license agreements. See the NOTICE file distributed with
4 | * this work for additional information regarding copyright ownership.
5 | * The ASF licenses this file to You under the Apache License, Version 2.0
6 | * (the "License"); you may not use this file except in compliance with
7 | * the License. You may obtain a copy of the License at
8 | *
9 | * http://www.apache.org/licenses/LICENSE-2.0
10 | *
11 | * Unless required by applicable law or agreed to in writing, software
12 | * distributed under the License is distributed on an "AS IS" BASIS,
13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | * See the License for the specific language governing permissions and
15 | * limitations under the License.
16 | */
17 |
18 | import com.github.pjfanning.pekkobuild.PekkoDependency
19 |
20 | object PekkoCoreDependency extends PekkoDependency {
21 | override val checkProject: String = "pekko-cluster-sharding-typed"
22 | override val module: Option[String] = None
23 | override val currentVersion: String = "1.1.3"
24 | }
25 |
--------------------------------------------------------------------------------
/project/PekkoPersistenceJdbcDependency.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Licensed to the Apache Software Foundation (ASF) under one or more
3 | * contributor license agreements. See the NOTICE file distributed with
4 | * this work for additional information regarding copyright ownership.
5 | * The ASF licenses this file to You under the Apache License, Version 2.0
6 | * (the "License"); you may not use this file except in compliance with
7 | * the License. You may obtain a copy of the License at
8 | *
9 | * http://www.apache.org/licenses/LICENSE-2.0
10 | *
11 | * Unless required by applicable law or agreed to in writing, software
12 | * distributed under the License is distributed on an "AS IS" BASIS,
13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | * See the License for the specific language governing permissions and
15 | * limitations under the License.
16 | */
17 |
18 | import com.github.pjfanning.pekkobuild.PekkoDependency
19 |
20 | object PekkoPersistenceJdbcDependency extends PekkoDependency {
21 | override val checkProject: String = "pekko-persistence-jdbc"
22 | override val module: Option[String] = Some("persistence.jdbc")
23 | override val currentVersion: String = "1.1.0"
24 | }
25 |
--------------------------------------------------------------------------------
/project/PekkoProjectionDependency.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Licensed to the Apache Software Foundation (ASF) under one or more
3 | * contributor license agreements. See the NOTICE file distributed with
4 | * this work for additional information regarding copyright ownership.
5 | * The ASF licenses this file to You under the Apache License, Version 2.0
6 | * (the "License"); you may not use this file except in compliance with
7 | * the License. You may obtain a copy of the License at
8 | *
9 | * http://www.apache.org/licenses/LICENSE-2.0
10 | *
11 | * Unless required by applicable law or agreed to in writing, software
12 | * distributed under the License is distributed on an "AS IS" BASIS,
13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | * See the License for the specific language governing permissions and
15 | * limitations under the License.
16 | */
17 |
18 | import com.github.pjfanning.pekkobuild.PekkoDependency
19 |
20 | object PekkoProjectionDependency extends PekkoDependency {
21 | override val checkProject: String = "pekko-projection-jdbc"
22 | override val module: Option[String] = Some("projection")
23 | override val currentVersion: String = "1.1.0"
24 | }
25 |
--------------------------------------------------------------------------------
/project/build.properties:
--------------------------------------------------------------------------------
1 | sbt.version=1.11.2
2 |
3 |
--------------------------------------------------------------------------------
/project/plugins.sbt:
--------------------------------------------------------------------------------
1 | /*
2 | * Licensed to the Apache Software Foundation (ASF) under one or more
3 | * license agreements; and to You under the Apache License, version 2.0:
4 | *
5 | * https://www.apache.org/licenses/LICENSE-2.0
6 | *
7 | * This file is part of the Apache Pekko project, which was derived from Akka.
8 | */
9 |
10 | addSbtPlugin("de.heikoseeberger" % "sbt-header" % "5.10.0") // for maintenance of copyright file header
11 | addSbtPlugin("org.scalameta" % "sbt-scalafmt" % "2.5.4")
12 | addSbtPlugin("com.github.sbt" % "sbt-java-formatter" % "0.9.0")
13 | addSbtPlugin("com.github.sbt" % "sbt-unidoc" % "0.5.0")
14 | addSbtPlugin("com.typesafe" % "sbt-mima-plugin" % "1.1.4")
15 |
16 | // for releasing
17 | addSbtPlugin("com.github.sbt" % "sbt-dynver" % "5.1.0")
18 | addSbtPlugin("net.bzzt" % "sbt-reproducible-builds" % "0.32")
19 | addSbtPlugin("com.github.pjfanning" % "sbt-pekko-build" % "0.4.2")
20 | addSbtPlugin("com.github.pjfanning" % "sbt-source-dist" % "0.1.12")
21 | addSbtPlugin("com.github.sbt" % "sbt-license-report" % "1.7.0")
22 |
23 | //// docs
24 | addSbtPlugin("org.apache.pekko" % "pekko-sbt-paradox" % "1.0.1")
25 | addSbtPlugin(("com.github.sbt" % "sbt-site-paradox" % "1.7.0").excludeAll(
26 | "com.lightbend.paradox", "sbt-paradox"))
27 |
--------------------------------------------------------------------------------
/project/project-info.conf:
--------------------------------------------------------------------------------
1 | project-info {
2 | version: "current"
3 | shared-info {
4 | jdk-versions: ["OpenJDK 8", "OpenJDK 11", "OpenJDK 17", "OpenJDK 21"]
5 | issues: {
6 | url: "https://github.com/apache/pekko-persistence-r2dbc/issues"
7 | text: "Github issues"
8 | }
9 | release-notes: {
10 | url: "https://github.com/apache/pekko-persistence-r2dbc/releases"
11 | text: "Github releases"
12 | }
13 | forums: [
14 | {
15 | text: "Apache Pekko User mailing list"
16 | url: "https://lists.apache.org/list.html?users@pekko.apache.org"
17 | }
18 | {
19 | text: "Apache Pekko Dev mailing list"
20 | url: "https://lists.apache.org/list.html?dev@pekko.apache.org"
21 | }
22 | {
23 | text: "Github Discussions"
24 | url: "https://github.com/apache/pekko-persistence-r2dbc/discussions"
25 | }
26 | ]
27 | }
28 | core: ${project-info.shared-info} {
29 | title: "Pekko Persistence R2DBC"
30 | jpms-name: "pekko.persistence.r2dbc"
31 | }
32 | projection: ${project-info.shared-info} {
33 | title: "Pekko Projections with R2DBC"
34 | jpms-name: "pekko.projection.r2dbc"
35 | }
36 | }
37 |
--------------------------------------------------------------------------------
/projection/src/main/mima-filters/1.0.x.backwards.excludes/r2dbcsession.excludes:
--------------------------------------------------------------------------------
1 | # Licensed to the Apache Software Foundation (ASF) under one
2 | # or more contributor license agreements. See the NOTICE file
3 | # distributed with this work for additional information
4 | # regarding copyright ownership. The ASF licenses this file
5 | # to you under the Apache License, Version 2.0 (the
6 | # "License"); you may not use this file except in compliance
7 | # with the License. You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing,
12 | # software distributed under the License is distributed on an
13 | # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | # KIND, either express or implied. See the License for the
15 | # specific language governing permissions and limitations
16 | # under the License.
17 |
18 | # Caused by https://github.com/apache/pekko-persistence-r2dbc/pull/101
19 | ProblemFilters.exclude[IncompatibleSignatureProblem]("org.apache.pekko.projection.r2dbc.javadsl.R2dbcSession.updateOne")
20 | ProblemFilters.exclude[IncompatibleSignatureProblem]("org.apache.pekko.projection.r2dbc.javadsl.R2dbcSession.update")
21 |
--------------------------------------------------------------------------------
/projection/src/main/resources/reference.conf:
--------------------------------------------------------------------------------
1 | # SPDX-License-Identifier: Apache-2.0
2 |
3 | # This defines the default configuration for pekko-projection-r2dbc.
4 | # Make your edits/overrides in your application.conf.
5 |
6 | //#projection-config
7 | pekko.projection.r2dbc {
8 | # postgres, yugabyte or mysql
9 | dialect = ${pekko.persistence.r2dbc.dialect}
10 |
11 | offset-store {
12 | # set this to your database schema if applicable, empty by default
13 | schema = ""
14 | # the database table name for the offset store,
15 | # can be set to "" if only timestamp offsets are used and the table is not created
16 | offset-table = "projection_offset_store"
17 |
18 | # the database table name for the timestamp offset store
19 | timestamp-offset-table = "projection_timestamp_offset_store"
20 |
21 | # the database table name for the projection management data
22 | management-table = "projection_management"
23 |
24 | # The offset store will keep track of persistence ids and sequence numbers
25 | # within this time window from the latest offset.
26 | time-window = 5 minutes
27 |
28 | # Keep this number of entries. Don't evict old entries until this threshold
29 | # has been reached.
30 | keep-number-of-entries = 10000
31 |
32 | # Remove old entries outside the time-window from the offset store memory
33 | # with this frequency.
34 | evict-interval = 10 seconds
35 |
36 | # Remove old entries outside the time-window from the offset store database
37 | # with this frequency.
38 | delete-interval = 1 minute
39 | }
40 |
41 | # By default it shares connection-factory with pekko-persistence-r2dbc (write side),
42 | # i.e. same connection pool. To use a separate pool for projections this can be
43 | # set to another config path that defines the same kind of config as
44 | # pekko.persistence.r2dbc.connection-factory.
45 | use-connection-factory = "pekko.persistence.r2dbc.connection-factory"
46 |
47 | # Logs database calls that take longer than this duration at INFO level.
48 | # Set to "off" to disable this logging.
49 | # Set to 0 to log all calls.
50 | log-db-calls-exceeding = 300 ms
51 | }
52 | //#projection-config
53 |
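For example, a separate pool for projections could be wired up like this; the config path my-app.projection-connection-factory and the credentials are illustrative:

    import com.typesafe.config.ConfigFactory

    val projectionConfig = ConfigFactory.parseString(
      """
      # a second pool, defined with the same kind of settings as
      # pekko.persistence.r2dbc.connection-factory
      my-app.projection-connection-factory {
        driver = "postgres"
        host = "localhost"
        port = 5432
        user = "postgres"
        password = "postgres"
        database = "postgres"
      }
      pekko.projection.r2dbc.use-connection-factory = "my-app.projection-connection-factory"
      """).withFallback(ConfigFactory.load())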
--------------------------------------------------------------------------------
/projection/src/main/scala/org/apache/pekko/projection/r2dbc/internal/BySliceSourceProviderAdapter.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Licensed to the Apache Software Foundation (ASF) under one or more
3 | * license agreements; and to You under the Apache License, version 2.0:
4 | *
5 | * https://www.apache.org/licenses/LICENSE-2.0
6 | *
7 | * This file is part of the Apache Pekko project, which was derived from Akka.
8 | */
9 |
10 | /*
11 | * Copyright (C) 2021 Lightbend Inc.
12 | */
13 |
14 | package org.apache.pekko.projection.r2dbc.internal
15 |
16 | import java.time.Instant
17 | import java.util.Optional
18 | import java.util.concurrent.CompletionStage
19 | import java.util.function.Supplier
20 |
21 | import scala.concurrent.Future
22 |
23 | import org.apache.pekko
24 | import pekko.NotUsed
25 | import pekko.annotation.InternalApi
26 | import pekko.projection.javadsl
27 | import pekko.projection.scaladsl
28 | import pekko.dispatch.ExecutionContexts
29 | import pekko.stream.scaladsl.Source
30 | import pekko.util.ccompat.JavaConverters._
31 | import pekko.util.FutureConverters._
32 | import pekko.util.OptionConverters._
33 | import scala.concurrent.ExecutionContext
34 |
35 | import pekko.persistence.query.typed.EventEnvelope
36 | import pekko.persistence.query.typed.scaladsl.EventTimestampQuery
37 | import pekko.persistence.query.typed.scaladsl.LoadEventQuery
38 | import pekko.projection.BySlicesSourceProvider
39 |
40 | /**
41 | * INTERNAL API: Adapter from javadsl.SourceProvider to scaladsl.SourceProvider
42 | */
43 | @InternalApi private[projection] class BySliceSourceProviderAdapter[Offset, Envelope](
44 | delegate: javadsl.SourceProvider[Offset, Envelope])
45 | extends scaladsl.SourceProvider[Offset, Envelope]
46 | with BySlicesSourceProvider
47 | with EventTimestampQuery
48 | with LoadEventQuery {
49 |
50 | def source(offset: () => Future[Option[Offset]]): Future[Source[Envelope, NotUsed]] = {
51 | // the parasitic context is used to convert the Optional to Option and a javadsl Source to a scaladsl Source;
52 | // it _should_ not be used for the blocking operation of getting offsets themselves
53 | val ec = pekko.dispatch.ExecutionContexts.parasitic
54 | val offsetAdapter = new Supplier[CompletionStage[Optional[Offset]]] {
55 | override def get(): CompletionStage[Optional[Offset]] = offset().map(_.toJava)(ec).asJava
56 | }
57 | delegate.source(offsetAdapter).asScala.map(_.asScala)(ec)
58 | }
59 |
60 | def extractOffset(envelope: Envelope): Offset = delegate.extractOffset(envelope)
61 |
62 | def extractCreationTime(envelope: Envelope): Long = delegate.extractCreationTime(envelope)
63 |
64 | override def minSlice: Int =
65 | delegate.asInstanceOf[BySlicesSourceProvider].minSlice
66 |
67 | override def maxSlice: Int =
68 | delegate.asInstanceOf[BySlicesSourceProvider].maxSlice
69 |
70 | override def timestampOf(persistenceId: String, sequenceNr: Long): Future[Option[Instant]] =
71 | delegate match {
72 | case timestampQuery: pekko.persistence.query.typed.javadsl.EventTimestampQuery =>
73 | timestampQuery.timestampOf(persistenceId, sequenceNr).asScala.map(_.toScala)(ExecutionContexts.parasitic)
74 | case _ =>
75 | Future.failed(
76 | new IllegalArgumentException(
77 | s"Expected SourceProvider [${delegate.getClass.getName}] to implement " +
78 | "EventTimestampQuery when TimestampOffset is used."))
79 | }
80 |
81 | override def loadEnvelope[Event](persistenceId: String, sequenceNr: Long): Future[EventEnvelope[Event]] =
82 | delegate match {
83 | case timestampQuery: pekko.persistence.query.typed.javadsl.LoadEventQuery =>
84 | timestampQuery.loadEnvelope[Event](persistenceId, sequenceNr).asScala
85 | case _ =>
86 | Future.failed(
87 | new IllegalArgumentException(
88 | s"Expected SourceProvider [${delegate.getClass.getName}] to implement " +
89 | "EventTimestampQuery when LoadEventQuery is used."))
90 | }
91 | }
92 |
--------------------------------------------------------------------------------
/projection/src/main/scala/org/apache/pekko/projection/r2dbc/internal/R2dbcHandlerAdapter.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Licensed to the Apache Software Foundation (ASF) under one or more
3 | * license agreements; and to You under the Apache License, version 2.0:
4 | *
5 | * https://www.apache.org/licenses/LICENSE-2.0
6 | *
7 | * This file is part of the Apache Pekko project, which was derived from Akka.
8 | */
9 |
10 | /*
11 | * Copyright (C) 2021 Lightbend Inc.
12 | */
13 |
14 | package org.apache.pekko.projection.r2dbc.internal
15 |
16 | import scala.collection.immutable
17 | import scala.concurrent.Future
18 |
19 | import org.apache.pekko
20 | import pekko.Done
21 | import pekko.annotation.InternalApi
22 | import pekko.projection.r2dbc.javadsl
23 | import pekko.projection.r2dbc.javadsl.R2dbcSession
24 | import pekko.projection.r2dbc.scaladsl
25 | import pekko.util.ccompat.JavaConverters._
26 | import pekko.util.FutureConverters._
27 |
28 | /**
29 | * INTERNAL API: Adapter from javadsl.R2dbcHandler to scaladsl.R2dbcHandler
30 | */
31 | @InternalApi private[projection] class R2dbcHandlerAdapter[Envelope](delegate: javadsl.R2dbcHandler[Envelope])
32 | extends scaladsl.R2dbcHandler[Envelope] {
33 |
34 | override def process(session: scaladsl.R2dbcSession, envelope: Envelope): Future[Done] = {
35 | delegate.process(new R2dbcSession(session.connection)(session.ec, session.system), envelope).asScala
36 | }
37 |
38 | override def start(): Future[Done] =
39 | delegate.start().asScala
40 |
41 | override def stop(): Future[Done] =
42 | delegate.stop().asScala
43 |
44 | }
45 |
46 | /**
47 | * INTERNAL API: Adapter from `javadsl.R2dbcHandler[java.util.List[Envelope]]` to
48 | * `scaladsl.R2dbcHandler[immutable.Seq[Envelope]]`
49 | */
50 | @InternalApi private[projection] class R2dbcGroupedHandlerAdapter[Envelope](
51 | delegate: javadsl.R2dbcHandler[java.util.List[Envelope]])
52 | extends scaladsl.R2dbcHandler[immutable.Seq[Envelope]] {
53 |
54 | override def process(session: scaladsl.R2dbcSession, envelopes: immutable.Seq[Envelope]): Future[Done] = {
55 | delegate.process(new R2dbcSession(session.connection)(session.ec, session.system), envelopes.asJava).asScala
56 | }
57 |
58 | override def start(): Future[Done] =
59 | delegate.start().asScala
60 |
61 | override def stop(): Future[Done] =
62 | delegate.stop().asScala
63 |
64 | }
65 |
--------------------------------------------------------------------------------
/projection/src/main/scala/org/apache/pekko/projection/r2dbc/internal/mysql/MySQLR2dbcOffsetStore.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Licensed to the Apache Software Foundation (ASF) under one
3 | * or more contributor license agreements. See the NOTICE file
4 | * distributed with this work for additional information
5 | * regarding copyright ownership. The ASF licenses this file
6 | * to you under the Apache License, Version 2.0 (the
7 | * "License"); you may not use this file except in compliance
8 | * with the License. You may obtain a copy of the License at
9 | *
10 | * http://www.apache.org/licenses/LICENSE-2.0
11 | *
12 | * Unless required by applicable law or agreed to in writing,
13 | * software distributed under the License is distributed on an
14 | * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
15 | * KIND, either express or implied. See the License for the
16 | * specific language governing permissions and limitations
17 | * under the License.
18 | */
19 |
20 | package org.apache.pekko.projection.r2dbc.internal.mysql
21 |
22 | import java.time.Clock
23 |
24 | import org.apache.pekko
25 | import pekko.actor.typed.ActorSystem
26 | import pekko.annotation.InternalApi
27 | import pekko.persistence.r2dbc.internal.R2dbcExecutor
28 | import pekko.persistence.r2dbc.internal.Sql.DialectInterpolation
29 | import pekko.projection.BySlicesSourceProvider
30 | import pekko.projection.ProjectionId
31 | import pekko.projection.r2dbc.R2dbcProjectionSettings
32 | import pekko.projection.r2dbc.internal.R2dbcOffsetStore
33 |
34 | /**
35 | * INTERNAL API
36 | */
37 | @InternalApi
38 | private[projection] class MySQLR2dbcOffsetStore(
39 | projectionId: ProjectionId,
40 | sourceProvider: Option[BySlicesSourceProvider],
41 | system: ActorSystem[_],
42 | settings: R2dbcProjectionSettings,
43 | r2dbcExecutor: R2dbcExecutor,
44 | clock: Clock = Clock.systemUTC())
45 | extends R2dbcOffsetStore(projectionId, sourceProvider, system, settings, r2dbcExecutor, clock) {
46 |
47 | override lazy val timestampSql: String = "NOW(6)"
48 |
49 | override val upsertOffsetSql: String = sql"""
50 | INSERT INTO $offsetTable
51 | (projection_name, projection_key, current_offset, manifest, mergeable, last_updated)
52 | VALUES (?,?,?,?,?,?) AS excluded
53 | ON DUPLICATE KEY UPDATE
54 | current_offset = excluded.current_offset,
55 | manifest = excluded.manifest,
56 | mergeable = excluded.mergeable,
57 | last_updated = excluded.last_updated"""
58 |
59 | override val updateManagementStateSql: String = sql"""
60 | INSERT INTO $managementTable
61 | (projection_name, projection_key, paused, last_updated)
62 | VALUES (?,?,?,?) AS excluded
63 | ON DUPLICATE KEY UPDATE
64 | paused = excluded.paused,
65 | last_updated = excluded.last_updated"""
66 | }
67 |
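Note that the VALUES (...) AS excluded row-alias form used in these upserts is MySQL syntax introduced in MySQL 8.0.19, so older MySQL servers would not accept these statements.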
--------------------------------------------------------------------------------
/projection/src/main/scala/org/apache/pekko/projection/r2dbc/javadsl/R2dbcHandler.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Licensed to the Apache Software Foundation (ASF) under one or more
3 | * license agreements; and to You under the Apache License, version 2.0:
4 | *
5 | * https://www.apache.org/licenses/LICENSE-2.0
6 | *
7 | * This file is part of the Apache Pekko project, which was derived from Akka.
8 | */
9 |
10 | /*
11 | * Copyright (C) 2021 Lightbend Inc.
12 | */
13 |
14 | package org.apache.pekko.projection.r2dbc.javadsl
15 |
16 | import java.util.concurrent.CompletableFuture
17 | import java.util.concurrent.CompletionStage
18 | import java.util.function.BiFunction
19 |
20 | import org.apache.pekko
21 | import pekko.Done
22 | import pekko.annotation.ApiMayChange
23 | import pekko.annotation.InternalApi
24 | import pekko.projection.javadsl.HandlerLifecycle
25 |
26 | object R2dbcHandler {
27 |
28 | /**
29 | * INTERNAL API
30 | */
31 | @InternalApi
32 | private class HandlerFunction[Envelope](handler: BiFunction[R2dbcSession, Envelope, CompletionStage[Done]])
33 | extends R2dbcHandler[Envelope] {
34 | override def process(session: R2dbcSession, envelope: Envelope): CompletionStage[Done] =
35 | handler.apply(session, envelope)
36 | }
37 |
38 | /**
39 | * Handler that can be defined from a simple function
40 | */
41 | def fromFunction[Envelope](
42 | handler: BiFunction[R2dbcSession, Envelope, CompletionStage[Done]]): R2dbcHandler[Envelope] =
43 | new HandlerFunction(handler)
44 |
45 | }
46 |
47 | /**
48 | * Implement this interface for the Envelope handler for R2DBC Projections.
49 | *
50 | * It can be stateful, with variables and mutable data structures. It is invoked by the `Projection` machinery one
51 | * envelope at a time and visibility guarantees between the invocations are handled automatically, i.e. no volatile or
52 | * other concurrency primitives are needed for managing the state.
53 | *
54 | * Supported error handling strategies for when processing an `Envelope` fails can be defined in configuration or using
55 | * the `withRecoveryStrategy` method of a `Projection` implementation.
56 | */
57 | @ApiMayChange
58 | abstract class R2dbcHandler[Envelope] extends HandlerLifecycle {
59 |
60 | /**
61 | * The `process` method is invoked for each `Envelope`. Each time a new `Connection` is passed with a new open
62 | * transaction. You can use `createStatement`, `update` and other methods provided by the [[R2dbcSession]]. The
63 | * results of several statements can be combined with `CompletionStage` composition (e.g. `thenCompose`). The
64 | * transaction will be automatically committed or rolled back when the returned `CompletionStage` is completed.
65 | *
66 | * One envelope is processed at a time. It will not be invoked with the next envelope until after this method returns
67 | * and the returned `CompletionStage` is completed.
68 | */
69 | @throws(classOf[Exception])
70 | def process(session: R2dbcSession, envelope: Envelope): CompletionStage[Done]
71 |
72 | /**
73 | * Invoked when the projection is starting, before first envelope is processed. Can be overridden to implement
74 | * initialization. It is also called when the `Projection` is restarted after a failure.
75 | */
76 | def start(): CompletionStage[Done] =
77 | CompletableFuture.completedFuture(Done)
78 |
79 | /**
80 | * Invoked when the projection has been stopped. Can be overridden to implement resource cleanup. It is also called
81 | * when the `Projection` is restarted after a failure.
82 | */
83 | def stop(): CompletionStage[Done] =
84 | CompletableFuture.completedFuture(Done)
85 |
86 | }
87 |
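A small sketch of defining a handler via fromFunction, written in Scala for brevity; the table, SQL and envelope type are made up for illustration:

    import org.apache.pekko.Done
    import org.apache.pekko.projection.r2dbc.javadsl.{ R2dbcHandler, R2dbcSession }

    val handler: R2dbcHandler[String] =
      R2dbcHandler.fromFunction[String] { (session: R2dbcSession, word: String) =>
        val stmt = session
          .createStatement("INSERT INTO words (word) VALUES ($1)")
          .bind(0, word)
        session.updateOne(stmt).thenApply[Done](_ => Done)
      }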
--------------------------------------------------------------------------------
/projection/src/main/scala/org/apache/pekko/projection/r2dbc/javadsl/R2dbcSession.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Licensed to the Apache Software Foundation (ASF) under one or more
3 | * license agreements; and to You under the Apache License, version 2.0:
4 | *
5 | * https://www.apache.org/licenses/LICENSE-2.0
6 | *
7 | * This file is part of the Apache Pekko project, which was derived from Akka.
8 | */
9 |
10 | /*
11 | * Copyright (C) 2021 Lightbend Inc.
12 | */
13 |
14 | package org.apache.pekko.projection.r2dbc.javadsl
15 |
16 | import java.util.Optional
17 | import java.util.concurrent.CompletionStage
18 |
19 | import scala.concurrent.ExecutionContext
20 |
21 | import org.apache.pekko
22 | import pekko.actor.typed.ActorSystem
23 | import pekko.annotation.ApiMayChange
24 | import pekko.dispatch.ExecutionContexts
25 | import pekko.persistence.r2dbc.internal.R2dbcExecutor
26 | import pekko.util.ccompat.JavaConverters._
27 | import pekko.util.FutureConverters._
28 | import pekko.util.OptionConverters._
29 | import io.r2dbc.spi.Connection
30 | import io.r2dbc.spi.Row
31 | import io.r2dbc.spi.Statement
32 |
33 | @ApiMayChange
34 | final class R2dbcSession(connection: Connection)(implicit ec: ExecutionContext, system: ActorSystem[_]) {
35 |
36 | def createStatement(sql: String): Statement =
37 | connection.createStatement(sql)
38 |
39 | def updateOne(statement: Statement): CompletionStage[java.lang.Long] =
40 | R2dbcExecutor.updateOneInTx(statement).map(java.lang.Long.valueOf)(ExecutionContexts.parasitic).asJava
41 |
42 | def update(statements: java.util.List[Statement]): CompletionStage[java.util.List[java.lang.Long]] =
43 | R2dbcExecutor.updateInTx(statements.asScala.toVector).map(results =>
44 | results.map(java.lang.Long.valueOf).asJava).asJava
45 |
46 | def selectOne[A](statement: Statement)(mapRow: Row => A): CompletionStage[Optional[A]] =
47 | R2dbcExecutor.selectOneInTx(statement, mapRow).map(_.toJava)(ExecutionContexts.parasitic).asJava
48 |
49 | def select[A](statement: Statement)(mapRow: Row => A): CompletionStage[java.util.List[A]] =
50 | R2dbcExecutor.selectInTx(statement, mapRow).map(_.asJava).asJava
51 |
52 | }
53 |
--------------------------------------------------------------------------------
/projection/src/main/scala/org/apache/pekko/projection/r2dbc/scaladsl/R2dbcHandler.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Licensed to the Apache Software Foundation (ASF) under one or more
3 | * license agreements; and to You under the Apache License, version 2.0:
4 | *
5 | * https://www.apache.org/licenses/LICENSE-2.0
6 | *
7 | * This file is part of the Apache Pekko project, which was derived from Akka.
8 | */
9 |
10 | /*
11 | * Copyright (C) 2021 Lightbend Inc.
12 | */
13 |
14 | package org.apache.pekko.projection.r2dbc.scaladsl
15 |
16 | import scala.concurrent.Future
17 |
18 | import org.apache.pekko
19 | import pekko.Done
20 | import pekko.annotation.ApiMayChange
21 | import pekko.projection.scaladsl.HandlerLifecycle
22 |
23 | /**
24 | * Implement this interface for the Envelope handler for R2DBC Projections.
25 | *
26 | * It can be stateful, with variables and mutable data structures. It is invoked by the `Projection` machinery one
27 | * envelope at a time and visibility guarantees between the invocations are handled automatically, i.e. no volatile or
28 | * other concurrency primitives are needed for managing the state.
29 | *
30 | * Supported error handling strategies for when processing an `Envelope` fails can be defined in configuration or using
31 | * the `withRecoveryStrategy` method of a `Projection` implementation.
32 | */
33 | @ApiMayChange
34 | trait R2dbcHandler[Envelope] extends HandlerLifecycle {
35 |
36 | /**
37 | * The `process` method is invoked for each `Envelope`. Each time a new `Connection` is passed with a new open
38 | * transaction. You can use `createStatement`, `update` and other methods provided by the [[R2dbcSession]]. The
39 | * results of several statements can be combined with `Future` composition (e.g. `flatMap`). The transaction will be
40 | * automatically committed or rolled back when the returned `Future[Done]` is completed.
41 | *
42 | * One envelope is processed at a time. It will not be invoked with the next envelope until after this method returns
43 | * and the returned `Future[Done]` is completed.
44 | */
45 | def process(session: R2dbcSession, envelope: Envelope): Future[Done]
46 |
47 | }
48 |
49 | @ApiMayChange
50 | object R2dbcHandler {
51 |
52 | /** R2dbcHandler that can be defined from a simple function */
53 | private class R2dbcHandlerFunction[Envelope](handler: (R2dbcSession, Envelope) => Future[Done])
54 | extends R2dbcHandler[Envelope] {
55 |
56 | override def process(session: R2dbcSession, envelope: Envelope): Future[Done] =
57 | handler(session, envelope)
58 | }
59 |
60 | def apply[Envelope](handler: (R2dbcSession, Envelope) => Future[Done]): R2dbcHandler[Envelope] =
61 | new R2dbcHandlerFunction(handler)
62 | }
63 |
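A minimal sketch of such a handler; the table, SQL and envelope type are illustrative:

    import scala.concurrent.ExecutionContext
    import org.apache.pekko.Done
    import org.apache.pekko.projection.r2dbc.scaladsl.{ R2dbcHandler, R2dbcSession }

    val handler: R2dbcHandler[String] = R2dbcHandler[String] { (session, word) =>
      implicit val ec: ExecutionContext = session.ec
      val stmt = session
        .createStatement("INSERT INTO words (word) VALUES ($1)")
        .bind(0, word)
      session.updateOne(stmt).map(_ => Done)
    }

The transaction the statement runs in is committed or rolled back by the projection when the returned Future completes, as described in the scaladoc above.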
--------------------------------------------------------------------------------
/projection/src/main/scala/org/apache/pekko/projection/r2dbc/scaladsl/R2dbcSession.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Licensed to the Apache Software Foundation (ASF) under one or more
3 | * license agreements; and to You under the Apache License, version 2.0:
4 | *
5 | * https://www.apache.org/licenses/LICENSE-2.0
6 | *
7 | * This file is part of the Apache Pekko project, which was derived from Akka.
8 | */
9 |
10 | /*
11 | * Copyright (C) 2021 Lightbend Inc.
12 | */
13 |
14 | package org.apache.pekko.projection.r2dbc.scaladsl
15 |
16 | import scala.collection.immutable
17 | import scala.concurrent.ExecutionContext
18 | import scala.concurrent.Future
19 |
20 | import org.apache.pekko
21 | import pekko.actor.typed.ActorSystem
22 | import pekko.persistence.r2dbc.internal.R2dbcExecutor
23 | import io.r2dbc.spi.Connection
24 | import io.r2dbc.spi.Row
25 | import io.r2dbc.spi.Statement
26 |
27 | final class R2dbcSession(val connection: Connection)(implicit val ec: ExecutionContext, val system: ActorSystem[_]) {
28 |
29 | def createStatement(sql: String): Statement =
30 | connection.createStatement(sql)
31 |
32 | def updateOne(statement: Statement): Future[Long] =
33 | R2dbcExecutor.updateOneInTx(statement)
34 |
35 | def update(statements: immutable.IndexedSeq[Statement]): Future[immutable.IndexedSeq[Long]] =
36 | R2dbcExecutor.updateInTx(statements)
37 |
38 | def selectOne[A](statement: Statement)(mapRow: Row => A): Future[Option[A]] =
39 | R2dbcExecutor.selectOneInTx(statement, mapRow)
40 |
41 | def select[A](statement: Statement)(mapRow: Row => A): Future[immutable.IndexedSeq[A]] =
42 | R2dbcExecutor.selectInTx(statement, mapRow)
43 |
44 | }
45 |
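For reads, the select helpers take a row-mapping function; a small sketch, with made-up table and column names:

    import scala.concurrent.Future
    import org.apache.pekko.projection.r2dbc.scaladsl.R2dbcSession

    def findWord(session: R2dbcSession, id: Long): Future[Option[String]] = {
      val stmt = session
        .createStatement("SELECT word FROM words WHERE id = $1")
        .bind(0, id)
      session.selectOne(stmt)(row => row.get("word", classOf[String]))
    }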
--------------------------------------------------------------------------------
/projection/src/test/resources/logback-test.xml:
--------------------------------------------------------------------------------
(logback XML markup lost in extraction; only the encoder pattern line is recoverable)
[%date{ISO8601}] [%level] [%logger] [%X{pekkoAddress}] [%marker] [%thread] - %msg%n
--------------------------------------------------------------------------------
/projection/src/test/scala/org/apache/pekko/projection/TestStatusObserver.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Licensed to the Apache Software Foundation (ASF) under one or more
3 | * license agreements; and to You under the Apache License, version 2.0:
4 | *
5 | * https://www.apache.org/licenses/LICENSE-2.0
6 | *
7 | * This file is part of the Apache Pekko project, which was derived from Akka.
8 | */
9 |
10 | /*
11 | * Copyright (C) 2021 Lightbend Inc.
12 | */
13 |
14 | package org.apache.pekko.projection
15 |
16 | import org.apache.pekko.actor.typed.ActorRef
17 |
18 | // FIXME this is an exact copy of projection-core-test/src/test/scala/org/apache/pekko/projection/TestStatusObserver.scala
19 |
20 | object TestStatusObserver {
21 | sealed trait Status
22 |
23 | case object Started extends Status
24 | case object Failed extends Status
25 | case object Stopped extends Status
26 |
27 | final case class OffsetProgress[Envelope](envelope: Envelope) extends Status
28 |
29 | trait EnvelopeProgress[Envelope] extends Status
30 | final case class Before[Envelope](envelope: Envelope) extends EnvelopeProgress[Envelope]
31 | final case class After[Envelope](envelope: Envelope) extends EnvelopeProgress[Envelope]
32 |
33 | final case class Err[Envelope](env: Envelope, cause: Throwable) extends Status {
34 | // don't include cause message in equals
35 | override def equals(obj: Any): Boolean = obj match {
36 | case Err(`env`, e) => e.getClass == cause.getClass
37 | case _ => false
38 | }
39 |
40 | override def hashCode(): Int = env.hashCode()
41 | }
42 | }
43 |
44 | class TestStatusObserver[Envelope](
45 | probe: ActorRef[TestStatusObserver.Status],
46 | lifecycle: Boolean = false,
47 | offsetProgressProbe: Option[ActorRef[TestStatusObserver.OffsetProgress[Envelope]]] = None,
48 | beforeEnvelopeProbe: Option[ActorRef[TestStatusObserver.Before[Envelope]]] = None,
49 | afterEnvelopeProbe: Option[ActorRef[TestStatusObserver.After[Envelope]]] = None)
50 | extends StatusObserver[Envelope] {
51 | import TestStatusObserver._
52 |
53 | override def started(projectionId: ProjectionId): Unit = {
54 | if (lifecycle)
55 | probe ! Started
56 | }
57 |
58 | override def failed(projectionId: ProjectionId, cause: Throwable): Unit = {
59 | if (lifecycle)
60 | probe ! Failed
61 | }
62 |
63 | override def stopped(projectionId: ProjectionId): Unit = {
64 | if (lifecycle)
65 | probe ! Stopped
66 | }
67 |
68 | override def beforeProcess(projectionId: ProjectionId, envelope: Envelope): Unit = {
69 | beforeEnvelopeProbe.foreach(_ ! Before(envelope))
70 | }
71 |
72 | override def afterProcess(projectionId: ProjectionId, envelope: Envelope): Unit = {
73 | afterEnvelopeProbe.foreach(_ ! After(envelope))
74 | }
75 |
76 | override def offsetProgress(projectionId: ProjectionId, envelope: Envelope): Unit = {
77 | offsetProgressProbe.foreach(_ ! OffsetProgress(envelope))
78 | }
79 |
80 | override def error(
81 | projectionId: ProjectionId,
82 | envelope: Envelope,
83 | cause: Throwable,
84 | recoveryStrategy: HandlerRecoveryStrategy): Unit = {
85 | probe ! Err(envelope, cause)
86 | }
87 |
88 | }
89 |
--------------------------------------------------------------------------------
/projection/src/test/scala/org/apache/pekko/projection/r2dbc/TestClock.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Licensed to the Apache Software Foundation (ASF) under one or more
3 | * license agreements; and to You under the Apache License, version 2.0:
4 | *
5 | * https://www.apache.org/licenses/LICENSE-2.0
6 | *
7 | * This file is part of the Apache Pekko project, which was derived from Akka.
8 | */
9 |
10 | /*
11 | * Copyright (C) 2021 Lightbend Inc.
12 | */
13 |
14 | package org.apache.pekko.projection.r2dbc
15 |
16 | import java.time.Clock
17 | import java.time.Duration
18 | import java.time.Instant
19 | import java.time.ZoneId
20 | import java.time.ZoneOffset
21 |
22 | import org.apache.pekko.annotation.InternalApi
23 |
24 | /**
25 | * INTERNAL API
26 | */
27 | @InternalApi private[projection] class TestClock extends Clock {
28 |
29 | @volatile private var _instant = roundToMillis(Instant.now())
30 |
31 | override def getZone: ZoneId = ZoneOffset.UTC
32 |
33 | override def withZone(zone: ZoneId): Clock =
34 | throw new UnsupportedOperationException("withZone not supported")
35 |
36 | override def instant(): Instant =
37 | _instant
38 |
39 | def setInstant(newInstant: Instant): Unit =
40 | _instant = roundToMillis(newInstant)
41 |
42 | def tick(duration: Duration): Instant = {
43 | val newInstant = roundToMillis(_instant.plus(duration))
44 | _instant = newInstant
45 | newInstant
46 | }
47 |
48 | private def roundToMillis(i: Instant): Instant = {
49 | // algo taken from java.time.Clock.tick
50 | val epochMilli = i.toEpochMilli
51 | Instant.ofEpochMilli(epochMilli - Math.floorMod(epochMilli, 1L))
52 | }
53 |
54 | }
55 |
--------------------------------------------------------------------------------
/projection/src/test/scala/org/apache/pekko/projection/r2dbc/TestConfig.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Licensed to the Apache Software Foundation (ASF) under one or more
3 | * license agreements; and to You under the Apache License, version 2.0:
4 | *
5 | * https://www.apache.org/licenses/LICENSE-2.0
6 | *
7 | * This file is part of the Apache Pekko project, which was derived from Akka.
8 | */
9 |
10 | /*
11 | * Copyright (C) 2021 Lightbend Inc.
12 | */
13 |
14 | package org.apache.pekko.projection.r2dbc
15 |
16 | import com.typesafe.config.Config
17 | import com.typesafe.config.ConfigFactory
18 |
19 | object TestConfig {
20 | lazy val config: Config = {
21 | val defaultConfig = ConfigFactory.load()
22 | val dialect = defaultConfig.getString("pekko.projection.r2dbc.dialect")
23 |
24 | val dialectConfig = dialect match {
25 | case "postgres" =>
26 | ConfigFactory.parseString("""
27 | pekko.persistence.r2dbc.connection-factory {
28 | driver = "postgres"
29 | host = "localhost"
30 | port = 5432
31 | user = "postgres"
32 | password = "postgres"
33 | database = "postgres"
34 | }
35 | """)
36 | case "yugabyte" =>
37 | ConfigFactory.parseString("""
38 | pekko.persistence.r2dbc.connection-factory {
39 | driver = "postgres"
40 | host = "localhost"
41 | port = 5433
42 | user = "yugabyte"
43 | password = "yugabyte"
44 | database = "yugabyte"
45 | }
46 | """)
47 | case "mysql" =>
48 | ConfigFactory.parseString("""
49 | pekko.persistence.r2dbc{
50 | connection-factory {
51 | driver = "mysql"
52 | host = "localhost"
53 | port = 3306
54 | user = "root"
55 | password = "root"
56 | database = "mysql"
57 | }
58 | db-timestamp-monotonic-increasing = on
59 | use-app-timestamp = on
60 | }
61 | """)
62 | }
63 |
64 | // using load here so that connection-factory can be overridden
65 | ConfigFactory.load(dialectConfig.withFallback(ConfigFactory.parseString("""
66 | pekko.persistence.journal.plugin = "pekko.persistence.r2dbc.journal"
67 | pekko.persistence.state.plugin = "pekko.persistence.r2dbc.state"
68 | pekko.persistence.r2dbc {
69 | query {
70 | refresh-interval = 1s
71 | }
72 | }
73 | pekko.actor.testkit.typed.default-timeout = 10s
74 | """)))
75 | }
76 | }
77 |
--------------------------------------------------------------------------------
/projection/src/test/scala/org/apache/pekko/projection/r2dbc/TestData.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Licensed to the Apache Software Foundation (ASF) under one or more
3 | * license agreements; and to You under the Apache License, version 2.0:
4 | *
5 | * https://www.apache.org/licenses/LICENSE-2.0
6 | *
7 | * This file is part of the Apache Pekko project, which was derived from Akka.
8 | */
9 |
10 | /*
11 | * Copyright (C) 2021 Lightbend Inc.
12 | */
13 |
14 | package org.apache.pekko.projection.r2dbc
15 |
16 | import java.util.UUID
17 | import java.util.concurrent.atomic.AtomicLong
18 |
19 | import org.apache.pekko.projection.ProjectionId
20 |
21 | object TestData {
22 | private val start = 0L // could be something more unique, like currentTimeMillis
23 | private val pidCounter = new AtomicLong(start)
24 | private val entityTypeCounter = new AtomicLong(start)
25 | }
26 |
27 | trait TestData {
28 | import TestData.pidCounter
29 | import TestData.entityTypeCounter
30 |
31 | def nextPid() = s"p-${pidCounter.incrementAndGet()}"
32 | // FIXME return PersistenceId instead
33 | def nextPid(entityType: String) = s"$entityType|p-${pidCounter.incrementAndGet()}"
34 |
35 | def nextEntityType() = s"TestEntity-${entityTypeCounter.incrementAndGet()}"
36 |
37 | def genRandomProjectionId(): ProjectionId = ProjectionId(UUID.randomUUID().toString, "00")
38 |
39 | }
40 |
--------------------------------------------------------------------------------
/projection/src/test/scala/org/apache/pekko/projection/r2dbc/TestDbLifecycle.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Licensed to the Apache Software Foundation (ASF) under one or more
3 | * license agreements; and to You under the Apache License, version 2.0:
4 | *
5 | * https://www.apache.org/licenses/LICENSE-2.0
6 | *
7 | * This file is part of the Apache Pekko project, which was derived from Akka.
8 | */
9 |
10 | /*
11 | * Copyright (C) 2021 Lightbend Inc.
12 | */
13 |
14 | package org.apache.pekko.projection.r2dbc
15 |
16 | import scala.concurrent.Await
17 | import scala.concurrent.duration._
18 |
19 | import org.apache.pekko
20 | import pekko.actor.typed.ActorSystem
21 | import pekko.persistence.Persistence
22 | import pekko.persistence.r2dbc.ConnectionFactoryProvider
23 | import pekko.persistence.r2dbc.R2dbcSettings
24 | import pekko.persistence.r2dbc.internal.R2dbcExecutor
25 | import org.scalatest.BeforeAndAfterAll
26 | import org.scalatest.Suite
27 | import org.slf4j.LoggerFactory
28 |
29 | trait TestDbLifecycle extends BeforeAndAfterAll { this: Suite =>
30 |
31 | def typedSystem: ActorSystem[_]
32 |
33 | def testConfigPath: String = "pekko.projection.r2dbc"
34 |
35 | lazy val r2dbcProjectionSettings: R2dbcProjectionSettings =
36 | R2dbcProjectionSettings(typedSystem.settings.config.getConfig(testConfigPath))
37 |
38 | lazy val r2dbcExecutor: R2dbcExecutor = {
39 | new R2dbcExecutor(
40 | ConnectionFactoryProvider(typedSystem).connectionFactoryFor(r2dbcProjectionSettings.useConnectionFactory),
41 | LoggerFactory.getLogger(getClass),
42 | r2dbcProjectionSettings.logDbCallsExceeding)(typedSystem.executionContext, typedSystem)
43 | }
44 |
45 | lazy val persistenceExt: Persistence = Persistence(typedSystem)
46 |
47 | override protected def beforeAll(): Unit = {
48 | lazy val r2dbcSettings: R2dbcSettings =
49 | new R2dbcSettings(typedSystem.settings.config.getConfig("pekko.persistence.r2dbc"))
50 | Await.result(
51 | r2dbcExecutor.updateOne("beforeAll delete")(
52 | _.createStatement(s"delete from ${r2dbcSettings.journalTableWithSchema}")),
53 | 10.seconds)
54 | Await.result(
55 | r2dbcExecutor.updateOne("beforeAll delete")(
56 | _.createStatement(s"delete from ${r2dbcSettings.durableStateTableWithSchema}")),
57 | 10.seconds)
58 | if (r2dbcProjectionSettings.isOffsetTableDefined) {
59 | Await.result(
60 | r2dbcExecutor.updateOne("beforeAll delete")(
61 | _.createStatement(s"delete from ${r2dbcProjectionSettings.offsetTableWithSchema}")),
62 | 10.seconds)
63 | }
64 | Await.result(
65 | r2dbcExecutor.updateOne("beforeAll delete")(
66 | _.createStatement(s"delete from ${r2dbcProjectionSettings.timestampOffsetTableWithSchema}")),
67 | 10.seconds)
68 | Await.result(
69 | r2dbcExecutor.updateOne("beforeAll delete")(
70 | _.createStatement(s"delete from ${r2dbcProjectionSettings.managementTableWithSchema}")),
71 | 10.seconds)
72 | super.beforeAll()
73 | }
74 |
75 | }
76 |
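A minimal sketch of the wiring a concrete spec typically provides for the trait above: the testkit's actor system satisfies the abstract typedSystem, and beforeAll then clears the journal, durable state and offset tables before the tests run. The spec name is illustrative, and a database reachable with the configured connection settings (for example one started from the docker-compose files in this repository) is assumed:

package org.apache.pekko.projection.r2dbc

import org.apache.pekko
import pekko.actor.testkit.typed.scaladsl.ScalaTestWithActorTestKit
import pekko.actor.typed.ActorSystem
import org.scalatest.wordspec.AnyWordSpecLike

// Hypothetical spec: mixes in TestDbLifecycle so every run starts from empty tables.
class ExampleProjectionSpec
    extends ScalaTestWithActorTestKit
    with AnyWordSpecLike
    with TestDbLifecycle
    with TestData {

  // Satisfies the abstract member of TestDbLifecycle with the testkit's system.
  override def typedSystem: ActorSystem[_] = system

  "a projection test" should {
    "start from a clean database" in {
      // r2dbcExecutor and r2dbcProjectionSettings from TestDbLifecycle are in scope here
      // and can be used to assert on rows written by the projection under test.
      r2dbcProjectionSettings.timestampOffsetTableWithSchema should not be empty
    }
  }
}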
--------------------------------------------------------------------------------
/projection/src/test/scala/org/apache/pekko/projection/r2dbc/TestSourceProviderWithInput.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Licensed to the Apache Software Foundation (ASF) under one or more
3 | * license agreements; and to You under the Apache License, version 2.0:
4 | *
5 | * https://www.apache.org/licenses/LICENSE-2.0
6 | *
7 | * This file is part of the Apache Pekko project, which was derived from Akka.
8 | */
9 |
10 | /*
11 | * Copyright (C) 2021 Lightbend Inc.
12 | */
13 |
14 | package org.apache.pekko.projection.r2dbc
15 |
16 | import java.time.Instant
17 | import java.util.concurrent.ConcurrentLinkedQueue
18 | import java.util.concurrent.atomic.AtomicReference
19 |
20 | import scala.concurrent.ExecutionContext
21 | import scala.concurrent.Future
22 | import scala.concurrent.Promise
23 |
24 | import org.apache.pekko
25 | import pekko.NotUsed
26 | import pekko.actor.typed.ActorRef
27 | import pekko.actor.typed.ActorSystem
28 | import pekko.actor.typed.scaladsl.adapter._
29 | import pekko.persistence.Persistence
30 | import pekko.persistence.query.TimestampOffset
31 | import pekko.persistence.query.typed.EventEnvelope
32 | import pekko.persistence.query.typed.scaladsl.EventTimestampQuery
33 | import pekko.persistence.query.typed.scaladsl.LoadEventQuery
34 | import pekko.projection.BySlicesSourceProvider
35 | import pekko.projection.scaladsl.SourceProvider
36 | import pekko.stream.OverflowStrategy
37 | import pekko.stream.scaladsl.Source
38 | import pekko.util.ccompat.JavaConverters._
39 |
40 | class TestSourceProviderWithInput()(implicit val system: ActorSystem[_])
41 | extends SourceProvider[TimestampOffset, EventEnvelope[String]]
42 | with BySlicesSourceProvider
43 | with EventTimestampQuery
44 | with LoadEventQuery {
45 |
46 | private implicit val ec: ExecutionContext = system.executionContext
47 | private val persistenceExt = Persistence(system)
48 |
49 | private val _input = new AtomicReference[Promise[ActorRef[EventEnvelope[String]]]](Promise())
50 |
51 | def input: Future[ActorRef[EventEnvelope[String]]] = _input.get().future
52 |
53 | private val envelopes = new ConcurrentLinkedQueue[EventEnvelope[String]]
54 |
55 | override def source(offset: () => Future[Option[TimestampOffset]]): Future[Source[EventEnvelope[String], NotUsed]] = {
56 | val oldPromise = _input.get()
57 | _input.set(Promise())
58 | offset().map { _ =>
59 | Source
60 | .actorRef[EventEnvelope[String]](
61 | PartialFunction.empty,
62 | PartialFunction.empty,
63 | bufferSize = 1024,
64 | OverflowStrategy.fail)
65 | .map { env =>
66 | envelopes.offer(env)
67 | env
68 | }
69 | .mapMaterializedValue { ref =>
70 | val typedRef = ref.toTyped[EventEnvelope[String]]
71 | oldPromise.trySuccess(typedRef)
72 | _input.get().trySuccess(typedRef)
73 | NotUsed
74 | }
75 | }
76 | }
77 |
78 | override def extractOffset(envelope: EventEnvelope[String]): TimestampOffset =
79 | envelope.offset.asInstanceOf[TimestampOffset]
80 |
81 | override def extractCreationTime(envelope: EventEnvelope[String]): Long =
82 | envelope.timestamp
83 |
84 | override def minSlice: Int = 0
85 |
86 | override def maxSlice: Int = persistenceExt.numberOfSlices - 1
87 |
88 | override def timestampOf(persistenceId: String, sequenceNr: Long): Future[Option[Instant]] = {
89 | Future.successful(envelopes.iterator().asScala.collectFirst {
90 | case env
91 | if env.persistenceId == persistenceId && env.sequenceNr == sequenceNr && env.offset
92 | .isInstanceOf[TimestampOffset] =>
93 | env.offset.asInstanceOf[TimestampOffset].timestamp
94 | })
95 | }
96 |
97 | override def loadEnvelope[Event](persistenceId: String, sequenceNr: Long): Future[EventEnvelope[Event]] = {
98 | envelopes.iterator().asScala.collectFirst {
99 | case env if env.persistenceId == persistenceId && env.sequenceNr == sequenceNr =>
100 | env.asInstanceOf[EventEnvelope[Event]]
101 | } match {
102 | case Some(env) => Future.successful(env)
103 | case None =>
104 | Future.failed(
105 | new NoSuchElementException(
106 | s"Event with persistenceId [$persistenceId] and sequenceNr [$sequenceNr] not found."))
107 | }
108 | }
109 | }
110 |
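A minimal sketch (assumed names, not from the repository) of how a test can build envelopes and push them through the provider above once a projection has materialized its source. It assumes the EventEnvelope.apply overload taking offset, persistenceId, sequenceNr, event, timestamp, entityType and slice, which is the envelope shape this provider consumes:

package org.apache.pekko.projection.r2dbc

import java.time.Instant

import scala.concurrent.ExecutionContext
import scala.concurrent.Future

import org.apache.pekko
import pekko.actor.typed.ActorSystem
import pekko.persistence.Persistence
import pekko.persistence.query.TimestampOffset
import pekko.persistence.query.typed.EventEnvelope

object TestSourceProviderWithInputUsage {

  // Illustrative envelope factory: offset, timestamp, entity type and slice are filled in
  // the same shape the r2dbc eventsBySlices query would produce for a real event.
  def envelope(pid: String, seqNr: Long, event: String)(
      implicit system: ActorSystem[_]): EventEnvelope[String] = {
    val now = Instant.now()
    EventEnvelope(
      TimestampOffset(now, Map(pid -> seqNr)),
      pid,
      seqNr,
      event,
      now.toEpochMilli,
      "TestEntity",
      Persistence(system).sliceForPersistenceId(pid))
  }

  // Pushes events into the provider; `input` completes once the projection under test
  // has materialized the Source.actorRef created in `source` above.
  def feed(provider: TestSourceProviderWithInput, events: Vector[EventEnvelope[String]])(
      implicit system: ActorSystem[_]): Future[Unit] = {
    implicit val ec: ExecutionContext = system.executionContext
    provider.input.map(ref => events.foreach(env => ref ! env))
  }
}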
--------------------------------------------------------------------------------
/scripts/link-validator.conf:
--------------------------------------------------------------------------------
1 | // config for https://github.com/ennru/site-link-validator/
2 | site-link-validator {
3 | root-dir = "docs/target/paradox/site/main/"
4 | # relative to `root-dir`
5 | start-file = "index.html"
6 |
7 | # Resolves URLs with the given prefix as local files instead
8 | link-mappings = [
9 | {
10 | prefix = "https://pekko.apache.org/docs/pekko/current/"
11 | replace = ""
12 | }
13 | # ScalaDoc from unidoc
14 | {
15 | prefix = "https://pekko.apache.org/api/pekko-persistence-r2dbc/current/"
16 | replace = "/../../../../../target/scala-2.13/unidoc/"
17 | }
22 | {
23 | prefix = "https://pekko.apache.org/api/pekko-persistence-r2dbc/snapshot/"
24 | replace = "/../../../../../target/scala-2.13/unidoc/"
25 | }
26 | ]
27 |
28 | ignore-missing-local-files-regex = ""
29 | // e.g. "^api/alpakka/snapshot/pekko/stream/alpakka/googlecloud/storage/impl/Formats.*"
30 |
31 | ignore-files = [
32 | # This file is generated, we can't do much about the invalid links here
33 | "license-report.html"
34 | ]
35 |
36 | non-https-whitelist = [
37 | ]
38 | }
39 |
--------------------------------------------------------------------------------