├── .gitattributes
├── .github
└── workflows
│ ├── ci.yml
│ ├── cloc.yml
│ └── release.yml
├── .gitignore
├── .idea
└── codeStyles
│ ├── Project.xml
│ └── codeStyleConfig.xml
├── .jvmopts
├── .scalafmt.conf
├── CHANGELOG.md
├── CONTRIBUTING.md
├── LICENSE
├── README.md
├── RELEASING.md
├── build.sbt
├── core
└── src
│ ├── main
│ ├── mima-filters
│ │ ├── 1.0.0.backwards.excludes
│ │ │ ├── pr-47-make-internal-api-private.excludes
│ │ │ ├── pr-62-arrange-internal-raft-protocol.excludes
│ │ │ ├── pr-66-implement-typed-cluster-replication.excludes
│ │ │ ├── pr-88-add-apis-for-persisting-to-multiple-data-stores.excludes
│ │ │ └── pr-92-version-up-scalapb.excludes
│ │ ├── 2.0.0.backwards.excludes
│ │ │ ├── pr-114-efficient-recovery-of-commit-log-store.excludes
│ │ │ ├── pr-119-raft-actor-auto-start.excludes
│ │ │ ├── pr-128-fix-inconsistency-after-installsnapshot.excludes
│ │ │ ├── pr-134-fix-lost-committed-events-by-moving-leader.excludes
│ │ │ ├── pr-137-event-sourcing-progress-tracking.excludes
│ │ │ └── pr-155-improve-entity-recovery-process.excludes
│ │ ├── 2.1.0.backwards.excludes
│ │ │ ├── pr-151-fix-append-entries-handling.excludes
│ │ │ ├── pr-159-refactor-replicate-message.excludes
│ │ │ ├── pr-161-replication-failure-based-on-entitys-applied-index.excludes
│ │ │ ├── pr-172-impl-shard-id-extractor.excludes
│ │ │ ├── pr-186-allow-only-specific-actors-to-become-leader.excludes
│ │ │ ├── pr-188-set-disabled-shards-only-api.excludes
│ │ │ └── pr-189-typed-cluster-replication-settings-supports-new-methods.excludes
│ │ └── 2.3.0.backwards.excludes
│ │ │ └── pr-217-fix-old-snapshot-read-in-consecutive-snapshot-synchronization.excludes
│ ├── protobuf
│ │ └── cluster_replication.proto
│ ├── resources
│ │ ├── example.conf
│ │ └── reference.conf
│ └── scala
│ │ ├── akka
│ │ └── lerna
│ │ │ ├── DeferredBehavior.scala
│ │ │ ├── InternalActorRefProxy.scala
│ │ │ ├── InternalRecipientRef.scala
│ │ │ └── StashFactory.scala
│ │ └── lerna
│ │ └── akka
│ │ └── entityreplication
│ │ ├── ClusterReplication.scala
│ │ ├── ClusterReplicationSerializable.scala
│ │ ├── ClusterReplicationSettings.scala
│ │ ├── ReplicationActor.scala
│ │ ├── ReplicationActorContext.scala
│ │ ├── ReplicationRegion.scala
│ │ ├── ReplicationRegionRaftActorStarter.scala
│ │ ├── internal
│ │ └── ClusterReplicationSettingsImpl.scala
│ │ ├── model
│ │ ├── EntityInstanceId.scala
│ │ ├── NormalizedEntityId.scala
│ │ ├── NormalizedShardId.scala
│ │ └── TypeName.scala
│ │ ├── protobuf
│ │ ├── ClusterReplicationSerializer.scala
│ │ └── OffsetEnvelope.scala
│ │ ├── raft
│ │ ├── Candidate.scala
│ │ ├── Follower.scala
│ │ ├── Leader.scala
│ │ ├── RaftActor.scala
│ │ ├── RaftActorBase.scala
│ │ ├── RaftMemberData.scala
│ │ ├── RaftProtocol.scala
│ │ ├── RaftSettings.scala
│ │ ├── RaftSettingsImpl.scala
│ │ ├── eventsourced
│ │ │ ├── CommitLogStoreActor.scala
│ │ │ └── InternalEvent.scala
│ │ ├── model
│ │ │ ├── ClientContext.scala
│ │ │ ├── EntityEvent.scala
│ │ │ ├── LogEntry.scala
│ │ │ ├── LogEntryIndex.scala
│ │ │ ├── MatchIndex.scala
│ │ │ ├── NextIndex.scala
│ │ │ ├── NoOp.scala
│ │ │ ├── RaftMember.scala
│ │ │ ├── ReplicatedLog.scala
│ │ │ ├── SnapshotStatus.scala
│ │ │ ├── SnapshottingProgress.scala
│ │ │ ├── Term.scala
│ │ │ └── exception
│ │ │ │ └── SeqIndexOutOfBoundsException.scala
│ │ ├── persistence
│ │ │ ├── EntitySnapshotsUpdatedTag.scala
│ │ │ └── RaftEventAdapter.scala
│ │ ├── protocol
│ │ │ ├── EntityPassivationPermitCommands.scala
│ │ │ ├── FetchEntityEvents.scala
│ │ │ ├── RaftCommands.scala
│ │ │ ├── ShardRequest.scala
│ │ │ ├── SnapshotOffer.scala
│ │ │ ├── SuspendEntity.scala
│ │ │ └── TryCreateEntity.scala
│ │ ├── routing
│ │ │ └── MemberIndex.scala
│ │ └── snapshot
│ │ │ ├── ShardSnapshotStore.scala
│ │ │ ├── SnapshotProtocol.scala
│ │ │ ├── SnapshotStore.scala
│ │ │ └── sync
│ │ │ └── SnapshotSyncManager.scala
│ │ ├── testkit
│ │ ├── TestReplicationActor.scala
│ │ └── TestReplicationActorProps.scala
│ │ ├── typed
│ │ ├── ClusterReplication.scala
│ │ ├── ClusterReplicationSettings.scala
│ │ ├── Effect.scala
│ │ ├── ReplicatedEntity.scala
│ │ ├── ReplicatedEntityBehavior.scala
│ │ ├── ReplicatedEntityContext.scala
│ │ ├── ReplicatedEntityRef.scala
│ │ ├── ReplicatedEntityTypeKey.scala
│ │ ├── ReplicationEnvelope.scala
│ │ ├── internal
│ │ │ ├── ClusterReplicationImpl.scala
│ │ │ ├── ReplicatedEntityImpl.scala
│ │ │ ├── ReplicatedEntityRefImpl.scala
│ │ │ ├── ReplicatedEntityTypeKeyImpl.scala
│ │ │ ├── ReplicationId.scala
│ │ │ ├── ReplicationIdImpl.scala
│ │ │ ├── behavior
│ │ │ │ ├── BehaviorSetup.scala
│ │ │ │ ├── Inactive.scala
│ │ │ │ ├── Ready.scala
│ │ │ │ ├── Recovering.scala
│ │ │ │ ├── ReplicatedEntityBehaviorImpl.scala
│ │ │ │ ├── ReplicationOperations.scala
│ │ │ │ └── WaitForReplication.scala
│ │ │ ├── effect
│ │ │ │ ├── EffectBuilderImpl.scala
│ │ │ │ ├── EffectImpl.scala
│ │ │ │ ├── MainEffect.scala
│ │ │ │ └── SideEffect.scala
│ │ │ └── testkit
│ │ │ │ ├── CommandResultImpl.scala
│ │ │ │ ├── ReplicatedEntityBehaviorTestKitImpl.scala
│ │ │ │ └── RestartResultImpl.scala
│ │ └── testkit
│ │ │ └── ReplicatedEntityBehaviorTestKit.scala
│ │ └── util
│ │ ├── ActorIds.scala
│ │ └── AtLeastOnceComplete.scala
│ ├── multi-jvm
│ ├── resources
│ │ ├── logback.xml
│ │ └── multi-jvm-testing.conf
│ └── scala
│ │ └── lerna
│ │ └── akka
│ │ └── entityreplication
│ │ ├── ConsistencyTestBase.scala
│ │ ├── ConsistencyTestNormal.scala
│ │ ├── PersistencePluginProxySupport.scala
│ │ ├── RaftActorCompactionSpec.scala
│ │ ├── RaftEventSourcedSpec.scala
│ │ ├── ReplicationActorMultiNodeSpec.scala
│ │ ├── ReplicationRegionInitializingSpec.scala
│ │ ├── ReplicationRegionSpec.scala
│ │ ├── STMultiNodeSerializable.scala
│ │ ├── STMultiNodeSpec.scala
│ │ ├── Sample.scala
│ │ ├── raft
│ │ └── RaftActorMultiNodeSpec.scala
│ │ ├── typed
│ │ ├── ClusterReplicationMultiNodeSpec.scala
│ │ ├── CompactionPreservesNotSourcedEventsSpec.scala
│ │ ├── EventSourcingAutoStartMultiNodeSpec.scala
│ │ ├── LogReplicationDuringSnapshotSyncSpec.scala
│ │ ├── MultiDataStoreSpec.scala
│ │ ├── MultiSnapshotSyncSpec.scala
│ │ ├── ReplicatedEntityCommandHandlingBeforeAndAfterCompactionSpec.scala
│ │ ├── ReplicatedEntityMultiNodeSpec.scala
│ │ └── ReplicatedEntitySnapshotMultiNodeSpec.scala
│ │ └── util
│ │ └── persistence
│ │ └── query
│ │ └── proxy
│ │ ├── ReadJournalPluginProxyActor.scala
│ │ ├── ReadJournalPluginProxyProvider.scala
│ │ ├── javadsl
│ │ └── ReadJournalPluginProxy.scala
│ │ └── scaladsl
│ │ └── ReadJournalPluginProxy.scala
│ └── test
│ ├── resources
│ ├── akka-entity-replication-with-cassandra.conf
│ ├── application-test.conf
│ ├── application.conf
│ └── logback.xml
│ └── scala
│ └── lerna
│ └── akka
│ └── entityreplication
│ ├── ClusterReplicationSettingsSpec.scala
│ ├── ReplicationActorSpec.scala
│ ├── ReplicationRegionRaftActorStarterSpec.scala
│ ├── protobuf
│ ├── ClusterReplicationSerializerBindingSpec.scala
│ ├── ClusterReplicationSerializerSpec.scala
│ └── SerializerSpecBase.scala
│ ├── raft
│ ├── ActorSpec.scala
│ ├── RaftActorCandidateEventSourcingSpec.scala
│ ├── RaftActorCandidateReceivingRequestVoteSpec.scala
│ ├── RaftActorCandidateSpec.scala
│ ├── RaftActorFollowerEventSourcingSpec.scala
│ ├── RaftActorFollowerReceivingRequestVoteSpec.scala
│ ├── RaftActorFollowerSpec.scala
│ ├── RaftActorLeaderEventSourcingSpec.scala
│ ├── RaftActorLeaderReceivingRequestVoteSpec.scala
│ ├── RaftActorLeaderSpec.scala
│ ├── RaftActorPersistenceDeletionSpec.scala
│ ├── RaftActorSnapshotSynchronizationSpec.scala
│ ├── RaftActorSpec.scala
│ ├── RaftActorSpecBase.scala
│ ├── RaftMemberDataSpec.scala
│ ├── RaftProtocolReplicateSpec.scala
│ ├── RaftSettingsSpec.scala
│ ├── RaftTestProbe.scala
│ ├── eventsourced
│ │ └── CommitLogStoreActorSpec.scala
│ ├── model
│ │ ├── ClientContextSpec.scala
│ │ ├── LogEntryIndexSpec.scala
│ │ ├── ReplicatedLogSpec.scala
│ │ └── SnapshotStatusSpec.scala
│ ├── protocol
│ │ └── RaftCommandsAppendEntriesSpec.scala
│ └── snapshot
│ │ ├── ShardSnapshotStoreFailureSpec.scala
│ │ ├── ShardSnapshotStoreSpecBase.scala
│ │ ├── ShardSnapshotStoreSuccessSpec.scala
│ │ ├── SnapshotStorePersistenceDeletionSpec.scala
│ │ ├── SnapshotStoreSpec.scala
│ │ └── sync
│ │ ├── SnapshotSyncManagerFinalizingSpec.scala
│ │ └── SnapshotSyncManagerSpec.scala
│ ├── testkit
│ ├── CustomTestProbe.scala
│ ├── CustomTestProbeSpec.scala
│ ├── KryoSerializable.scala
│ └── TestReplicationActorPropsSpec.scala
│ ├── typed
│ ├── ClusterReplicationSpec.scala
│ ├── EffectSpec.scala
│ ├── ReplicatedEntityBehaviorSpec.scala
│ ├── ReplicatedEntityRefSpec.scala
│ └── testkit
│ │ └── ReplicatedEntityBehaviorTestKitSpec.scala
│ └── util
│ ├── ActorIdsSpec.scala
│ ├── AtLeastOnceCompleteSpec.scala
│ ├── EventStore.scala
│ ├── RaftEventJournalTestKit.scala
│ ├── RaftEventJournalTestKitSpec.scala
│ ├── RaftSnapshotStoreTestKit.scala
│ └── RaftSnapshotStoreTestKitSpec.scala
├── docs
├── example
│ └── read-side.sc
├── images
│ └── demo.apng
├── implementation_guide.md
├── migration_guide.md
├── operation_guide.md
├── rollback_guide.md
├── testing_guide.md
└── typed
│ ├── implementation_guide.md
│ └── testing_guide.md
├── project
├── build.properties
├── plugins.sbt
└── scalapb.sbt
├── publish.sbt
├── rollback-tool-cassandra
└── src
│ ├── main
│ ├── mima-filters
│ │ └── 2.2.0.backwards.excludes
│ │ │ ├── pr-203-rollback-deletes-only-target-tagged-events.excludes
│ │ │ ├── pr-209-cassandra-persistence-queries-handles-deleted-partitions.excludes
│ │ │ └── pr-210-rollback-preparation-fails-if-required-data-have-been-deleted
│ ├── resources
│ │ └── reference.conf
│ └── scala
│ │ ├── akka
│ │ └── persistence
│ │ │ └── cassandra
│ │ │ └── lerna
│ │ │ ├── CassandraReadJournalExt.scala
│ │ │ └── Extractor.scala
│ │ └── lerna
│ │ └── akka
│ │ └── entityreplication
│ │ └── rollback
│ │ ├── DefaultRollbackRequirementsVerifier.scala
│ │ ├── LinearRollbackTimestampHintFinder.scala
│ │ ├── LinearSequenceNrSearchStrategy.scala
│ │ ├── PersistenceQueries.scala
│ │ ├── PersistentActorRollback.scala
│ │ ├── RaftEventSourcedPersistence.scala
│ │ ├── RaftPersistence.scala
│ │ ├── RaftShardPersistenceQueries.scala
│ │ ├── RaftShardRollback.scala
│ │ ├── RaftShardRollbackParameters.scala
│ │ ├── RaftShardRollbackSettings.scala
│ │ ├── RollbackException.scala
│ │ ├── RollbackRequirementsVerifier.scala
│ │ ├── RollbackTimestampHintFinder.scala
│ │ ├── SequenceNr.scala
│ │ ├── SequenceNrSearchStrategy.scala
│ │ ├── cassandra
│ │ ├── CassandraEventsByTagSettings.scala
│ │ ├── CassandraJournalSettings.scala
│ │ ├── CassandraPersistenceQueries.scala
│ │ ├── CassandraPersistenceQueriesSettings.scala
│ │ ├── CassandraPersistenceQueriesStatements.scala
│ │ ├── CassandraPersistentActorRollback.scala
│ │ ├── CassandraPersistentActorRollbackSettings.scala
│ │ ├── CassandraPersistentActorRollbackStatements.scala
│ │ ├── CassandraQuerySettings.scala
│ │ ├── CassandraRaftShardRollback.scala
│ │ ├── CassandraRaftShardRollbackSettings.scala
│ │ ├── CassandraSnapshotSettings.scala
│ │ ├── DeleteTagViews.scala
│ │ └── PartitionNr.scala
│ │ └── setup
│ │ ├── CommitLogStoreActorId.scala
│ │ ├── CommitLogStoreActorRollbackSetup.scala
│ │ ├── RaftActorId.scala
│ │ ├── RaftActorRollbackSetup.scala
│ │ ├── RaftShardRollbackSetup.scala
│ │ ├── RollbackSetup.scala
│ │ ├── SnapshotStoreId.scala
│ │ ├── SnapshotStoreRollbackSetup.scala
│ │ ├── SnapshotSyncManagerId.scala
│ │ └── SnapshotSyncManagerRollbackSetup.scala
│ ├── multi-jvm
│ ├── resources
│ │ ├── application.conf
│ │ └── logback.xml
│ └── scala
│ │ └── lerna
│ │ └── akka
│ │ └── entityreplication
│ │ └── rollback
│ │ ├── cassandra
│ │ ├── CassandraRaftShardRollbackFailureWithDeletionSpec.scala
│ │ ├── CassandraRaftShardRollbackSpec.scala
│ │ ├── CassandraRaftShardRollbackWithDeletionSpec.scala
│ │ └── STMultiNodeSpec.scala
│ │ └── testkit
│ │ └── CatalogReplicatedEntity.scala
│ └── test
│ ├── resources
│ ├── application.conf
│ └── logback.xml
│ └── scala
│ └── lerna
│ └── akka
│ └── entityreplication
│ └── rollback
│ ├── DefaultRollbackRequirementsVerifierSpec.scala
│ ├── JsonSerializable.scala
│ ├── LinearRollbackTimestampHintFinderSpec.scala
│ ├── LinearSequenceNrSearchStrategySpec.scala
│ ├── RaftShardPersistenceQueriesSpec.scala
│ ├── RaftShardRollbackParametersSpec.scala
│ ├── RaftShardRollbackSettingsSpec.scala
│ ├── RaftShardRollbackSpec.scala
│ ├── SequenceNrSpec.scala
│ ├── cassandra
│ ├── CassandraEventsByTagSettingsSpec.scala
│ ├── CassandraJournalSettingsSpec.scala
│ ├── CassandraPersistenceQueriesSettingsSpec.scala
│ ├── CassandraPersistenceQueriesSpec.scala
│ ├── CassandraPersistentActorRollbackSettingsSpec.scala
│ ├── CassandraPersistentActorRollbackSpec.scala
│ ├── CassandraQuerySettingsSpec.scala
│ ├── CassandraRaftShardRollbackSettingsSpec.scala
│ ├── CassandraSnapshotSettingsSpec.scala
│ ├── CassandraSpecBase.scala
│ ├── PartitionNrSpec.scala
│ └── testkit
│ │ ├── FirstTimeBucket.scala
│ │ ├── FirstTimeBucketSpec.scala
│ │ └── PersistenceCassandraConfigProvider.scala
│ ├── setup
│ ├── CommitLogStoreActorIdSpec.scala
│ ├── RaftActorIdSpec.scala
│ ├── RollbackSetupSpec.scala
│ ├── SnapshotStoreIdSpec.scala
│ └── SnapshotSyncManagerIdSpec.scala
│ └── testkit
│ ├── ConstantPersistenceQueries.scala
│ ├── ConstantPersistenceQueriesSpec.scala
│ ├── PatienceConfigurationForTestKitBase.scala
│ ├── PersistenceInitializationAwaiter.scala
│ ├── TestPersistentActor.scala
│ ├── TimeBasedUuids.scala
│ └── TimeBasedUuidsSpec.scala
├── scripts
└── run-multijvm-test.sh
└── src
└── site
└── index.html
/.gitattributes:
--------------------------------------------------------------------------------
1 | *.apng filter=lfs diff=lfs merge=lfs -text
2 |
--------------------------------------------------------------------------------
/.github/workflows/ci.yml:
--------------------------------------------------------------------------------
1 | name: CI
2 |
3 | on:
4 | push:
5 | branches: [ master, feature/** ]
6 | pull_request:
7 | branches: [ master, feature/** ]
8 |
9 | env:
10 | # classic timefactor doesn't affect classic default-timeout
11 | SBT_OPTS: >-
12 | -Dlerna.enable.discipline
13 | -Dakka.test.timefactor=3.0
14 | -Dakka.actor.testkit.typed.timefactor=3.0
15 | -Dakka.test.default-timeout=15s
16 | -Dakka.testconductor.barrier-timeout=90s
17 |
18 | jobs:
19 | test:
20 |
21 | runs-on: ubuntu-latest
22 | name: Test (Java ${{ matrix.java }})
23 | strategy:
24 | matrix:
25 | java: [ '8', '11' ]
26 | timeout-minutes: 60
27 | steps:
28 | - uses: actions/checkout@v2
29 | with:
30 | # sbt-dynver (used by sbt-ci-release) detects the previous version by tag:
31 | # https://github.com/dwijnand/sbt-dynver/tree/v4.1.1#previous-version-detection
32 | fetch-depth: 0
33 |
34 | - name: Set up JDK ${{ matrix.java }}
35 | uses: actions/setup-java@v2
36 | with:
37 | distribution: 'zulu'
38 | java-version: ${{ matrix.java }}
39 |
40 | # https://www.scala-sbt.org/1.x/docs/GitHub-Actions-with-sbt.html#Caching
41 | - name: Coursier cache
42 | uses: coursier/cache-action@v5
43 |
44 | - name: Check code format
45 | run: sbt scalafmtCheckAll
46 |
47 | # Detect compilation errors early
48 | - name: Compile
49 | run: sbt clean compile test:compile multi-jvm:compile
50 |
51 | - name: Check binary compatibility
52 | run: sbt --batch mimaReportBinaryIssues
53 |
54 | - name: Run tests
55 | run: sbt coverage test
56 |
57 | - name: Run integration tests
58 | run: sh ./scripts/run-multijvm-test.sh 1
59 |
60 | - name: Publish test report
61 | uses: mikepenz/action-junit-report@v2
62 | if: ${{ always() }}
63 | with:
64 | check_name: ScalaTest Report (Java ${{ matrix.java }})
65 | report_paths: '**/target/**test-reports/TEST-*.xml'
66 | github_token: ${{ secrets.GITHUB_TOKEN }}
67 |
68 | - name: Check the test coverage is above the minimum criteria
69 | run: sbt coverageReport
70 |
71 | # https://www.scala-sbt.org/1.x/docs/GitHub-Actions-with-sbt.html#Caching
72 | - name: Clean files for caching
73 | run: |
74 | rm -rf "$HOME/.ivy2/local" || true
75 | find $HOME/Library/Caches/Coursier/v1 -name "ivydata-*.properties" -delete || true
76 | find $HOME/.ivy2/cache -name "ivydata-*.properties" -delete || true
77 | find $HOME/.cache/coursier/v1 -name "ivydata-*.properties" -delete || true
78 | find $HOME/.sbt -name "*.lock" -delete || true
79 |
--------------------------------------------------------------------------------
/.github/workflows/cloc.yml:
--------------------------------------------------------------------------------
1 | # Count Lines of Code
2 | # https://github.com/AlDanial/cloc
3 | name: cloc
4 |
5 | on:
6 | workflow_dispatch:
7 | inputs:
8 | old_tree_ish:
9 | description: "commit-ish git object name"
10 | type: string
11 | required: false
12 | push:
13 | branches: [ master, feature/** ]
14 |
15 | jobs:
16 | cloc:
17 | runs-on: ubuntu-20.04
18 | name: Count Lines of Code
19 | steps:
20 | - uses: actions/checkout@v3
21 | with:
22 | fetch-depth: 0
23 | - name: Install cloc
24 | run: sudo apt-get install cloc jq
25 | - name: Count lines of code
26 | run: |
27 | set -eu
28 | cloc --vcs=git --exclude-dir=test . | tee cloc.txt
29 | - name: Count diff lines of code
30 | run: |
31 | set -eu
32 | if [ -z "${old_tree_ish}" ]; then
33 | old_tree_ish=$(
34 | curl -L \
35 | -H "Accept: application/vnd.github+json" \
36 | -H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" \
37 | -H "X-GitHub-Api-Version: 2022-11-28" \
38 | https://api.github.com/repos/${{ github.repository }}/releases/latest \
39 | | jq -r .tag_name
40 | )
41 | fi
42 | new_tree_ish=$(git rev-parse --abbrev-ref HEAD)
43 | echo "old_tree_ish=${old_tree_ish} new_tree_ish=${new_tree_ish}"
44 | cloc --git --diff "${old_tree_ish}" "${new_tree_ish}" | tee cloc_diff.txt
45 | env:
46 | old_tree_ish: ${{ inputs.old_tree_ish }}
47 | - uses: actions/upload-artifact@v3
48 | with:
49 | name: cloc
50 | path: |
51 | cloc.txt
52 | cloc_diff.txt
53 | retention-days: 3
54 |
--------------------------------------------------------------------------------
/.github/workflows/release.yml:
--------------------------------------------------------------------------------
1 | name: Release
2 | on:
3 | push:
4 | branches: [master, main]
5 | tags: ["*"]
6 | jobs:
7 | publish:
8 | runs-on: ubuntu-20.04
9 | steps:
10 | - uses: actions/checkout@v2.3.4
11 | with:
12 | fetch-depth: 0
13 | - uses: olafurpg/setup-scala@v10
14 | - uses: olafurpg/setup-gpg@v3
15 | - run: sbt ci-release
16 | env:
17 | PGP_PASSPHRASE: ${{ secrets.PGP_PASSPHRASE }}
18 | PGP_SECRET: ${{ secrets.PGP_SECRET }}
19 | SONATYPE_PASSWORD: ${{ secrets.SONATYPE_PASSWORD }}
20 | SONATYPE_USERNAME: ${{ secrets.SONATYPE_USERNAME }}
21 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | *.class
2 | .idea/*
3 | !.idea/codeStyles
4 | target/
5 | *.pyc
6 | .env
7 | sbt-cache/
8 | .bsp/
9 |
10 | # For Windows
11 | # protobridge is installed in the directory below
12 | /null/
13 |
14 | # For rollback-cassandra
15 | rollback-cassandra/.toDelete
16 | .toDelete
17 |
--------------------------------------------------------------------------------
/.idea/codeStyles/Project.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
--------------------------------------------------------------------------------
/.idea/codeStyles/codeStyleConfig.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
--------------------------------------------------------------------------------
/.jvmopts:
--------------------------------------------------------------------------------
1 | # scalafix recommends reserving sufficient resources
2 | # see: https://scalacenter.github.io/scalafix/docs/users/installation.html#sbt
3 | -Xss8m
4 | -Xms1G
5 | -Xmx8G
6 |
--------------------------------------------------------------------------------
/.scalafmt.conf:
--------------------------------------------------------------------------------
1 | // Scalafmt formatting settings
2 | // https://scalameta.org/scalafmt/docs/configuration.html
3 | version = 2.6.4
4 | project.git = true
5 |
6 | project.excludeFilters = [
7 | ".*/Tables.scala"
8 | ]
9 |
10 | maxColumn = 120
11 | align = more
12 | trailingCommas = always
13 | danglingParentheses = true
14 | indentOperator = spray
15 | includeCurlyBraceInSelectChains = true
16 | rewrite.rules = [RedundantParens, SortImports, PreferCurlyFors]
17 | spaces.inImportCurlyBraces = true
18 | binPack.literalArgumentLists = false
19 | unindentTopLevelOperators = true
20 | style = defaultWithAlign
21 | optIn.breaksInsideChains = true
22 |
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contributing
2 |
3 | ## Requirements
4 | - [Java 8](https://www.oracle.com/java/technologies/javase/javase-jdk8-downloads.html)
5 | - [SBT](https://www.scala-sbt.org/index.html)
6 |
7 | ## Workflow
8 | 1. Create your fork of this repository
9 | 2. Create a local branch based on `master`
10 | 3. Work in the branch
11 | 4. Push the branch into your repository
12 | 5. Create a Pull Request to the `master` branch of this repository
13 |
14 | ## Code Style
15 | We use [Scalafmt](https://scalameta.org/scalafmt/) to format the source code.
16 | We recommend you set up your editor as documented [here](https://scalameta.org/scalafmt/docs/installation.html).
17 |
18 | ## Run Tests
19 | ```shell
20 | sbt test
21 | ```
22 |
23 | ## Run Integration Tests
24 | ```shell
25 | sbt multi-jvm:test
26 | ```
27 |
28 | If we want to dig into failed test cases in integration tests,
29 | we can look at integration test reports `**/target/multi-jvm-test-reports/*.xml` using [xunit-viewer](https://www.npmjs.com/package/xunit-viewer).
30 |
31 | Tip: Integration tests are not stable for now.
32 |
33 | ## Take test coverage
34 | ```shell
35 | sbt testCoverage
36 | ```
37 |
38 | A test coverage is generated in the directory `target/scala-2.13/scoverage-report`.
39 |
40 | ## Build Scaladoc
41 | ```shell
42 | sbt unidoc
43 | ```
44 |
45 | Scaladoc is generated in the directory `target/scala-2.13/unidoc`.
46 |
47 | ## Preview GitHub Pages
48 |
49 | To preview generated GitHub Pages, run the below command.
50 | ```shell
51 | sbt previewSite
52 | ```
53 |
--------------------------------------------------------------------------------
/RELEASING.md:
--------------------------------------------------------------------------------
1 | # Releasing
2 |
3 | This document describes how to release a new version `X.Y.Z` for maintainers.
4 | It would be required to replace `X.Y.Z` with the actual release version.
5 |
6 | ## 1. Create a new branch
7 |
8 | Create a new branch `release/vX.Y.Z` from `master` branch like the following:
9 | ```shell
10 | git checkout master
11 | git pull origin
12 | git checkout -b release/vX.Y.Z
13 | ```
14 |
15 | ## 2. Update `CHANGELOG.md`
16 |
17 | 1. Add a section of the new release version `vX.Y.Z`
18 | We recommend you add one of the following links to this section.
19 | * `https://github.com/lerna-stack/akka-entity-replication/compare/vA.B.C...vX.Y.Z` if this release is a successor.
20 | * `A.B.C` is the previous latest version.
21 | * `https://github.com/lerna-stack/akka-entity-replication/tree/vX.Y.Z` if this release is the first one.
22 | 2. Update the unreleased version link to `https://github.com/lerna-stack/akka-entity-replication/compare/vX.Y.Z...master`
23 |
24 | ## 3. Commit & Push
25 |
26 | Commit changes, and then push the branch like the following:
27 | ```shell
28 | git commit --message 'release vX.Y.Z'
29 | git push origin
30 | ```
31 |
32 | ## 4. Create a Pull Request
33 |
34 | After committing and pushing all changes, create a pull request.
35 | Other maintainers will review and merge the pull request.
36 |
37 | ## 5. Push a new version tag `vX.Y.Z`
38 |
39 | *It is highly recommended to ensure that the new version tag is correct.*
40 | *The CI will automatically publish this release when detecting the version tag.*
41 |
42 | Create and push the new version tag `vX.Y.Z` like the following:
43 | ```shell
44 | git checkout master
45 | git pull origin
46 | git tag vX.Y.Z
47 | git push origin vX.Y.Z
48 | ```
49 |
50 | ## 6. Check the release is available
51 |
52 | Check the release is available at [Maven Central Repository](https://repo1.maven.org/maven2/com/lerna-stack/).
53 |
54 | **NOTE**
55 | - The release will be available about 10 minutes after publishing.
56 | - It may take additional time (about 2 hours at most) before the release can be found via search.
57 |
58 | ## 7. Publish GitHub Pages
59 |
60 | Publish GitHub Pages using `sbt-site` and `sbt-ghpages`.
61 | To publish the pages, checkout the new version tag `vX.Y.Z` and then run the below command.
62 | Note that you should have proper permission to publish.
63 | ```shell
64 | sbt ghpagesPushSite
65 | ```
66 |
67 | ## 8. Create a new release `vX.Y.Z`
68 |
69 | Create a new release `vX.Y.Z` from [this link](https://github.com/lerna-stack/akka-entity-replication/releases/new).
70 |
71 | - **Choose a tag**: select the new version tag
72 | - **Release title**: the same as the tag
73 | - **Describe this release**:
74 | Write the following text, at least.
75 | Replace the part `#vXYZ---YYYY-MM-DD` of the link with the actual release version and date.
76 | ```markdown
77 | See [CHANGELOG] for details.
78 |
79 | [CHANGELOG]: https://github.com/lerna-stack/akka-entity-replication/blob/master/CHANGELOG.md#vXYZ---YYYY-MM-DD
80 | ```
81 |
--------------------------------------------------------------------------------
/core/src/main/mima-filters/1.0.0.backwards.excludes/pr-47-make-internal-api-private.excludes:
--------------------------------------------------------------------------------
1 | ProblemFilters.exclude[DirectMissingMethodProblem]("lerna.akka.entityreplication.ClusterReplication.actorNamePrefix")
2 | ProblemFilters.exclude[DirectMissingMethodProblem]("lerna.akka.entityreplication.ReplicationRegion.props")
3 | ProblemFilters.exclude[DirectMissingMethodProblem]("lerna.akka.entityreplication.ClusterReplication.actorNamePrefix")
4 | ProblemFilters.exclude[DirectMissingMethodProblem]("lerna.akka.entityreplication.raft.RaftSettings.randomized")
5 | ProblemFilters.exclude[FinalClassProblem]("lerna.akka.entityreplication.raft.model.MatchIndex")
6 | ProblemFilters.exclude[FinalClassProblem]("lerna.akka.entityreplication.raft.model.Term")
7 | ProblemFilters.exclude[FinalClassProblem]("lerna.akka.entityreplication.raft.model.NextIndex")
8 | ProblemFilters.exclude[FinalClassProblem]("lerna.akka.entityreplication.raft.model.LogEntryIndex")
9 | ProblemFilters.exclude[FinalClassProblem]("lerna.akka.entityreplication.raft.model.ClientContext")
10 | ProblemFilters.exclude[FinalClassProblem]("lerna.akka.entityreplication.raft.model.ReplicatedLog")
11 | ProblemFilters.exclude[FinalClassProblem]("lerna.akka.entityreplication.raft.model.RaftMember")
12 | ProblemFilters.exclude[FinalClassProblem]("lerna.akka.entityreplication.raft.model.SnapshottingProgress")
13 | ProblemFilters.exclude[FinalClassProblem]("lerna.akka.entityreplication.raft.protocol.RaftCommands$AppendEntries")
14 | ProblemFilters.exclude[FinalClassProblem]("lerna.akka.entityreplication.raft.protocol.RaftCommands$RequestVote")
15 | ProblemFilters.exclude[FinalClassProblem]("lerna.akka.entityreplication.raft.protocol.RaftCommands$AppendEntriesFailed")
16 | ProblemFilters.exclude[FinalClassProblem]("lerna.akka.entityreplication.raft.protocol.RaftCommands$RequestVoteDenied")
17 | ProblemFilters.exclude[FinalClassProblem]("lerna.akka.entityreplication.raft.protocol.RaftCommands$AppendEntriesSucceeded")
18 | ProblemFilters.exclude[FinalClassProblem]("lerna.akka.entityreplication.raft.protocol.RaftCommands$RequestVoteAccepted")
19 |
--------------------------------------------------------------------------------
/core/src/main/mima-filters/1.0.0.backwards.excludes/pr-62-arrange-internal-raft-protocol.excludes:
--------------------------------------------------------------------------------
1 | # Following classes and objects will be package private in next release
2 | #
3 | # object lerna.akka.entityreplication.ReplicationActor#EntityRecoveryTimeoutException does not have a correspondent in current version
4 | ProblemFilters.exclude[MissingClassProblem]("lerna.akka.entityreplication.ReplicationActor$EntityRecoveryTimeoutException$")
5 | # class lerna.akka.entityreplication.ReplicationActor#Snapshot does not have a correspondent in current version
6 | ProblemFilters.exclude[MissingClassProblem]("lerna.akka.entityreplication.ReplicationActor$Snapshot")
7 | # object lerna.akka.entityreplication.ReplicationActor#TakeSnapshot does not have a correspondent in current version
8 | ProblemFilters.exclude[MissingClassProblem]("lerna.akka.entityreplication.ReplicationActor$TakeSnapshot$")
9 | # class lerna.akka.entityreplication.ReplicationActor#EntityRecoveryTimeoutException does not have a correspondent in current version
10 | ProblemFilters.exclude[MissingClassProblem]("lerna.akka.entityreplication.ReplicationActor$EntityRecoveryTimeoutException")
11 | # object lerna.akka.entityreplication.ReplicationActor#RecoveryTimeout does not have a correspondent in current version
12 | ProblemFilters.exclude[MissingClassProblem]("lerna.akka.entityreplication.ReplicationActor$RecoveryTimeout$")
13 | # object lerna.akka.entityreplication.ReplicationActor#Snapshot does not have a correspondent in current version
14 | ProblemFilters.exclude[MissingClassProblem]("lerna.akka.entityreplication.ReplicationActor$Snapshot$")
15 | # class lerna.akka.entityreplication.ReplicationActor#TakeSnapshot does not have a correspondent in current version
16 | ProblemFilters.exclude[MissingClassProblem]("lerna.akka.entityreplication.ReplicationActor$TakeSnapshot")
17 | # class lerna.akka.entityreplication.raft.RaftProtocol#ReplicationSucceeded is declared final in current version
18 | ProblemFilters.exclude[FinalClassProblem]("lerna.akka.entityreplication.raft.RaftProtocol$ReplicationSucceeded")
19 | # class lerna.akka.entityreplication.raft.RaftProtocol#Command is declared final in current version
20 | ProblemFilters.exclude[FinalClassProblem]("lerna.akka.entityreplication.raft.RaftProtocol$Command")
21 | # class lerna.akka.entityreplication.raft.RaftProtocol#Replicate is declared final in current version
22 | ProblemFilters.exclude[FinalClassProblem]("lerna.akka.entityreplication.raft.RaftProtocol$Replicate")
23 | # class lerna.akka.entityreplication.raft.RaftProtocol#ForwardedCommand is declared final in current version
24 | ProblemFilters.exclude[FinalClassProblem]("lerna.akka.entityreplication.raft.RaftProtocol$ForwardedCommand")
25 | # class lerna.akka.entityreplication.raft.RaftProtocol#Replica is declared final in current version
26 | ProblemFilters.exclude[FinalClassProblem]("lerna.akka.entityreplication.raft.RaftProtocol$Replica")
27 |
--------------------------------------------------------------------------------
/core/src/main/mima-filters/1.0.0.backwards.excludes/pr-66-implement-typed-cluster-replication.excludes:
--------------------------------------------------------------------------------
1 | # [ClusterReplication(system): ClusterReplication] can be used as before.
2 | #
3 | # static method apply(akka.actor.ActorSystem)lerna.akka.entityreplication.ClusterReplication in class lerna.akka.entityreplication.ClusterReplication has a different result type in current version, where it is akka.actor.Extension rather than lerna.akka.entityreplication.ClusterReplication
4 | ProblemFilters.exclude[IncompatibleResultTypeProblem]("lerna.akka.entityreplication.ClusterReplication.apply")
5 | # method this(akka.actor.ActorSystem)Unit in class lerna.akka.entityreplication.ClusterReplication's type is different in current version, where it is (akka.actor.ExtendedActorSystem)Unit instead of (akka.actor.ActorSystem)Unit
6 | ProblemFilters.exclude[IncompatibleMethTypeProblem]("lerna.akka.entityreplication.ClusterReplication.this")
7 | # method apply(akka.actor.ActorSystem)lerna.akka.entityreplication.ClusterReplication in object lerna.akka.entityreplication.ClusterReplication has a different result type in current version, where it is akka.actor.Extension rather than lerna.akka.entityreplication.ClusterReplication
8 | ProblemFilters.exclude[IncompatibleResultTypeProblem]("lerna.akka.entityreplication.ClusterReplication.apply")
9 |
--------------------------------------------------------------------------------
/core/src/main/mima-filters/1.0.0.backwards.excludes/pr-88-add-apis-for-persisting-to-multiple-data-stores.excludes:
--------------------------------------------------------------------------------
1 | # ClusterReplicationSettings constructor doesn't expose to users.
2 | # Also, they can still create the instance by [ClusterReplicationSettings(system)].
3 | #
4 | # declaration of class lerna.akka.entityreplication.ClusterReplicationSettings is interface lerna.akka.entityreplication.ClusterReplicationSettings in current version; changing class to interface breaks client code
5 | ProblemFilters.exclude[IncompatibleTemplateDefProblem]("lerna.akka.entityreplication.ClusterReplicationSettings")
6 |
--------------------------------------------------------------------------------
/core/src/main/mima-filters/1.0.0.backwards.excludes/pr-92-version-up-scalapb.excludes:
--------------------------------------------------------------------------------
1 | # Automatically generated code
2 | # It is not supposed to be called by the library user.
3 | # Serialization compatibility is guaranteed by Protocol Buffers.
4 | ProblemFilters.exclude[Problem]("lerna.akka.entityreplication.protobuf.msg.*")
5 |
--------------------------------------------------------------------------------
/core/src/main/mima-filters/2.0.0.backwards.excludes/pr-114-efficient-recovery-of-commit-log-store.excludes:
--------------------------------------------------------------------------------
1 | # ClusterReplicationSettings should not be extended by users
2 | ProblemFilters.exclude[ReversedMissingMethodProblem]("lerna.akka.entityreplication.ClusterReplicationSettings.withEventSourcedSnapshotStorePluginId")
3 | ProblemFilters.exclude[ReversedMissingMethodProblem]("lerna.akka.entityreplication.typed.ClusterReplicationSettings.withEventSourcedSnapshotStorePluginId")
--------------------------------------------------------------------------------
/core/src/main/mima-filters/2.0.0.backwards.excludes/pr-119-raft-actor-auto-start.excludes:
--------------------------------------------------------------------------------
1 | # It is safe to exclude the following since ClusterReplicationGuardian is package-private.
2 | ProblemFilters.exclude[DirectMissingMethodProblem]("lerna.akka.entityreplication.ClusterReplicationGuardian#Start.copy")
3 | ProblemFilters.exclude[DirectMissingMethodProblem]("lerna.akka.entityreplication.ClusterReplicationGuardian#Start.this")
4 | ProblemFilters.exclude[MissingTypesProblem]("lerna.akka.entityreplication.ClusterReplicationGuardian$Start$")
5 | ProblemFilters.exclude[DirectMissingMethodProblem]("lerna.akka.entityreplication.ClusterReplicationGuardian#Start.apply")
6 | ProblemFilters.exclude[IncompatibleSignatureProblem]("lerna.akka.entityreplication.ClusterReplicationGuardian#Start.unapply")
--------------------------------------------------------------------------------
/core/src/main/mima-filters/2.0.0.backwards.excludes/pr-128-fix-inconsistency-after-installsnapshot.excludes:
--------------------------------------------------------------------------------
1 | # It is safe to exclude the following since CompactionCompletedTag (that is renamed to EntitySnapshotsUpdatedTag) is package-private.
2 | ProblemFilters.exclude[MissingClassProblem]("lerna.akka.entityreplication.raft.persistence.CompactionCompletedTag")
3 | ProblemFilters.exclude[MissingClassProblem]("lerna.akka.entityreplication.raft.persistence.CompactionCompletedTag$")
4 | # It is safe to exclude the following since SnapshotSyncManager is package-private.
5 | ProblemFilters.exclude[MissingClassProblem]("lerna.akka.entityreplication.raft.snapshot.sync.SnapshotSyncManager$CompactionEnvelope")
6 | ProblemFilters.exclude[MissingClassProblem]("lerna.akka.entityreplication.raft.snapshot.sync.SnapshotSyncManager$CompactionEnvelope$")
7 | ProblemFilters.exclude[MissingClassProblem]("lerna.akka.entityreplication.raft.snapshot.sync.SnapshotSyncManager$SyncCompleteAll")
8 | ProblemFilters.exclude[MissingClassProblem]("lerna.akka.entityreplication.raft.snapshot.sync.SnapshotSyncManager$SyncCompleteAll$")
9 |
--------------------------------------------------------------------------------
/core/src/main/mima-filters/2.0.0.backwards.excludes/pr-134-fix-lost-committed-events-by-moving-leader.excludes:
--------------------------------------------------------------------------------
 1 | # It is safe to exclude the following since the case classes used in protobuf are not part of the API intended for users
2 | ProblemFilters.exclude[IncompatibleResultTypeProblem]("lerna.akka.entityreplication.protobuf.msg.SnapshotStatus.$default$3")
3 | ProblemFilters.exclude[IncompatibleSignatureProblem]("lerna.akka.entityreplication.protobuf.msg.SnapshotStatus.unapply")
4 | ProblemFilters.exclude[IncompatibleResultTypeProblem]("lerna.akka.entityreplication.protobuf.msg.SnapshotStatus.apply$default$3")
5 | ProblemFilters.exclude[DirectMissingMethodProblem]("lerna.akka.entityreplication.protobuf.msg.SnapshotStatus.apply")
6 | ProblemFilters.exclude[DirectMissingMethodProblem]("lerna.akka.entityreplication.protobuf.msg.SnapshotStatus.of")
7 | ProblemFilters.exclude[DirectMissingMethodProblem]("lerna.akka.entityreplication.protobuf.msg.SnapshotStatus.copy")
8 | ProblemFilters.exclude[IncompatibleResultTypeProblem]("lerna.akka.entityreplication.protobuf.msg.SnapshotStatus.copy$default$3")
9 | ProblemFilters.exclude[DirectMissingMethodProblem]("lerna.akka.entityreplication.protobuf.msg.SnapshotStatus.this")
10 | ProblemFilters.exclude[IncompatibleResultTypeProblem]("lerna.akka.entityreplication.protobuf.msg.SnapshotStatus.$default$3")
11 | ProblemFilters.exclude[DirectMissingMethodProblem]("lerna.akka.entityreplication.protobuf.msg.SnapshotStatus.of")
12 | ProblemFilters.exclude[DirectMissingMethodProblem]("lerna.akka.entityreplication.protobuf.msg.SnapshotStatus.apply")
13 | ProblemFilters.exclude[IncompatibleResultTypeProblem]("lerna.akka.entityreplication.protobuf.msg.SnapshotStatus.apply$default$3")
14 | ProblemFilters.exclude[IncompatibleSignatureProblem]("lerna.akka.entityreplication.protobuf.msg.SnapshotStatus.unapply")
15 |
--------------------------------------------------------------------------------
/core/src/main/mima-filters/2.0.0.backwards.excludes/pr-137-event-sourcing-progress-tracking.excludes:
--------------------------------------------------------------------------------
1 | # It is safe to exclude the following since these classes are package private.
2 | ProblemFilters.exclude[MissingClassProblem]("lerna.akka.entityreplication.raft.eventsourced.CommitLogStore")
3 | ProblemFilters.exclude[MissingClassProblem]("lerna.akka.entityreplication.raft.eventsourced.ShardedCommitLogStore")
4 |
--------------------------------------------------------------------------------
/core/src/main/mima-filters/2.0.0.backwards.excludes/pr-155-improve-entity-recovery-process.excludes:
--------------------------------------------------------------------------------
1 | # These command classes are enclosed with the package private object of [entityreplication]
2 | # related discussion: https://github.com/lightbend/mima/issues/53
3 | ProblemFilters.exclude[MissingClassProblem]("lerna.akka.entityreplication.raft.RaftProtocol$RequestRecovery")
4 | ProblemFilters.exclude[MissingClassProblem]("lerna.akka.entityreplication.raft.RaftProtocol$RequestRecovery$")
5 |
--------------------------------------------------------------------------------
/core/src/main/mima-filters/2.1.0.backwards.excludes/pr-151-fix-append-entries-handling.excludes:
--------------------------------------------------------------------------------
 1 | # It's OK to exclude the following since protobuf.msg.AppendedEntries is not intended to be used by users.
2 | ProblemFilters.exclude[DirectMissingMethodProblem]("lerna.akka.entityreplication.protobuf.msg.AppendedEntries.$default$4")
3 | ProblemFilters.exclude[IncompatibleSignatureProblem]("lerna.akka.entityreplication.protobuf.msg.AppendedEntries.unapply")
4 | ProblemFilters.exclude[DirectMissingMethodProblem]("lerna.akka.entityreplication.protobuf.msg.AppendedEntries.apply$default$4")
5 | ProblemFilters.exclude[DirectMissingMethodProblem]("lerna.akka.entityreplication.protobuf.msg.AppendedEntries.apply")
6 | ProblemFilters.exclude[DirectMissingMethodProblem]("lerna.akka.entityreplication.protobuf.msg.AppendedEntries.of")
7 | ProblemFilters.exclude[DirectMissingMethodProblem]("lerna.akka.entityreplication.protobuf.msg.AppendedEntries.PREV_LOG_INDEX_FIELD_NUMBER")
8 | ProblemFilters.exclude[DirectMissingMethodProblem]("lerna.akka.entityreplication.protobuf.msg.AppendedEntries.prevLogIndex")
9 | ProblemFilters.exclude[DirectMissingMethodProblem]("lerna.akka.entityreplication.protobuf.msg.AppendedEntries.withPrevLogIndex")
10 | ProblemFilters.exclude[DirectMissingMethodProblem]("lerna.akka.entityreplication.protobuf.msg.AppendedEntries.copy")
11 | ProblemFilters.exclude[IncompatibleResultTypeProblem]("lerna.akka.entityreplication.protobuf.msg.AppendedEntries.copy$default$3")
12 | ProblemFilters.exclude[DirectMissingMethodProblem]("lerna.akka.entityreplication.protobuf.msg.AppendedEntries.copy$default$4")
13 | ProblemFilters.exclude[DirectMissingMethodProblem]("lerna.akka.entityreplication.protobuf.msg.AppendedEntries.this")
14 |
15 | # It's safe to exclude the following since RaftActor#AppendedEntries is package-private.
16 | ProblemFilters.exclude[DirectMissingMethodProblem]("lerna.akka.entityreplication.raft.RaftActor#AppendedEntries.prevLogIndex")
17 | ProblemFilters.exclude[DirectMissingMethodProblem]("lerna.akka.entityreplication.raft.RaftActor#AppendedEntries.copy")
18 | ProblemFilters.exclude[DirectMissingMethodProblem]("lerna.akka.entityreplication.raft.RaftActor#AppendedEntries.copy$default$3")
19 | ProblemFilters.exclude[DirectMissingMethodProblem]("lerna.akka.entityreplication.raft.RaftActor#AppendedEntries.this")
20 | ProblemFilters.exclude[MissingTypesProblem]("lerna.akka.entityreplication.raft.RaftActor$AppendedEntries$")
21 | ProblemFilters.exclude[DirectMissingMethodProblem]("lerna.akka.entityreplication.raft.RaftActor#AppendedEntries.apply")
22 | ProblemFilters.exclude[IncompatibleSignatureProblem]("lerna.akka.entityreplication.raft.RaftActor#AppendedEntries.unapply")
--------------------------------------------------------------------------------
/core/src/main/mima-filters/2.1.0.backwards.excludes/pr-159-refactor-replicate-message.excludes:
--------------------------------------------------------------------------------
1 | # It's safe to exclude the following since RaftProtocol is package-private.
2 | ProblemFilters.exclude[IncompatibleTemplateDefProblem]("lerna.akka.entityreplication.raft.RaftProtocol$Replicate")
3 | ProblemFilters.exclude[MissingTypesProblem]("lerna.akka.entityreplication.raft.RaftProtocol$Replicate$")
4 | ProblemFilters.exclude[DirectMissingMethodProblem]("lerna.akka.entityreplication.raft.RaftProtocol#Replicate.apply")
5 |
--------------------------------------------------------------------------------
/core/src/main/mima-filters/2.1.0.backwards.excludes/pr-161-replication-failure-based-on-entitys-applied-index.excludes:
--------------------------------------------------------------------------------
1 | # It's safe to exclude the following since RaftProtocol is package-private.
2 | ProblemFilters.exclude[DirectMissingMethodProblem]("lerna.akka.entityreplication.raft.RaftProtocol#Replicate.unapply")
3 |
--------------------------------------------------------------------------------
/core/src/main/mima-filters/2.1.0.backwards.excludes/pr-172-impl-shard-id-extractor.excludes:
--------------------------------------------------------------------------------
 1 | # A new method was added to lerna.akka.entityreplication.typed.ClusterReplication.
2 | ProblemFilters.exclude[ReversedMissingMethodProblem]("lerna.akka.entityreplication.typed.ClusterReplication.shardIdOf")
3 |
--------------------------------------------------------------------------------
/core/src/main/mima-filters/2.1.0.backwards.excludes/pr-186-allow-only-specific-actors-to-become-leader.excludes:
--------------------------------------------------------------------------------
1 | # ClusterReplicationSettings should not be extended by users
2 | ProblemFilters.exclude[ReversedMissingMethodProblem]("lerna.akka.entityreplication.ClusterReplicationSettings.withStickyLeaders")
--------------------------------------------------------------------------------
/core/src/main/mima-filters/2.1.0.backwards.excludes/pr-188-set-disabled-shards-only-api.excludes:
--------------------------------------------------------------------------------
1 | # ClusterReplicationSettings should not be extended by users
2 | ProblemFilters.exclude[ReversedMissingMethodProblem]("lerna.akka.entityreplication.ClusterReplicationSettings.withDisabledShards")
3 |
--------------------------------------------------------------------------------
/core/src/main/mima-filters/2.1.0.backwards.excludes/pr-189-typed-cluster-replication-settings-supports-new-methods.excludes:
--------------------------------------------------------------------------------
1 | # typed.ClusterReplicationSettings should not be extended by users
2 | ProblemFilters.exclude[ReversedMissingMethodProblem]("lerna.akka.entityreplication.typed.ClusterReplicationSettings.withDisabledShards")
3 | ProblemFilters.exclude[ReversedMissingMethodProblem]("lerna.akka.entityreplication.typed.ClusterReplicationSettings.withStickyLeaders")
4 |
--------------------------------------------------------------------------------
/core/src/main/mima-filters/2.3.0.backwards.excludes/pr-217-fix-old-snapshot-read-in-consecutive-snapshot-synchronization.excludes:
--------------------------------------------------------------------------------
1 | # It's safe to exclude the following since Stop is a private class.
2 | ProblemFilters.exclude[MissingClassProblem]("lerna.akka.entityreplication.raft.snapshot.sync.SnapshotSyncManager$Stop$")
3 |
--------------------------------------------------------------------------------
/core/src/main/resources/example.conf:
--------------------------------------------------------------------------------
1 | akka {
2 |
3 | actor {
4 | provider = "cluster"
5 |
6 | allow-java-serialization = off
7 | }
8 |
9 | remote {
10 | artery {
11 | transport = tcp
12 | canonical {
13 | hostname = "localhost"
14 | port = 25521
15 | }
16 | }
17 | }
18 |
19 | cluster {
20 | seed-nodes = [
21 | "akka://ClusterSystem@localhost:25521"
22 | ]
23 | roles = ["request", "replica-group-1"] // "replica-group-1" was selected from lerna.akka.entityreplication.raft.multi-raft-roles
24 | sharding {
25 | role = "request"
26 | }
27 |
28 | auto-down-unreachable-after = off
29 | }
30 |
31 | loggers = ["akka.event.slf4j.Slf4jLogger"]
32 | loglevel = "DEBUG"
33 | logging-filter = "akka.event.slf4j.Slf4jLoggingFilter"
34 | }
35 |
--------------------------------------------------------------------------------
/core/src/main/scala/akka/lerna/DeferredBehavior.scala:
--------------------------------------------------------------------------------
1 | package akka.lerna
2 |
/**
 * Expose internal API of Akka to use it in [[lerna]] package.
 *
 * Extends `akka.actor.typed.internal.BehaviorImpl.DeferredBehavior`, which lives in an
 * Akka-internal package; placing this subclass under the `akka` package makes that
 * superclass accessible, so lerna code can extend this class instead.
 */
abstract class DeferredBehavior[Command] extends akka.actor.typed.internal.BehaviorImpl.DeferredBehavior[Command]
7 |
--------------------------------------------------------------------------------
/core/src/main/scala/akka/lerna/InternalActorRefProxy.scala:
--------------------------------------------------------------------------------
1 | package akka.lerna
2 |
3 | import akka.actor.ActorRefProvider
4 | import akka.actor.typed.ActorRef
5 | import akka.actor.typed.internal.adapter.ActorRefAdapter
6 |
object InternalActorRefProxy {

  /** Wraps the given typed [[ActorRef]] in a proxy exposing Akka-internal operations. */
  def apply[T](ref: ActorRef[T]): InternalActorRefProxy[T] =
    new InternalActorRefProxy[T](ref)
}
11 |
/** Exposes Akka-internal operations (`provider`, `isTerminated`) of a typed [[ActorRef]]
  * via its classic representation.
  */
class InternalActorRefProxy[T](ref: ActorRef[T]) {

  // Converted once up front; both accessors delegate to this classic ref.
  private[this] val underlying = ActorRefAdapter.toClassic(ref)

  /** The [[ActorRefProvider]] of the underlying classic actor ref. */
  def provider: ActorRefProvider = underlying.provider

  /** Whether the underlying actor has terminated. */
  def isTerminated: Boolean = underlying.isTerminated
}
20 |
--------------------------------------------------------------------------------
/core/src/main/scala/akka/lerna/InternalRecipientRef.scala:
--------------------------------------------------------------------------------
1 | package akka.lerna
2 |
/**
 * Expose internal API of Akka to use it in [[lerna]] package.
 *
 * `akka.actor.typed.internal.InternalRecipientRef` is Akka-internal; this trait lives in
 * the `akka` package so lerna code can refer to it through this alias instead.
 */
trait InternalRecipientRef[-T] extends akka.actor.typed.internal.InternalRecipientRef[T]
7 |
--------------------------------------------------------------------------------
/core/src/main/scala/akka/lerna/StashFactory.scala:
--------------------------------------------------------------------------------
1 | package akka.lerna
2 |
3 | import akka.actor.{ ActorContext, ActorRef, StashSupport }
4 |
/** Proxy for using `def createStash` from [[lerna]] package.
  *
  * Mixes in `akka.actor.StashFactory` and forwards `createStash` to the original
  * implementation unchanged; the override keeps the `protected[akka]` visibility.
  */
trait StashFactory extends akka.actor.StashFactory { this: akka.actor.Actor =>
  // Pure delegation — no behavior change over the Akka implementation.
  override final protected[akka] def createStash()(implicit ctx: ActorContext, ref: ActorRef): StashSupport =
    super.createStash()(ctx, ref)
}
11 |
--------------------------------------------------------------------------------
/core/src/main/scala/lerna/akka/entityreplication/ClusterReplicationSerializable.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication
2 | private[entityreplication] trait ClusterReplicationSerializable extends Serializable
3 |
--------------------------------------------------------------------------------
/core/src/main/scala/lerna/akka/entityreplication/ClusterReplicationSettings.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication
2 |
3 | import akka.actor.ActorSystem
4 | import akka.cluster.Cluster
5 | import com.typesafe.config.Config
6 | import lerna.akka.entityreplication.internal.ClusterReplicationSettingsImpl
7 | import lerna.akka.entityreplication.raft.RaftSettings
8 | import lerna.akka.entityreplication.raft.routing.MemberIndex
9 |
10 | import scala.concurrent.duration.FiniteDuration
11 |
object ClusterReplicationSettings {

  /** Creates settings from the actor system's configuration and cluster roles. */
  @deprecated("Use typed.ClusterReplicationSettings instead", since = "2.0.0")
  def apply(system: ActorSystem): ClusterReplicationSettings = create(system)

  // for internal use
  private[entityreplication] def create(system: ActorSystem): ClusterReplicationSettings = {
    val cluster = Cluster(system)
    // The member's cluster roles are passed along; presumably used to derive the
    // member index this node serves — confirm in ClusterReplicationSettingsImpl.
    ClusterReplicationSettingsImpl(system.settings.config, cluster.settings.Roles)
  }
}

trait ClusterReplicationSettings {

  /*
   * NOTE:
   * When you changed this API,
   * make sure that we don't have to also change [lerna.akka.entityreplication.typed.ClusterReplicationSettings].
   */

  /** The configuration these settings were created from. */
  def config: Config

  /** Timeout for entity recovery. */
  def recoveryEntityTimeout: FiniteDuration

  /** Raft-related settings. */
  def raftSettings: RaftSettings

  /** All member indexes (replica groups) configured for the cluster. */
  def allMemberIndexes: Set[MemberIndex]

  /** The member index this node belongs to. */
  def selfMemberIndex: MemberIndex

  /** Returns settings with the given shard IDs disabled. */
  def withDisabledShards(disabledShards: Set[String]): ClusterReplicationSettings

  /** Returns settings with the given sticky-leader mapping.
    * NOTE(review): key/value semantics of the map are not visible here — confirm at the implementation.
    */
  def withStickyLeaders(stickyLeaders: Map[String, String]): ClusterReplicationSettings

  /** Returns settings using the given journal plugin ID for Raft persistence. */
  def withRaftJournalPluginId(pluginId: String): ClusterReplicationSettings

  /** Returns settings using the given snapshot-store plugin ID for Raft persistence. */
  def withRaftSnapshotPluginId(pluginId: String): ClusterReplicationSettings

  /** Returns settings using the given query plugin ID for Raft persistence. */
  def withRaftQueryPluginId(pluginId: String): ClusterReplicationSettings

  /** Returns settings using the given journal plugin ID for event-sourcing persistence. */
  def withEventSourcedJournalPluginId(pluginId: String): ClusterReplicationSettings

  /** Returns settings using the given snapshot-store plugin ID for event-sourcing persistence. */
  def withEventSourcedSnapshotStorePluginId(pluginId: String): ClusterReplicationSettings

}
57 |
--------------------------------------------------------------------------------
/core/src/main/scala/lerna/akka/entityreplication/ReplicationActorContext.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication
2 |
3 | import akka.actor.ActorRef
4 |
5 | private[entityreplication] class ReplicationActorContext(val entityId: String, val shard: ActorRef)
6 |
--------------------------------------------------------------------------------
/core/src/main/scala/lerna/akka/entityreplication/model/EntityInstanceId.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.model
2 |
3 | private[entityreplication] final case class EntityInstanceId(underlying: Int) extends AnyVal
4 |
--------------------------------------------------------------------------------
/core/src/main/scala/lerna/akka/entityreplication/model/NormalizedEntityId.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.model
2 |
3 | import akka.actor.ActorPath
4 | import akka.util.ByteString
5 | import lerna.akka.entityreplication.ReplicationRegion.EntityId
6 |
7 | import java.net.{ URLDecoder, URLEncoder }
8 |
private[entityreplication] object NormalizedEntityId {

  /** Normalizes a raw entity ID by URL-encoding it (UTF-8). */
  def from(entityId: EntityId): NormalizedEntityId = new NormalizedEntityId(URLEncoder.encode(entityId, "utf-8"))

  /** Takes the entity actor path's name as the (already encoded) entity ID. */
  def of(entityPath: ActorPath): NormalizedEntityId = new NormalizedEntityId(entityPath.name)

  /** Wraps a value the caller guarantees to be URL-encoded already. */
  private[entityreplication] def fromEncodedValue(encodedEntityId: EntityId): NormalizedEntityId =
    new NormalizedEntityId(encodedEntityId)
}

/** URL-encoded entity ID; `underlying` holds the encoded form, [[raw]] decodes it back. */
private[entityreplication] final case class NormalizedEntityId private (underlying: String) extends AnyVal {

  /** The original (decoded) entity ID. */
  def raw: EntityId = URLDecoder.decode(underlying, ByteString.UTF_8)
}
22 |
--------------------------------------------------------------------------------
/core/src/main/scala/lerna/akka/entityreplication/model/NormalizedShardId.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.model
2 |
3 | import java.net.{ URLDecoder, URLEncoder }
4 |
5 | import akka.actor.ActorPath
6 |
/** Factory methods for [[NormalizedShardId]]. */
private[entityreplication] object NormalizedShardId {

  /** Normalizes a raw shard ID by URL-encoding it (UTF-8). */
  def from(shardId: String): NormalizedShardId = {
    val encoded = URLEncoder.encode(shardId, "utf-8")
    new NormalizedShardId(encoded)
  }

  /** Takes the actor path's name as the (already encoded) shard ID. */
  private[entityreplication] def from(path: ActorPath) = new NormalizedShardId(path.name)

  /** Wraps a value the caller guarantees to be URL-encoded already. */
  private[entityreplication] def fromEncodedValue(encodedShardId: String): NormalizedShardId =
    new NormalizedShardId(encodedShardId)
}

/** URL-encoded shard ID; `underlying` holds the encoded form, [[raw]] decodes it back. */
private[entityreplication] final case class NormalizedShardId private (underlying: String) extends AnyVal {

  /** The original (decoded) shard ID. */
  def raw: String = URLDecoder.decode(underlying, "utf-8")
}
19 |
--------------------------------------------------------------------------------
/core/src/main/scala/lerna/akka/entityreplication/model/TypeName.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.model
2 |
3 | import java.net.URLEncoder
4 |
/** Factory for [[TypeName]]. */
private[entityreplication] object TypeName {

  /** URL-encodes the given type name (UTF-8) so it contains only URL-safe characters. */
  def from(typeName: String): TypeName = {
    val encoded = URLEncoder.encode(typeName, "utf-8")
    new TypeName(encoded)
  }
}

/** A URL-encoded type name; `toString` yields the encoded value. */
private[entityreplication] final class TypeName private (val underlying: String) extends AnyVal {
  override def toString: String = underlying
}
12 |
--------------------------------------------------------------------------------
/core/src/main/scala/lerna/akka/entityreplication/protobuf/OffsetEnvelope.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.protobuf
2 |
3 | import akka.persistence.query.{ Sequence, TimeBasedUUID }
4 | import lerna.akka.entityreplication.ClusterReplicationSerializable
5 |
/** Serializable envelopes for Akka Persistence Query offsets. */
private[protobuf] sealed trait OffsetEnvelope

/** Envelope carrying no offset. */
private[protobuf] object NoOffsetEnvelope extends OffsetEnvelope with ClusterReplicationSerializable

/** Envelope wrapping a sequence-number-based [[Sequence]] offset. */
private[protobuf] case class SequenceEnvelope(underlying: Sequence)
    extends OffsetEnvelope
    with ClusterReplicationSerializable

/** Envelope wrapping a [[TimeBasedUUID]] offset. */
private[protobuf] case class TimeBasedUUIDEnvelope(underlying: TimeBasedUUID)
    extends OffsetEnvelope
    with ClusterReplicationSerializable
14 |
--------------------------------------------------------------------------------
/core/src/main/scala/lerna/akka/entityreplication/raft/eventsourced/InternalEvent.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.raft.eventsourced
2 |
3 | import lerna.akka.entityreplication.ClusterReplicationSerializable
4 |
/**
 * InternalEvent must also be persisted so that log entry indexes stay aligned.
 * (Translated from the original Japanese comment.)
 */
private[entityreplication] case object InternalEvent extends ClusterReplicationSerializable
9 |
--------------------------------------------------------------------------------
/core/src/main/scala/lerna/akka/entityreplication/raft/model/ClientContext.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.raft.model
2 |
3 | import akka.actor.ActorRef
4 | import lerna.akka.entityreplication.model.EntityInstanceId
5 |
/** Holds the reply destination for a request, together with the optional entity
  * instance that issued it and the optional original sender to propagate.
  *
  * @param ref          the actor that receives forwarded messages
  * @param instanceId   the entity instance associated with the request, if any
  * @param originSender the sender to attach to forwarded messages, if any
  */
private[entityreplication] final case class ClientContext(
    ref: ActorRef,
    instanceId: Option[EntityInstanceId],
    originSender: Option[ActorRef],
) {

  /** Sends `message` to `ref`, attaching `originSender` as the sender
    * ([[ActorRef.noSender]] when absent).
    */
  def forward(message: Any): Unit =
    originSender match {
      case Some(sender) => ref.tell(message, sender)
      case None         => ref.tell(message, ActorRef.noSender)
    }
}
21 |
--------------------------------------------------------------------------------
/core/src/main/scala/lerna/akka/entityreplication/raft/model/EntityEvent.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.raft.model
2 |
3 | import lerna.akka.entityreplication.model.NormalizedEntityId
4 |
5 | private[entityreplication] final case class EntityEvent(entityId: Option[NormalizedEntityId], event: Any)
6 |
--------------------------------------------------------------------------------
/core/src/main/scala/lerna/akka/entityreplication/raft/model/LogEntry.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.raft.model
2 |
private[entityreplication] object LogEntry {

  /** Creates a log entry; `index` must be greater than `LogEntryIndex.initial()`. */
  def apply(index: LogEntryIndex, event: EntityEvent, term: Term) =
    new LogEntry(index, event, term)
}

/** A single entry of the replicated log.
  *
  * Equality and hash code are based on `index` and `term` only — `event` is deliberately
  * excluded. NOTE(review): presumably relying on Raft's log-matching property (same index
  * and term implies same event) — confirm.
  */
private[entityreplication] class LogEntry(val index: LogEntryIndex, val event: EntityEvent, val term: Term)
    extends Serializable {
  // Index 0 is the reserved initial index; real entries start at 1.
  require(index > LogEntryIndex.initial())

  def canEqual(other: Any): Boolean = other.isInstanceOf[LogEntry]

  override def equals(other: Any): Boolean =
    other match {
      case that: LogEntry =>
        (that canEqual this) &&
        index == that.index &&
        term == that.term
      case _ => false
    }

  override def hashCode(): Int = {
    // 31-based combination over the same fields used by equals (index and term).
    val state = Seq(index, term)
    state.map(_.hashCode()).foldLeft(0)((a, b) => 31 * a + b)
  }

  override def toString = s"LogEntry($index, $event, $term)"
}
31 |
--------------------------------------------------------------------------------
/core/src/main/scala/lerna/akka/entityreplication/raft/model/LogEntryIndex.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.raft.model
2 |
3 | import lerna.akka.entityreplication.raft.model.exception.SeqIndexOutOfBoundsException
4 |
private[entityreplication] object LogEntryIndex {

  /** The index before any real log entry (0); real entries start at `initial().next()`. */
  def initial(): LogEntryIndex = LogEntryIndex(0)

  /** Returns the smaller of the two indexes. */
  def min(a: LogEntryIndex, b: LogEntryIndex): LogEntryIndex = {
    if (a <= b) a else b
  }
}

/** A position in the replicated log, backed by a non-negative `Long`. */
private[entityreplication] final case class LogEntryIndex(private[entityreplication] val underlying: Long)
    extends Ordered[LogEntryIndex] {
  require(underlying >= 0)

  /** The next index (this + 1). */
  def next(): LogEntryIndex = copy(underlying + 1)

  /** The index `count` positions ahead.
    * NOTE(review): a negative `count` would move backwards and can trip the `require`
    * above; Long overflow is also unchecked — confirm callers pass small non-negative values.
    */
  def plus(count: Int): LogEntryIndex = copy(underlying + count)

  /** The previous index, or `initial()` when already at the initial index. */
  def prev(): LogEntryIndex =
    if (underlying > 0) copy(underlying - 1)
    else LogEntryIndex.initial()

  override def compare(that: LogEntryIndex): Int =
    underlying.compareTo(that.underlying)

  override def toString: String = underlying.toString

  /** Converts this index into a 0-based position within a sequence whose first element
    * corresponds to index `offset + 1`.
    *
    * Throws [[SeqIndexOutOfBoundsException]] when the position exceeds `Int.MaxValue`.
    * NOTE(review): when `offset >= this` the result is negative and NOT rejected here —
    * callers presumably guarantee `offset < this`; confirm.
    */
  def toSeqIndex(offset: LogEntryIndex): Int = {
    val maybeSeqIndex = underlying - offset.underlying - 1
    if (maybeSeqIndex > Int.MaxValue) {
      throw SeqIndexOutOfBoundsException(this, offset)
    } else {
      maybeSeqIndex.toInt
    }
  }
}
40 |
--------------------------------------------------------------------------------
/core/src/main/scala/lerna/akka/entityreplication/raft/model/MatchIndex.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.raft.model
2 |
3 | import lerna.akka.entityreplication.raft.routing.MemberIndex
4 |
/** Tracks, per follower member, the highest log entry index known to be replicated
  * (cf. Raft's `matchIndex`).
  */
private[entityreplication] final case class MatchIndex(indexes: Map[MemberIndex, LogEntryIndex] = Map()) {

  /** Returns a new MatchIndex recording `index` for `follower`, overwriting any previous value. */
  def update(follower: MemberIndex, index: LogEntryIndex): MatchIndex =
    copy(indexes = indexes.updated(follower, index))

  /** Counts how many followers have a recorded index satisfying `predicate`. */
  def countMatch(predicate: LogEntryIndex => Boolean): Int =
    indexes.values.count(predicate)
}
15 |
--------------------------------------------------------------------------------
/core/src/main/scala/lerna/akka/entityreplication/raft/model/NextIndex.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.raft.model
2 |
3 | import lerna.akka.entityreplication.raft.routing.MemberIndex
4 |
/** Tracks, per follower member, the next log entry index to send (cf. Raft's `nextIndex`).
  * Followers without a recorded value default to the index just past the leader's last entry.
  */
private[entityreplication] final case class NextIndex(
    leaderLog: ReplicatedLog,
    indexes: Map[MemberIndex, LogEntryIndex] = Map(),
) {

  /** Default next index: one past the leader log's last entry (1 for an empty log). */
  val initialLogIndex: LogEntryIndex =
    leaderLog.lastOption.fold(LogEntryIndex.initial())(_.index).next()

  /** The next index recorded for `followerMemberIndex`, falling back to [[initialLogIndex]]. */
  def apply(followerMemberIndex: MemberIndex): LogEntryIndex =
    indexes.getOrElse(followerMemberIndex, initialLogIndex)

  /** Returns a new NextIndex recording `index` for `followerMemberIndex`. */
  def update(followerMemberIndex: MemberIndex, index: LogEntryIndex): NextIndex =
    copy(indexes = indexes.updated(followerMemberIndex, index))
}
20 |
--------------------------------------------------------------------------------
/core/src/main/scala/lerna/akka/entityreplication/raft/model/NoOp.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.raft.model
2 |
3 | import lerna.akka.entityreplication.ClusterReplicationSerializable
4 |
5 | private[entityreplication] case object NoOp extends ClusterReplicationSerializable
6 |
--------------------------------------------------------------------------------
/core/src/main/scala/lerna/akka/entityreplication/raft/model/RaftMember.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.raft.model
2 |
3 | import akka.actor.{ ActorPath, ActorSelection }
4 |
/** A member of a Raft member group: its actor path and a selection for messaging it. */
private[entityreplication] final case class RaftMember(path: ActorPath, selection: ActorSelection) {

  // Only the path identifies the member; the selection is omitted from toString.
  override def toString: String = s"RaftMember(${path.toString})"
}
9 |
--------------------------------------------------------------------------------
/core/src/main/scala/lerna/akka/entityreplication/raft/model/SnapshotStatus.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.raft.model
2 |
private[entityreplication] object SnapshotStatus {

  /** A status with no snapshots persisted yet: every term and index at its initial value. */
  def empty: SnapshotStatus = {
    val initialTerm  = Term.initial()
    val initialIndex = LogEntryIndex.initial()
    SnapshotStatus(
      snapshotLastTerm = initialTerm,
      snapshotLastLogIndex = initialIndex,
      targetSnapshotLastTerm = initialTerm,
      targetSnapshotLastLogIndex = initialIndex,
    )
  }
}
12 |
/**
 * Tracks status of snapshots.
 *
 * Invariants (checked at construction): the persisted term/index never exceed their targets.
 *
 * @param snapshotLastTerm Maximum [[Term]] in all persisted snapshots
 * @param snapshotLastLogIndex Maximum [[LogEntryIndex]] in all persisted snapshots
 * @param targetSnapshotLastTerm Maximum [[Term]] of snapshots that might be persisted
 * @param targetSnapshotLastLogIndex Maximum [[LogEntryIndex]] of snapshots that might be persisted
 */
private[entityreplication] final case class SnapshotStatus(
    snapshotLastTerm: Term,
    snapshotLastLogIndex: LogEntryIndex,
    targetSnapshotLastTerm: Term,
    targetSnapshotLastLogIndex: LogEntryIndex,
) {

  require(
    snapshotLastTerm <= targetSnapshotLastTerm,
    s"snapshotLastTerm[$snapshotLastTerm] must not exceed targetSnapshotLastTerm[$targetSnapshotLastTerm]" +
    s" (snapshotLastLogIndex[$snapshotLastLogIndex], targetSnapshotLastLogIndex[$targetSnapshotLastLogIndex])",
  )
  require(
    snapshotLastLogIndex <= targetSnapshotLastLogIndex,
    s"snapshotLastLogIndex[$snapshotLastLogIndex] must not exceed targetSnapshotLastLogIndex[$targetSnapshotLastLogIndex]" +
    s" (snapshotLastTerm[$snapshotLastTerm], targetSnapshotLastTerm[$targetSnapshotLastTerm])",
  )

  /** Marks all snapshots as fully persisted up to the given term/index: persisted values and
   * targets become identical, so [[isDirty]] turns `false`.
   */
  def updateSnapshotsCompletely(snapshotLastTerm: Term, snapshotLastLogIndex: LogEntryIndex): SnapshotStatus =
    copy(
      snapshotLastTerm = snapshotLastTerm,
      snapshotLastLogIndex = snapshotLastLogIndex,
      targetSnapshotLastTerm = snapshotLastTerm,
      targetSnapshotLastLogIndex = snapshotLastLogIndex,
    )

  /** Records the start of snapshot synchronization by advancing only the target term/index. */
  def startSnapshotSync(snapshotLastTerm: Term, snapshotLastLogIndex: LogEntryIndex): SnapshotStatus =
    copy(
      targetSnapshotLastTerm = snapshotLastTerm,
      targetSnapshotLastLogIndex = snapshotLastLogIndex,
    )

  /**
   * `true` means snapshot-synchronization has started but it has not completed yet.
   */
  def isDirty: Boolean =
    !(snapshotLastTerm == targetSnapshotLastTerm && snapshotLastLogIndex == targetSnapshotLastLogIndex)
}
63 |
--------------------------------------------------------------------------------
/core/src/main/scala/lerna/akka/entityreplication/raft/model/SnapshottingProgress.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.raft.model
2 |
3 | import lerna.akka.entityreplication.model.NormalizedEntityId
4 |
private[entityreplication] object SnapshottingProgress {

  /** A progress with nothing started: initial term/index and no entities tracked. */
  def empty: SnapshottingProgress =
    SnapshottingProgress(
      snapshotLastLogTerm = Term.initial(),
      snapshotLastLogIndex = LogEntryIndex.initial(),
      inProgressEntities = Set.empty,
      completedEntities = Set.empty,
    )
}
14 |
/** Tracks which entities have finished taking a snapshot for a given log term/index.
 *
 * @param snapshotLastLogTerm  the term the snapshots cover
 * @param snapshotLastLogIndex the log index the snapshots cover
 * @param inProgressEntities   entities still taking their snapshot
 * @param completedEntities    entities whose snapshot completed
 */
private[entityreplication] final case class SnapshottingProgress(
    snapshotLastLogTerm: Term,
    snapshotLastLogIndex: LogEntryIndex,
    inProgressEntities: Set[NormalizedEntityId],
    completedEntities: Set[NormalizedEntityId],
) {

  /** `true` while at least one entity has not completed its snapshot. */
  def isInProgress: Boolean = inProgressEntities.nonEmpty

  /** `true` once no entity remains in progress (the complement of [[isInProgress]]). */
  def isCompleted: Boolean = !isInProgress

  /** Moves `entityId` from the in-progress set to the completed set.
   *
   * @throws IllegalArgumentException if `snapshotLastLogIndex` differs from the tracked one
   */
  def recordSnapshottingComplete(
      snapshotLastLogIndex: LogEntryIndex,
      entityId: NormalizedEntityId,
  ): SnapshottingProgress = {
    require(
      snapshotLastLogIndex == this.snapshotLastLogIndex,
      s"snapshotLastLogIndexes should be same (current: ${this.snapshotLastLogIndex}, got: ${snapshotLastLogIndex})",
    )
    copy(
      inProgressEntities = inProgressEntities - entityId,
      completedEntities = completedEntities + entityId,
    )
  }
}
37 |
--------------------------------------------------------------------------------
/core/src/main/scala/lerna/akka/entityreplication/raft/model/Term.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.raft.model
2 |
private[entityreplication] object Term {

  /** The initial term (0), used before any election has taken place.
   * Fix: the public factory previously had no explicit result type; Scala style requires
   * one on non-private API to keep the inferred type from silently changing.
   */
  def initial(): Term = new Term(0)
}

/** A Raft term number. Comparison follows the underlying numeric value. */
private[entityreplication] final case class Term(term: Long) extends Ordered[Term] {

  /** Returns the successor term (`term + 1`). */
  def next(): Term = this.copy(term = term + 1)

  /** `true` if this term is strictly less than `other`. */
  def isOlderThan(other: Term): Boolean = this.term < other.term

  /** `true` if this term is strictly greater than `other`. */
  def isNewerThan(other: Term): Boolean = this.term > other.term

  override def compare(that: Term): Int =
    term.compareTo(that.term)
}
15 |
--------------------------------------------------------------------------------
/core/src/main/scala/lerna/akka/entityreplication/raft/model/exception/SeqIndexOutOfBoundsException.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.raft.model.exception
2 |
3 | import lerna.akka.entityreplication.raft.model.LogEntryIndex
4 |
/** Raised when translating the [[LogEntryIndex]] pair (`self` relative to `offset`) into a
 * Seq position yields an out-of-range value.
 * NOTE(review): a case class extending an exception gains structural `equals`/`hashCode`,
 * which differs from the usual reference equality of Throwables — keep in mind when matching.
 */
private[entityreplication] final case class SeqIndexOutOfBoundsException(self: LogEntryIndex, offset: LogEntryIndex)
    extends RuntimeException(s"The Seq index of $self from $offset is out of bounds")
7 |
--------------------------------------------------------------------------------
/core/src/main/scala/lerna/akka/entityreplication/raft/persistence/EntitySnapshotsUpdatedTag.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.raft.persistence
2 |
3 | import lerna.akka.entityreplication.model.NormalizedShardId
4 | import lerna.akka.entityreplication.raft.routing.MemberIndex
5 |
/** Journal tag identifying entity-snapshot-update events for one (member, shard) pair. */
private[entityreplication] final case class EntitySnapshotsUpdatedTag(
    memberIndex: MemberIndex,
    shardId: NormalizedShardId,
) {
  private[this] val delimiter = ":"

  // Do not change this tag format for compatibility
  override def toString: String =
    Seq("CompactionCompleted", shardId.underlying, memberIndex.role).mkString(delimiter)
}
15 |
--------------------------------------------------------------------------------
/core/src/main/scala/lerna/akka/entityreplication/raft/persistence/RaftEventAdapter.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.raft.persistence
2 |
3 | import akka.persistence.journal.{ EventAdapter, EventSeq, Tagged }
4 | import lerna.akka.entityreplication.raft.RaftActor.CompactionCompleted
5 | import lerna.akka.entityreplication.raft.snapshot.sync.SnapshotSyncManager.SnapshotCopied
6 |
/** Persistence event adapter that tags snapshot-related Raft events with
 * [[EntitySnapshotsUpdatedTag]] on the write side; reads pass events through unchanged.
 */
private[entityreplication] class RaftEventAdapter extends EventAdapter {

  override def manifest(event: Any): String = "" // No need

  override def fromJournal(event: Any, manifest: String): EventSeq = EventSeq.single(event)

  override def toJournal(event: Any): Any =
    event match {
      case e: CompactionCompleted => tagged(e, EntitySnapshotsUpdatedTag(e.memberIndex, e.shardId))
      case e: SnapshotCopied      => tagged(e, EntitySnapshotsUpdatedTag(e.memberIndex, e.shardId))
      case other                  => other
    }

  // Wraps an event with its single query tag.
  private def tagged(event: Any, tag: EntitySnapshotsUpdatedTag): Tagged =
    Tagged(event, Set(tag.toString))
}
23 |
--------------------------------------------------------------------------------
/core/src/main/scala/lerna/akka/entityreplication/raft/protocol/EntityPassivationPermitCommands.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.raft.protocol
2 |
3 | import lerna.akka.entityreplication.ClusterReplicationSerializable
4 | import lerna.akka.entityreplication.model.{ NormalizedEntityId, NormalizedShardId }
5 |
/** Non-leader RaftActors (followers or candidates) will send this message to the leader RaftActor if it wants to passivate
 * the entity with `entityId`. The leader should reply with [[EntityPassivationPermitResponse]] to the sender RaftActor.
 */
private[entityreplication] final case class EntityPassivationPermitRequest(
    shardId: NormalizedShardId,
    entityId: NormalizedEntityId,
    stopMessage: Any,
) extends ShardRequest
    with ClusterReplicationSerializable

/** The leader RaftActor will send this message back to the sender RaftActor (follower or candidate). */
private[entityreplication] sealed trait EntityPassivationPermitResponse extends ClusterReplicationSerializable

/** The leader RaftActor will send this message back to the sender RaftActor (follower or candidate) if it permits the
 * passivation request ([[EntityPassivationPermitRequest]]). Carries back the `stopMessage`
 * the requester asked to deliver to the entity.
 */
private[entityreplication] final case class EntityPassivationPermitted(
    entityId: NormalizedEntityId,
    stopMessage: Any,
) extends EntityPassivationPermitResponse

/** The leader RaftActor will send this message back to the sender RaftActor (follower or candidate) if it denies the
 * passivation request ([[EntityPassivationPermitRequest]]).
 */
private[entityreplication] final case class EntityPassivationDenied(
    entityId: NormalizedEntityId,
) extends EntityPassivationPermitResponse
33 |
--------------------------------------------------------------------------------
/core/src/main/scala/lerna/akka/entityreplication/raft/protocol/FetchEntityEvents.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.raft.protocol
2 |
3 | import akka.actor.typed.ActorRef
4 | import lerna.akka.entityreplication.model.NormalizedEntityId
5 | import lerna.akka.entityreplication.raft.model.{ LogEntry, LogEntryIndex }
6 | import lerna.akka.entityreplication.typed.ClusterReplication.ShardCommand
7 |
/** Shard command requesting the log entries of `entityId` in the index range `from`..`to`;
 * the shard answers `replyTo` with a [[FetchEntityEventsResponse]].
 * NOTE(review): whether the `to` bound is inclusive is not visible here — confirm at the handler.
 */
private[entityreplication] final case class FetchEntityEvents(
    entityId: NormalizedEntityId,
    from: LogEntryIndex,
    to: LogEntryIndex,
    replyTo: ActorRef[FetchEntityEventsResponse],
) extends ShardCommand

/** The log entries found for a [[FetchEntityEvents]] request (possibly empty). */
private[entityreplication] final case class FetchEntityEventsResponse(events: Seq[LogEntry])
16 |
--------------------------------------------------------------------------------
/core/src/main/scala/lerna/akka/entityreplication/raft/protocol/ShardRequest.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.raft.protocol
2 |
3 | import lerna.akka.entityreplication.model.NormalizedShardId
4 |
/** A message addressed to a specific shard; `shardId` is used for routing. */
private[entityreplication] trait ShardRequest {
  def shardId: NormalizedShardId
}
8 |
--------------------------------------------------------------------------------
/core/src/main/scala/lerna/akka/entityreplication/raft/protocol/SnapshotOffer.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.raft.protocol
2 |
// Wraps a recovered snapshot handed to an entity.
// NOTE(review): mirrors Akka Persistence's SnapshotOffer; presumably delivered during entity
// recovery — confirm at the usage sites.
final case class SnapshotOffer(snapshot: Any)
4 |
--------------------------------------------------------------------------------
/core/src/main/scala/lerna/akka/entityreplication/raft/protocol/SuspendEntity.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.raft.protocol
2 |
3 | import lerna.akka.entityreplication.ClusterReplicationSerializable
4 | import lerna.akka.entityreplication.model.{ NormalizedEntityId, NormalizedShardId }
5 |
/** Shard request asking to stop the entity `entityId` by delivering `stopMessage` to it. */
private[entityreplication] final case class SuspendEntity(
    shardId: NormalizedShardId,
    entityId: NormalizedEntityId,
    stopMessage: Any,
) extends ShardRequest
    with ClusterReplicationSerializable
--------------------------------------------------------------------------------
/core/src/main/scala/lerna/akka/entityreplication/raft/protocol/TryCreateEntity.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.raft.protocol
2 |
3 | import lerna.akka.entityreplication.ClusterReplicationSerializable
4 | import lerna.akka.entityreplication.model.{ NormalizedEntityId, NormalizedShardId }
5 |
/** Shard request asking the shard to create the entity `entityId` if it does not exist yet. */
private[entityreplication] final case class TryCreateEntity(shardId: NormalizedShardId, entityId: NormalizedEntityId)
    extends ShardRequest
    with ClusterReplicationSerializable
9 |
--------------------------------------------------------------------------------
/core/src/main/scala/lerna/akka/entityreplication/raft/routing/MemberIndex.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.raft.routing
2 |
3 | import java.net.URLEncoder
4 |
private[entityreplication] object MemberIndex {

  /** Creates a [[MemberIndex]] from a raw role name. The role is URL-encoded —
   * presumably so the value is safe to embed in actor names/paths; confirm at usage sites.
   */
  def apply(role: String): MemberIndex = new MemberIndex(URLEncoder.encode(role, "utf-8"))

  /** Restores a [[MemberIndex]] from an already URL-encoded value (no re-encoding). */
  private[entityreplication] def fromEncodedValue(encodedRole: String): MemberIndex = new MemberIndex(encodedRole)
}

/** Identifies a replication member by its (URL-encoded) cluster role. */
private[entityreplication] final case class MemberIndex private (role: String) {
  override def toString: String = role
}
13 |
--------------------------------------------------------------------------------
/core/src/main/scala/lerna/akka/entityreplication/raft/snapshot/ShardSnapshotStore.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.raft.snapshot
2 |
3 | import akka.actor.{ Actor, ActorLogging, ActorRef, Props }
4 | import lerna.akka.entityreplication.model.{ NormalizedEntityId, TypeName }
5 | import lerna.akka.entityreplication.raft.RaftSettings
6 | import lerna.akka.entityreplication.raft.routing.MemberIndex
7 |
private[entityreplication] object ShardSnapshotStore {

  /** Props for a [[ShardSnapshotStore]] serving one (typeName, memberIndex) pair. */
  def props(typeName: TypeName, settings: RaftSettings, selfMemberIndex: MemberIndex): Props =
    Props(new ShardSnapshotStore(typeName, settings, selfMemberIndex))

}
14 |
/** Parent actor that routes [[SnapshotProtocol.Command]]s to one [[SnapshotStore]] child per
 * entity, creating children lazily on first use.
 */
private[entityreplication] class ShardSnapshotStore(
    typeName: TypeName,
    settings: RaftSettings,
    selfMemberIndex: MemberIndex,
) extends Actor
    with ActorLogging {
  import SnapshotProtocol._

  override def receive: Receive = {
    case command: Command =>
      snapshotStore(command.entityId).forward(command)
  }

  /** Returns the per-entity SnapshotStore child, spawning it if it does not exist yet. */
  def snapshotStore(entityId: NormalizedEntityId): ActorRef = {
    val name = snapshotStoreName(typeName, entityId)
    context.child(name) match {
      case Some(existing) => existing
      case None           => context.actorOf(SnapshotStore.props(typeName, entityId, settings, selfMemberIndex), name)
    }
  }

  /** Deterministic child name so lookups and spawns agree. */
  def snapshotStoreName(typeName: TypeName, entityId: NormalizedEntityId): String =
    s"SnapshotStore-${typeName.underlying}-${entityId.underlying}"
}
37 |
--------------------------------------------------------------------------------
/core/src/main/scala/lerna/akka/entityreplication/raft/snapshot/SnapshotProtocol.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.raft.snapshot
2 |
3 | import akka.actor.ActorRef
4 | import lerna.akka.entityreplication.ClusterReplicationSerializable
5 | import lerna.akka.entityreplication.model.NormalizedEntityId
6 | import lerna.akka.entityreplication.raft.model.LogEntryIndex
7 |
private[entityreplication] object SnapshotProtocol {

  /** A request to a snapshot store, routed by the entity it concerns. */
  sealed trait Command {
    def entityId: NormalizedEntityId
  }
  /** Asks the store to persist `snapshot`; a [[SaveSnapshotResponse]] goes to `replyTo`. */
  final case class SaveSnapshot(snapshot: EntitySnapshot, replyTo: ActorRef) extends Command {
    override def entityId: NormalizedEntityId = snapshot.metadata.entityId
  }
  /** Asks the store for the snapshot of `entityId`; a [[FetchSnapshotResponse]] goes to `replyTo`. */
  final case class FetchSnapshot(entityId: NormalizedEntityId, replyTo: ActorRef) extends Command

  sealed trait Response {
    def metadata: EntitySnapshotMetadata
  }
  sealed trait SaveSnapshotResponse extends Response
  final case class SaveSnapshotSuccess(metadata: EntitySnapshotMetadata) extends SaveSnapshotResponse
  final case class SaveSnapshotFailure(metadata: EntitySnapshotMetadata) extends SaveSnapshotResponse
  sealed trait FetchSnapshotResponse
  final case class SnapshotFound(snapshot: EntitySnapshot) extends FetchSnapshotResponse
  final case class SnapshotNotFound(entityId: NormalizedEntityId) extends FetchSnapshotResponse

  // Opaque, application-defined entity state captured in a snapshot.
  final case class EntityState(underlying: Any)
  /** Which entity a snapshot belongs to and up to which log entry it covers. */
  final case class EntitySnapshotMetadata(
      entityId: NormalizedEntityId,
      logEntryIndex: LogEntryIndex,
  )
  /** A persisted snapshot: metadata plus the captured state. */
  final case class EntitySnapshot(
      metadata: EntitySnapshotMetadata,
      state: EntityState,
  ) extends ClusterReplicationSerializable
}
38 |
--------------------------------------------------------------------------------
/core/src/main/scala/lerna/akka/entityreplication/testkit/TestReplicationActor.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.testkit
2 |
3 | import akka.actor.{ Actor, Props, Terminated }
4 | import lerna.akka.entityreplication.ReplicationRegion.Passivate
5 | import lerna.akka.entityreplication.raft.model.LogEntryIndex
6 | import lerna.akka.entityreplication.raft.protocol.{ FetchEntityEvents, FetchEntityEventsResponse }
7 | import lerna.akka.entityreplication.raft.snapshot.SnapshotProtocol
8 |
/** Test stand-in for the replication machinery: hosts a single child built from
 * `replicationActorProps` and immediately satisfies its snapshot/event/replication
 * requests, so the actor can be exercised like a plain actor without a cluster.
 */
protected[testkit] class TestReplicationActor(replicationActorProps: Props) extends Actor {
  import lerna.akka.entityreplication.raft.RaftProtocol._

  private[this] val replicationActor = context.watch(context.actorOf(replicationActorProps))

  // recoveryIndex is arbitrary value but recoveryIndex is greater than 0 normally.
  replicationActor ! Activate(self, recoveryIndex = LogEntryIndex.initial().next())

  override def receive: Receive = active(LogEntryIndex(1))

  /** Message handling; `dummyLogEntryIndex` advances by one per accepted replication. */
  def active(dummyLogEntryIndex: LogEntryIndex): Receive = {
    // Pretend no snapshot exists, so the child always recovers from empty state.
    case fetchSnapshot: SnapshotProtocol.FetchSnapshot =>
      fetchSnapshot.replyTo ! SnapshotProtocol.SnapshotNotFound(fetchSnapshot.entityId)
    // Pretend the journal is empty, too.
    case fetchEvents: FetchEntityEvents =>
      fetchEvents.replyTo ! FetchEntityEventsResponse(Seq())
    // Accept every replication immediately, preserving the original sender if present.
    case replicate: Replicate =>
      val sender = replicate.originSender.getOrElse(self)
      replicate.replyTo.tell(ReplicationSucceeded(replicate.event, dummyLogEntryIndex, replicate.instanceId), sender)
      context.become(active(dummyLogEntryIndex.next()))
    // Grant passivation only for our own child; anything else goes to dead letters.
    case passivate: Passivate =>
      if (passivate.entityPath == replicationActor.path) {
        replicationActor ! passivate.stopMessage
      } else {
        context.system.deadLetters ! passivate
      }
    // Stop ourselves once the child terminates.
    case Terminated(`replicationActor`) =>
      context.stop(self)
    // Everything else is treated as an entity command.
    case message =>
      replicationActor forward ProcessCommand(message)
  }
}
40 |
--------------------------------------------------------------------------------
/core/src/main/scala/lerna/akka/entityreplication/testkit/TestReplicationActorProps.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.testkit
2 |
3 | import akka.actor.Props
4 | import lerna.akka.entityreplication.ReplicationActor
5 |
/**
 * The [[TestReplicationActorProps]] allows to test [[ReplicationActor]] like a normal Actor.
 */
@deprecated(message = "Use typed.testkit.ReplicatedEntityBehaviorTestKit instead", since = "2.0.0")
object TestReplicationActorProps {

  /** Wraps `replicationActorProps` in a [[TestReplicationActor]].
   *
   * @throws IllegalArgumentException if the Props does not instantiate a [[ReplicationActor]]
   */
  def apply(replicationActorProps: Props): Props = {
    val actorClass = replicationActorProps.actorClass()
    // Guard clause: reject Props that do not produce a ReplicationActor.
    if (!classOf[ReplicationActor[_]].isAssignableFrom(actorClass)) {
      throw new IllegalArgumentException(
        s"The Props for [$actorClass] doesn't provide ReplicationActor",
      )
    }
    Props(new TestReplicationActor(replicationActorProps))
  }
}
22 |
--------------------------------------------------------------------------------
/core/src/main/scala/lerna/akka/entityreplication/typed/ClusterReplication.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.typed
2 |
3 | import akka.actor.typed.{ ActorRef, ActorSystem, Extension, ExtensionId }
4 | import lerna.akka.entityreplication.typed.internal.ClusterReplicationImpl
5 |
object ClusterReplication extends ExtensionId[ClusterReplication] {

  override def createExtension(system: ActorSystem[_]): ClusterReplication = new ClusterReplicationImpl(system)

  // Marker trait for messages handled by shard actors (as opposed to entities).
  trait ShardCommand
}

/**
 * This extension provides fast recovery by creating replicas of entities in multiple locations
 * and always synchronizing their status.
 */
trait ClusterReplication extends Extension {

  /** Starts replication for the given entity type and returns an [[ActorRef]] that accepts
   * envelopes of type `E` addressed to individual entities.
   */
  def init[M, E](entity: ReplicatedEntity[M, E]): ActorRef[E]

  /**
   * Create an [[ActorRef]]-like reference to a specific replicated entity.
   */
  def entityRefFor[M](typeKey: ReplicatedEntityTypeKey[M], entityId: String): ReplicatedEntityRef[M]

  /**
   * Extract shard id of given entity id.
   *
   * @param typeKey the entity type key the entity belongs to
   * @param entityId the entity id whose shard is to be resolved
   * @tparam M the type parameter of the typeKey
   * @return shard id
   */
  def shardIdOf[M](typeKey: ReplicatedEntityTypeKey[M], entityId: String): String
}
36 |
--------------------------------------------------------------------------------
/core/src/main/scala/lerna/akka/entityreplication/typed/ClusterReplicationSettings.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.typed
2 |
3 | import akka.actor.typed.ActorSystem
4 | import akka.cluster.Cluster
5 | import lerna.akka.entityreplication.internal.ClusterReplicationSettingsImpl
6 | import lerna.akka.{ entityreplication => classic }
7 |
object ClusterReplicationSettings {

  /** Creates settings from the system's configuration and the node's cluster roles. */
  def apply(system: ActorSystem[_]): ClusterReplicationSettings = {
    val cluster = Cluster(system)
    ClusterReplicationSettingsImpl(system.settings.config, cluster.settings.Roles)
  }
}

/** Typed variant of the settings; `withXxx` methods are overridden to return this type. */
trait ClusterReplicationSettings extends classic.ClusterReplicationSettings {

  /*
   * NOTE:
   * When you changed this API,
   * make sure that we don't have to also change [lerna.akka.entityreplication.ClusterReplicationSettings].
   *
   * This API currently has same API with (classic) ClusterReplicationSettings
   * but 'withXxx' methods override classic one because they should return own type.
   * This trait allows us to use typed API by just importing lerna.akka.entityreplication.typed._.
   */

  override def withDisabledShards(disabledShards: Set[String]): ClusterReplicationSettings

  override def withStickyLeaders(stickyLeaders: Map[String, String]): ClusterReplicationSettings

  override def withRaftJournalPluginId(pluginId: String): ClusterReplicationSettings

  override def withRaftSnapshotPluginId(pluginId: String): ClusterReplicationSettings

  override def withRaftQueryPluginId(pluginId: String): ClusterReplicationSettings

  override def withEventSourcedJournalPluginId(pluginId: String): ClusterReplicationSettings

  override def withEventSourcedSnapshotStorePluginId(pluginId: String): ClusterReplicationSettings

}
43 |
--------------------------------------------------------------------------------
/core/src/main/scala/lerna/akka/entityreplication/typed/ReplicatedEntity.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.typed
2 |
3 | import akka.actor.typed.Behavior
4 | import lerna.akka.entityreplication.typed.internal.ReplicatedEntityImpl
5 |
object ReplicatedEntity {

  /**
   * Creates a [[ReplicatedEntity]] from its type key and a behavior factory; the factory is
   * invoked once per entity instance with that entity's context.
   */
  def apply[M](typeKey: ReplicatedEntityTypeKey[M])(
      createBehavior: ReplicatedEntityContext[M] => Behavior[M],
  ): ReplicatedEntity[M, ReplicationEnvelope[M]] = ReplicatedEntityImpl(typeKey, createBehavior)
}

/**
 * Defines how the entity should be created. Used in [[ClusterReplication.init()]].
 */
trait ReplicatedEntity[M, E] {

  /** Factory producing the entity's behavior from its per-instance context. */
  def createBehavior: ReplicatedEntityContext[M] => Behavior[M]

  def typeKey: ReplicatedEntityTypeKey[M]

  /** Returns a copy using the given settings instead of the system defaults. */
  def withSettings(settings: ClusterReplicationSettings): ReplicatedEntity[M, E]

  // None means "use the system default settings".
  private[entityreplication] def settings: Option[ClusterReplicationSettings]
}
29 |
--------------------------------------------------------------------------------
/core/src/main/scala/lerna/akka/entityreplication/typed/ReplicatedEntityBehavior.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.typed
2 |
3 | import akka.actor.typed.Signal
4 | import akka.lerna.DeferredBehavior
5 | import lerna.akka.entityreplication.typed.internal.behavior.ReplicatedEntityBehaviorImpl
6 |
object ReplicatedEntityBehavior {

  /** Maps the current state and an incoming command to an [[Effect]] (events to replicate, replies, …). */
  type CommandHandler[Command, Event, State] = (State, Command) => Effect[Event, State]

  /** Applies a replicated event to the state, producing the next state. */
  type EventHandler[State, Event] = (State, Event) => State

  /** Creates the behavior for one replicated entity instance. */
  def apply[Command, Event, State](
      entityContext: ReplicatedEntityContext[Command],
      emptyState: State,
      commandHandler: CommandHandler[Command, Event, State],
      eventHandler: EventHandler[State, Event],
  ): ReplicatedEntityBehavior[Command, Event, State] =
    new ReplicatedEntityBehaviorImpl[Command, Event, State](
      entityContext,
      emptyState,
      commandHandler,
      eventHandler,
    )
}

trait ReplicatedEntityBehavior[Command, Event, State] extends DeferredBehavior[Command] {

  def entityContext: ReplicatedEntityContext[Command]

  /** Returns a copy that runs `signalHandler` on actor signals (e.g. lifecycle events). */
  def receiveSignal(
      signalHandler: PartialFunction[(State, Signal), Unit],
  ): ReplicatedEntityBehavior[Command, Event, State]

  def signalHandler: PartialFunction[(State, Signal), Unit]

  /** Returns a copy using `message` as the command delivered to stop (passivate) the entity. */
  def withStopMessage(message: Command): ReplicatedEntityBehavior[Command, Event, State]
}
39 |
--------------------------------------------------------------------------------
/core/src/main/scala/lerna/akka/entityreplication/typed/ReplicatedEntityContext.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.typed
2 |
3 | import akka.actor.typed.ActorRef
4 |
/** Per-instance context handed to an entity's behavior factory: its type key, its id, and a
 * reference to the shard actor hosting it.
 */
class ReplicatedEntityContext[M](
    val entityTypeKey: ReplicatedEntityTypeKey[M],
    val entityId: String,
    val shard: ActorRef[ClusterReplication.ShardCommand],
)
10 |
--------------------------------------------------------------------------------
/core/src/main/scala/lerna/akka/entityreplication/typed/ReplicatedEntityTypeKey.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.typed
2 |
3 | import lerna.akka.entityreplication.typed.internal.ReplicatedEntityTypeKeyImpl
4 |
5 | import scala.reflect.ClassTag
6 |
object ReplicatedEntityTypeKey {

  /**
   * Creates an [[ReplicatedEntityTypeKey]]. The `name` must be unique.
   * The runtime class name of `M` is captured for diagnostics (see the impl's `toString`).
   */
  def apply[M](name: String)(implicit mTag: ClassTag[M]): ReplicatedEntityTypeKey[M] =
    ReplicatedEntityTypeKeyImpl(name, mTag.runtimeClass.getName)
}

/**
 * The key of an entity type, the `name` must be unique.
 */
trait ReplicatedEntityTypeKey[-M] {

  def name: String
}
23 |
--------------------------------------------------------------------------------
/core/src/main/scala/lerna/akka/entityreplication/typed/ReplicationEnvelope.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.typed
2 |
3 | import akka.actor.WrappedMessage
4 | import lerna.akka.entityreplication.ClusterReplicationSerializable
5 |
/**
 * Envelope type that is used with Cluster Replication: wraps `message` together with the
 * `entityId` it should be routed to.
 */
final case class ReplicationEnvelope[M](entityId: String, message: M)
    extends WrappedMessage
    with ClusterReplicationSerializable
12 |
--------------------------------------------------------------------------------
/core/src/main/scala/lerna/akka/entityreplication/typed/internal/ReplicatedEntityImpl.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.typed.internal
2 |
3 | import akka.actor.typed.Behavior
4 | import lerna.akka.entityreplication.typed.{
5 | ClusterReplicationSettings,
6 | ReplicatedEntity,
7 | ReplicatedEntityContext,
8 | ReplicatedEntityTypeKey,
9 | }
10 |
/** Default [[ReplicatedEntity]] implementation; `settings = None` means system defaults. */
private[entityreplication] final case class ReplicatedEntityImpl[M, E](
    typeKey: ReplicatedEntityTypeKey[M],
    createBehavior: ReplicatedEntityContext[M] => Behavior[M],
    settings: Option[ClusterReplicationSettings] = None,
) extends ReplicatedEntity[M, E] {

  // Option(...) (not Some(...)) so a null argument becomes None instead of Some(null).
  override def withSettings(settings: ClusterReplicationSettings): ReplicatedEntity[M, E] =
    copy(settings = Option(settings))
}
20 |
--------------------------------------------------------------------------------
/core/src/main/scala/lerna/akka/entityreplication/typed/internal/ReplicatedEntityRefImpl.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.typed.internal
2 |
3 | import akka.actor.ActorRefProvider
4 | import akka.actor.typed.{ ActorRef, ActorSystem, Scheduler }
5 | import akka.lerna.{ InternalActorRefProxy, InternalRecipientRef }
6 | import akka.pattern.StatusReply
7 | import akka.util.{ ByteString, Timeout }
8 | import lerna.akka.entityreplication.typed.{ ReplicatedEntityRef, ReplicatedEntityTypeKey, ReplicationEnvelope }
9 | import akka.actor.typed.scaladsl.AskPattern._
10 | import lerna.akka.entityreplication.util.ActorIds
11 |
12 | import java.net.URLEncoder
13 | import scala.concurrent.Future
14 |
/** [[ReplicatedEntityRef]] backed by the replication region actor: every interaction wraps
 * the message in a [[ReplicationEnvelope]] addressed to `entityId`.
 * Equality is by (entityId, typeKey) only, deliberately ignoring region/system.
 */
private[entityreplication] class ReplicatedEntityRefImpl[-M](
    override val typeKey: ReplicatedEntityTypeKey[M],
    override val entityId: String,
    replicationRegion: ActorRef[ReplicationEnvelope[M]],
    system: ActorSystem[_],
) extends ReplicatedEntityRef[M]
    with InternalRecipientRef[M] {

  override def refPrefix: String =
    ActorIds.actorName(Seq(typeKey.name, entityId).map(URLEncoder.encode(_, ByteString.UTF_8)): _*)

  private[this] implicit val scheduler: Scheduler = system.scheduler

  override def tell(message: M): Unit =
    replicationRegion ! ReplicationEnvelope(entityId, message)

  override def ask[Reply](message: ActorRef[Reply] => M)(implicit timeout: Timeout): Future[Reply] = {
    replicationRegion.ask[Reply](replyTo => ReplicationEnvelope(entityId, message(replyTo)))
  }

  // Fix: the parameter was misspelled "massage"; renamed to "message" for consistency with
  // tell/ask. Overriding parameter names need not match the trait's, and named-argument
  // call sites resolve against the trait's declaration, so callers are unaffected.
  override def askWithStatus[Reply](
      message: ActorRef[StatusReply[Reply]] => M,
  )(implicit timeout: Timeout): Future[Reply] = {
    replicationRegion.askWithStatus[Reply](replyTo => ReplicationEnvelope(entityId, message(replyTo)))
  }

  // Delegate the InternalRecipientRef plumbing to the region's internal ref.
  private[this] val internalActorRef = InternalActorRefProxy(replicationRegion)

  override def provider: ActorRefProvider = internalActorRef.provider

  override def isTerminated: Boolean = internalActorRef.isTerminated

  override def toString: String = s"ReplicatedEntityRef($typeKey, $entityId)"

  override def equals(other: Any): Boolean =
    other match {
      case that: ReplicatedEntityRefImpl[_] =>
        entityId == that.entityId &&
        typeKey == that.typeKey
      case _ => false
    }

  override def hashCode(): Int = {
    val state: Seq[Any] = Seq(entityId, typeKey)
    state.map(_.hashCode()).foldLeft(0)((a, b) => 31 * a + b)
  }
}
62 |
--------------------------------------------------------------------------------
/core/src/main/scala/lerna/akka/entityreplication/typed/internal/ReplicatedEntityTypeKeyImpl.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.typed.internal
2 |
3 | import lerna.akka.entityreplication.typed.ReplicatedEntityTypeKey
4 |
/** Default [[ReplicatedEntityTypeKey]] implementation carrying the key name and the command class name. */
private[entityreplication] final case class ReplicatedEntityTypeKeyImpl[Command](name: String, messageClassName: String)
    extends ReplicatedEntityTypeKey[Command] {
  // Renders e.g. ReplicatedEntityTypeKey[com.example.Command](MyKey)
  override def toString: String = "ReplicatedEntityTypeKey[" + messageClassName + "](" + name + ")"
}
9 |
--------------------------------------------------------------------------------
/core/src/main/scala/lerna/akka/entityreplication/typed/internal/ReplicationId.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.typed.internal
2 |
3 | import lerna.akka.entityreplication.model.NormalizedEntityId
4 | import lerna.akka.entityreplication.typed.ReplicatedEntityTypeKey
5 |
private[entityreplication] object ReplicationId {

  /** Creates a [[ReplicationId]] from a type key and a raw (not yet normalized) entity ID. */
  def apply[Command](entityTypeKey: ReplicatedEntityTypeKey[Command], entityId: String): ReplicationId[Command] = {
    val normalizedEntityId = NormalizedEntityId.from(entityId)
    ReplicationIdImpl(entityTypeKey, normalizedEntityId)
  }
}
11 |
/** Identifies a replicated entity by its type key and normalized entity ID. */
private[entityreplication] trait ReplicationId[Command] {

  /** The type key that classifies the entity. */
  def entityTypeKey: ReplicatedEntityTypeKey[Command]

  /** The normalized entity ID. */
  def entityId: NormalizedEntityId
}
18 |
--------------------------------------------------------------------------------
/core/src/main/scala/lerna/akka/entityreplication/typed/internal/ReplicationIdImpl.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.typed.internal
2 |
3 | import lerna.akka.entityreplication.model.NormalizedEntityId
4 | import lerna.akka.entityreplication.typed.ReplicatedEntityTypeKey
5 |
/** Default case-class implementation of [[ReplicationId]]. */
private[entityreplication] final case class ReplicationIdImpl[Command](
    entityTypeKey: ReplicatedEntityTypeKey[Command],
    entityId: NormalizedEntityId,
) extends ReplicationId[Command]
10 |
--------------------------------------------------------------------------------
/core/src/main/scala/lerna/akka/entityreplication/typed/internal/behavior/BehaviorSetup.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.typed.internal.behavior
2 |
3 | import akka.actor.typed.scaladsl.{ ActorContext, Behaviors, StashBuffer }
4 | import akka.actor.typed.{ ActorRef, Behavior, Signal }
5 | import lerna.akka.entityreplication.ClusterReplicationSettings
6 | import lerna.akka.entityreplication.model.EntityInstanceId
7 | import lerna.akka.entityreplication.typed.{ ClusterReplication, ReplicatedEntityBehavior, ReplicatedEntityContext }
8 | import lerna.akka.entityreplication.raft.RaftProtocol.EntityCommand
9 | import lerna.akka.entityreplication.typed.internal.ReplicationId
10 |
/**
 * Immutable bundle of everything a replicated entity behavior needs at runtime:
 * the user-supplied handlers, identifiers, settings, and actor infrastructure
 * (context, stash buffer, shard reference).
 */
private[entityreplication] final class BehaviorSetup[Command, Event, State](
    val entityContext: ReplicatedEntityContext[Command],
    val emptyState: State,
    val commandHandler: ReplicatedEntityBehavior.CommandHandler[Command, Event, State],
    val eventHandler: ReplicatedEntityBehavior.EventHandler[State, Event],
    val signalHandler: PartialFunction[(State, Signal), Unit],
    val stopMessage: Option[Command],
    val replicationId: ReplicationId[Command],
    val shard: ActorRef[ClusterReplication.ShardCommand],
    val settings: ClusterReplicationSettings,
    val context: ActorContext[EntityCommand],
    val instanceId: EntityInstanceId,
    val stashBuffer: StashBuffer[EntityCommand],
) {

  /**
   * Adapts the user's signal handler to Akka's signal-handling shape for the given state:
   * defined only for signals the user handler accepts with that state, runs the handler
   * for its side effects, and always keeps the current behavior.
   */
  def onSignal(state: State): PartialFunction[(ActorContext[EntityCommand], Signal), Behavior[EntityCommand]] = {
    case (_, signal) if signalHandler.isDefinedAt((state, signal)) =>
      signalHandler((state, signal))
      Behaviors.same
  }
}
32 |
--------------------------------------------------------------------------------
/core/src/main/scala/lerna/akka/entityreplication/typed/internal/effect/EffectBuilderImpl.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.typed.internal.effect
2 |
3 | import akka.actor.typed.ActorRef
4 | import lerna.akka.entityreplication.typed.internal.effect
5 | import lerna.akka.entityreplication.typed.{ Effect, EffectBuilder }
6 |
7 | import scala.collection.immutable
8 |
private[entityreplication] object EffectBuilderImpl {

  /** Creates a builder whose only side effect publishes the command as unhandled. */
  def unhandled[Event, State](mainEffect: MainEffect[Event, State]): EffectBuilder[Event, State] = {
    val unhandledSideEffect = new UnhandledEffect[State]
    EffectBuilderImpl(mainEffect, immutable.Seq(unhandledSideEffect))
  }
}
14 |
/**
 * Default [[EffectBuilder]] implementation: pairs a main effect (what, if anything,
 * is replicated) with an ordered sequence of side effects (replies, callbacks,
 * passivation, ...) executed afterwards.
 */
private[entityreplication] final case class EffectBuilderImpl[+Event, State](
    mainEffect: MainEffect[Event, State],
    sideEffects: immutable.Seq[SideEffect[State]] = immutable.Seq.empty,
) extends EffectBuilder[Event, State] {

  override def thenRun(callback: State => Unit): EffectBuilder[Event, State] = {
    val sideEffect = new Callback(callback)
    decideEnsureConsistency(
      noNeed = {
        copy(sideEffects = sideEffects :+ sideEffect)
      },
      need = {
        // Upgrade the main effect so the callback only runs after consistency is ensured
        copy(EnsureConsistencyEffect(), sideEffects :+ sideEffect)
      },
    )
  }

  override def thenPassivate(): EffectBuilder[Event, State] =
    copy(sideEffects = sideEffects :+ PassivateEffect())

  override def thenStopLocally(): Effect[Event, State] =
    copy(sideEffects = sideEffects :+ StopLocallyEffect()).thenNoReply()

  override def thenUnstashAll(): EffectBuilder[Event, State] =
    copy(sideEffects = sideEffects :+ UnstashAllEffect())

  override def thenReply[Reply](replyTo: ActorRef[Reply])(replyMessage: State => Reply): Effect[Event, State] = {
    val sideEffect = new ReplyEffect[Reply, State](replyTo, replyMessage)
    decideEnsureConsistency(
      noNeed = {
        EffectImpl(mainEffect, sideEffects :+ sideEffect)
      },
      need = {
        // Replying is externally visible, so ensure consistency first
        EffectImpl(EnsureConsistencyEffect(), sideEffects :+ sideEffect)
      },
    )
  }

  override def thenNoReply(): Effect[Event, State] =
    effect.EffectImpl(mainEffect, sideEffects :+ new NoReplyEffect[State]())

  /**
   * SideEffects that may affect outside the entity world must ensure consistency
   *
   * Evaluates `noNeed` when the main effect already provides the needed guarantee
   * (replication / consistency), `need` when it does not (nothing replicated).
   * NOTE: type parameters in the match below are erased; only the runtime class
   * of the main effect is actually distinguished.
   */
  private[this] def decideEnsureConsistency[T](noNeed: => T, need: => T): T =
    mainEffect match {
      case _: ReplicateEffect[Event, State] => noNeed
      case _: ReplicateNothingEffect[Event, State] => need
      case _: EnsureConsistencyEffect[Event, State] => noNeed // e.g. Effect.none.thenRun(...).thenReply(...)(...)
      case _: StashEffect[Event, State] =>
        throw new IllegalStateException("Must not affect outside the entity when stash")
    }
}
68 |
--------------------------------------------------------------------------------
/core/src/main/scala/lerna/akka/entityreplication/typed/internal/effect/EffectImpl.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.typed.internal.effect
2 |
3 | import lerna.akka.entityreplication.typed.Effect
4 |
5 | import scala.collection.immutable
6 |
/** Default [[Effect]] implementation: a main effect plus its ordered side effects. */
private[entityreplication] case class EffectImpl[+Event, State](
    mainEffect: MainEffect[Event, State],
    sideEffects: immutable.Seq[SideEffect[State]],
) extends Effect[Event, State] {
  // Exposes the main effect's event (None when nothing is replicated)
  override def event: Option[Event] = mainEffect.event
}
13 |
--------------------------------------------------------------------------------
/core/src/main/scala/lerna/akka/entityreplication/typed/internal/effect/MainEffect.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.typed.internal.effect
2 |
/** The main effect of handling a command: determines what (if anything) is replicated. */
private[entityreplication] sealed abstract class MainEffect[+Event, State] {
  /** The event that will be replicated, or `None` when this effect replicates nothing. */
  def event: Option[Event]
}

/** Main effect that replicates the given event. */
private[entityreplication] final case class ReplicateEffect[+Event, State](_event: Event)
    extends MainEffect[Event, State] {

  // Option(...) guards against a null event
  override def event: Option[Event] = Option(_event)

  override def toString: String = s"Replicate(${_event.getClass.getName})"
}

/** Main effect that replicates nothing. */
private[entityreplication] final case class ReplicateNothingEffect[+Event, State]() extends MainEffect[Event, State] {

  override def event: Option[Event] = None

  override def toString: String = "ReplicateNothing"
}

/** Main effect that replicates nothing but ensures consistency first (see EffectBuilderImpl.decideEnsureConsistency). */
private[entityreplication] final case class EnsureConsistencyEffect[+Event, State]() extends MainEffect[Event, State] {

  override def event: Option[Event] = None

  override def toString: String = "EnsureConsistency"
}

/** Main effect that stashes the current command. */
private[entityreplication] final case class StashEffect[+Event, State]() extends MainEffect[Event, State] {

  override def event: Option[Event] = None

  override def toString: String = "Stash"
}
35 |
--------------------------------------------------------------------------------
/core/src/main/scala/lerna/akka/entityreplication/typed/internal/effect/SideEffect.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.typed.internal.effect
2 |
3 | import akka.actor.typed.ActorRef
4 |
/** A side effect executed after the main effect (reply, callback, passivation, ...). */
private[entityreplication] sealed abstract class SideEffect[State]

/** Runs an arbitrary callback against the current state; deliberately open for extension (see [[ReplyEffect]]). */
private[entityreplication] class Callback[State](val sideEffect: State => Unit) extends SideEffect[State] {
  override def toString: String = "Callback"
}

/** Sends `message(state)` to `replyTo`; implemented as a [[Callback]]. */
private[entityreplication] final class ReplyEffect[Reply, State](
    replyTo: ActorRef[Reply],
    message: State => Reply,
) extends Callback[State](state => replyTo ! message(state)) {
  override def toString: String = s"Reply(${message.getClass.getName})"
}

/** Marker side effect: no reply is sent. */
private[entityreplication] final class NoReplyEffect[State] extends SideEffect[State] {
  override def toString: String = "NoReply"
}

/**
 * [[UnhandledEffect]] will send the command to the event stream in Akka
 */
private[entityreplication] final class UnhandledEffect[State] extends SideEffect[State] {
  override def toString: String = "Unhandled"
}

/** Marker side effect: passivation of the entity was requested. */
private[entityreplication] case class PassivateEffect[State]() extends SideEffect[State] {
  override def toString: String = "Passivate"
}

/** Marker side effect: the entity actor should be stopped locally. */
private[entityreplication] case class StopLocallyEffect[State]() extends SideEffect[State] {
  override def toString: String = "StopLocally"
}

/** Marker side effect: all stashed commands should be unstashed. */
private[entityreplication] case class UnstashAllEffect[State]() extends SideEffect[State] {
  override def toString: String = "UnstashAll"
}
40 |
--------------------------------------------------------------------------------
/core/src/main/scala/lerna/akka/entityreplication/typed/internal/testkit/CommandResultImpl.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.typed.internal.testkit
2 |
3 | import lerna.akka.entityreplication.typed.testkit.ReplicatedEntityBehaviorTestKit
4 |
5 | import scala.reflect.ClassTag
6 |
/**
 * Result of running one command through the entity behavior test kit:
 * the command, the optionally emitted event, the resulting state, and the optional reply.
 */
private[entityreplication] final case class CommandResultImpl[Command, Event, State, Reply](
    command: Command,
    eventOption: Option[Event],
    state: State,
    replyOption: Option[Reply],
) extends ReplicatedEntityBehaviorTestKit.CommandResultWithReply[Command, Event, State, Reply] {

  override def hasNoEvents: Boolean = eventOption.isEmpty

  // Fails the assertion when the command emitted no event
  override def event: Event =
    eventOption.getOrElse(throw new AssertionError("No event"))

  override def eventOfType[E <: Event: ClassTag]: E = ofType(event, "event")

  override def stateOfType[S <: State: ClassTag]: S = ofType(state, "state")

  // Fails the assertion when the command produced no reply
  override def reply: Reply =
    replyOption.getOrElse(throw new AssertionError("No reply"))

  override def replyOfType[R <: Reply: ClassTag]: R = ofType(reply, "reply")

  // Narrows `obj` to T, failing with a descriptive AssertionError on a class mismatch
  private[this] def ofType[T: ClassTag](obj: Any, objCategory: String): T = {
    val tag = implicitly[ClassTag[T]]
    tag.unapply(obj) match {
      case Some(narrowed) => narrowed
      case None =>
        throw new AssertionError(
          s"Expected $objCategory class [${tag.runtimeClass.getName}], but was [${obj.getClass.getName}]",
        )
    }
  }
}
37 |
--------------------------------------------------------------------------------
/core/src/main/scala/lerna/akka/entityreplication/typed/internal/testkit/RestartResultImpl.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.typed.internal.testkit
2 |
3 | import lerna.akka.entityreplication.typed.testkit.ReplicatedEntityBehaviorTestKit.RestartResult
4 |
5 | private[entityreplication] final case class RestartResultImpl[State](state: State) extends RestartResult[State]
6 |
--------------------------------------------------------------------------------
/core/src/main/scala/lerna/akka/entityreplication/util/ActorIds.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.util
2 |
/** Builds actor names and persistence IDs by joining URL-encoded elements with a delimiter. */
private[entityreplication] object ActorIds {

  // Elements must never contain the delimiter; callers URL-encode them to guarantee this
  private[this] val delimiter = ":"

  /** Creates an actor name from the already URL-encoded elements. */
  def actorName(urlEncodedElements: String*): String = validateAndCreate(urlEncodedElements: _*)

  /** Creates a persistence ID from the already URL-encoded elements. */
  def persistenceId(urlEncodedElements: String*): String = validateAndCreate(urlEncodedElements: _*)

  /**
   * Validates that no element contains the delimiter, then joins them.
   *
   * @throws IllegalArgumentException if any element contains the delimiter
   */
  private[this] def validateAndCreate(urlEncodedElements: String*): String = {
    val invalidElements = urlEncodedElements.zipWithIndex.filter { case (e, _) => e.contains(delimiter) }
    // Values that are not URL encoded could contain the delimiter and induce ID duplication
    require(
      invalidElements.isEmpty,
      s"Not URL encoded value found: ${invalidElements.map { case (e, i) => s"(${i + 1}: $e)" }.mkString(", ")}",
    )
    // mkString is the idiomatic (and varargs-friendly) replacement for java.lang.String.join
    urlEncodedElements.mkString(delimiter)
  }
}
21 |
--------------------------------------------------------------------------------
/core/src/multi-jvm/resources/logback.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
5 |
6 | %msg%n
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
--------------------------------------------------------------------------------
/core/src/multi-jvm/scala/lerna/akka/entityreplication/PersistencePluginProxySupport.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication
2 |
3 | import akka.persistence.journal.PersistencePluginProxy
4 | import akka.remote.testkit.MultiNodeSpec
5 | import lerna.akka.entityreplication.util.persistence.query.proxy.scaladsl.ReadJournalPluginProxy
6 |
7 | trait PersistencePluginProxySupport { self: MultiNodeSpec =>
8 |
9 | PersistencePluginProxy.setTargetLocation(system, node(roles.head).address)
10 | ReadJournalPluginProxy.setTargetLocation(system, node(roles.head).address)
11 | }
12 |
--------------------------------------------------------------------------------
/core/src/multi-jvm/scala/lerna/akka/entityreplication/STMultiNodeSerializable.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication
2 |
3 | trait STMultiNodeSerializable extends Serializable
4 |
--------------------------------------------------------------------------------
/core/src/multi-jvm/scala/lerna/akka/entityreplication/Sample.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication
2 |
3 | import akka.remote.testkit.{ MultiNodeConfig, MultiNodeSpec }
4 | import com.typesafe.config.ConfigFactory
5 |
// Multi-JVM entry points: the MultiJvmNodeN class-name suffix selects which JVM/node runs the spec
class SampleSpecMultiJvmNode1 extends SampleSpec
class SampleSpecMultiJvmNode2 extends SampleSpec
8 |
/** Two-node configuration for [[SampleSpec]], with cluster provider and the shared multi-jvm test config. */
object SampleSpecConfig extends MultiNodeConfig {
  val node1 = role("node1")
  val node2 = role("node2")

  commonConfig(
    debugConfig(false)
      .withFallback(ConfigFactory.parseString("""
      akka.actor.provider = cluster
      """))
      .withFallback(ConfigFactory.parseResources("multi-jvm-testing.conf")),
  )
}
21 |
/** Smoke test for the multi-node test infrastructure: nodes enter a barrier and run trivial assertions. */
class SampleSpec extends MultiNodeSpec(SampleSpecConfig) with STMultiNodeSpec {
  import SampleSpecConfig._

  // Barriers require every participant, so use all defined roles
  override def initialParticipants: Int = roles.size

  "Nodes" should {

    "wait for all nodes to enter a barrier" in {
      enterBarrier("startup")
    }

    "be able to say hello" in {
      runOn(node1) {
        val message = "Hello from node 1"
        message should be("Hello from node 1")
      }
      runOn(node2) {
        val message = "Hello from node 2"
        message should be("Hello from node 2")
      }
    }
  }
}
45 |
--------------------------------------------------------------------------------
/core/src/multi-jvm/scala/lerna/akka/entityreplication/util/persistence/query/proxy/ReadJournalPluginProxyProvider.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.util.persistence.query.proxy
2 |
3 | import akka.actor.ExtendedActorSystem
4 | import akka.persistence.query.ReadJournalProvider
5 | import com.typesafe.config.Config
6 |
/** [[ReadJournalProvider]] exposing the proxy read journal through both the Scala and Java APIs. */
class ReadJournalPluginProxyProvider(system: ExtendedActorSystem, config: Config) extends ReadJournalProvider {

  override val scaladslReadJournal: scaladsl.ReadJournalPluginProxy =
    new scaladsl.ReadJournalPluginProxy(config)(system)

  // The Java API is a thin wrapper around the Scala implementation
  override val javadslReadJournal: javadsl.ReadJournalPluginProxy =
    new javadsl.ReadJournalPluginProxy(scaladslReadJournal)
}
15 |
--------------------------------------------------------------------------------
/core/src/multi-jvm/scala/lerna/akka/entityreplication/util/persistence/query/proxy/javadsl/ReadJournalPluginProxy.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.util.persistence.query.proxy.javadsl
2 |
3 | import akka.NotUsed
4 | import akka.persistence.query.javadsl._
5 | import akka.persistence.query.{ EventEnvelope, Offset }
6 | import akka.stream.javadsl.Source
7 | import lerna.akka.entityreplication.util.persistence.query.proxy.scaladsl.{
8 | ReadJournalPluginProxy => ScalaInMemoryReadJournal,
9 | }
10 |
/**
 * Java API read journal that delegates every query to the underlying Scala
 * implementation and converts the resulting streams with `asJava`.
 */
class ReadJournalPluginProxy(journal: ScalaInMemoryReadJournal)
    extends ReadJournal
    with CurrentPersistenceIdsQuery
    with PersistenceIdsQuery
    with CurrentEventsByPersistenceIdQuery
    with EventsByPersistenceIdQuery
    with CurrentEventsByTagQuery
    with EventsByTagQuery {

  override def currentPersistenceIds(): Source[String, NotUsed] =
    journal.currentPersistenceIds().asJava

  override def persistenceIds(): Source[String, NotUsed] =
    journal.persistenceIds().asJava

  override def currentEventsByPersistenceId(
      persistenceId: String,
      fromSequenceNr: Long,
      toSequenceNr: Long,
  ): Source[EventEnvelope, NotUsed] =
    journal.currentEventsByPersistenceId(persistenceId, fromSequenceNr, toSequenceNr).asJava

  override def eventsByPersistenceId(
      persistenceId: String,
      fromSequenceNr: Long,
      toSequenceNr: Long,
  ): Source[EventEnvelope, NotUsed] =
    journal.eventsByPersistenceId(persistenceId, fromSequenceNr, toSequenceNr).asJava

  override def currentEventsByTag(tag: String, offset: Offset): Source[EventEnvelope, NotUsed] =
    journal.currentEventsByTag(tag, offset).asJava

  override def eventsByTag(tag: String, offset: Offset): Source[EventEnvelope, NotUsed] =
    journal.eventsByTag(tag, offset).asJava
}
46 |
--------------------------------------------------------------------------------
/core/src/test/resources/application-test.conf:
--------------------------------------------------------------------------------
1 | // configuration file for ActorTestKit
2 | // see also: https://doc.akka.io/docs/akka/2.6/typed/testing-async.html#configuration
3 |
4 | include "application"
5 |
--------------------------------------------------------------------------------
/core/src/test/resources/application.conf:
--------------------------------------------------------------------------------
1 | include "example.conf"
2 |
3 | akka {
4 | remote {
5 | artery {
      # When multiple ActorSystems start, their bind ports can collide and startup can fail,
      # so use a random port (0) during tests
7 | canonical.port = 0
8 | }
9 | }
10 | }
11 |
12 | akka.cluster {
13 | // disable cluster auto-formation
14 | seed-nodes = []
15 | }
16 |
17 | akka.actor {
18 | allow-java-serialization = off
19 | serializers {
20 | kryo = "io.altoo.akka.serialization.kryo.KryoSerializer"
21 | }
22 | serialization-bindings {
23 | "lerna.akka.entityreplication.testkit.KryoSerializable" = kryo
24 | }
25 | }
26 |
27 | akka.cluster {
28 | // disable cluster auto-formation
29 | seed-nodes = []
  // Not required for testing
31 | jmx.enabled = off
32 | }
33 |
34 | akka-kryo-serialization {
35 | type = "graph"
36 | id-strategy = "default"
37 | }
38 |
39 | lerna.akka.entityreplication.raft {
40 | persistence {
41 | journal.plugin = "inmemory-journal"
42 | snapshot-store.plugin = "inmemory-snapshot-store"
43 | query.plugin = "inmemory-read-journal"
44 | }
45 | }
46 |
47 | lerna.akka.entityreplication.raft.eventsourced {
48 | persistence {
49 | journal.plugin = "inmemory-journal"
50 | snapshot-store.plugin = "inmemory-snapshot-store"
51 | }
52 | }
53 |
--------------------------------------------------------------------------------
/core/src/test/resources/logback.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
5 |
6 | %msg%n
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
--------------------------------------------------------------------------------
/core/src/test/scala/lerna/akka/entityreplication/protobuf/ClusterReplicationSerializerBindingSpec.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.protobuf
2 |
3 | import akka.actor.ActorSystem
4 | import akka.serialization.SerializationExtension
5 | import lerna.akka.entityreplication.ClusterReplicationSerializable
6 |
/** Verifies that [[ClusterReplicationSerializable]] messages are bound to [[ClusterReplicationSerializer]]. */
final class ClusterReplicationSerializerBindingSpec
    // Fixed typo in the ActorSystem name: "Binging" -> "Binding"
    extends SerializerSpecBase(ActorSystem("ClusterReplicationSerializerBindingSpec")) {

  private val serialization = SerializationExtension(system)

  "ClusterReplicationSerializer " should {

    "be bound to ClusterReplicationSerializable" in {
      val serializer = serialization.serializerFor(classOf[ClusterReplicationSerializable])
      serializer shouldBe a[ClusterReplicationSerializer]
    }

  }

}
22 |
--------------------------------------------------------------------------------
/core/src/test/scala/lerna/akka/entityreplication/protobuf/SerializerSpecBase.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.protobuf
2 |
3 | import akka.actor.ActorSystem
4 | import akka.testkit.TestKit
5 | import org.scalatest.{ Matchers, WordSpecLike }
6 |
7 | abstract class SerializerSpecBase(system: ActorSystem) extends TestKit(system) with WordSpecLike with Matchers
8 |
--------------------------------------------------------------------------------
/core/src/test/scala/lerna/akka/entityreplication/raft/ActorSpec.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.raft
2 |
3 | import akka.Done
4 | import akka.actor.{ Actor, ActorRef, Identify, Props, Terminated }
5 | import akka.pattern.ask
6 | import akka.testkit.{ DefaultTimeout, ImplicitSender, TestKit }
7 | import org.scalatest.{ BeforeAndAfterEach, Matchers, WordSpecLike }
8 |
9 | import scala.concurrent.{ Await, Awaitable }
10 |
object ActorSpec {

  object TestActorAutoKillManager {
    def props = Props(new TestActorAutoKillManager)

    /** Registers an actor to be stopped when [[KillAll]] arrives. */
    final case class Register(ref: ActorRef)

    /** Stops all registered actors; the sender receives [[akka.Done]] once all have terminated. */
    final case object KillAll
  }

  /** Watches registered actors and stops them all on demand, replying when done. */
  class TestActorAutoKillManager extends Actor {
    import TestActorAutoKillManager._

    // Actors currently registered (and death-watched); shrinks as they terminate
    var refs: Set[ActorRef] = Set()

    override def receive: Receive = ready

    def ready: Receive = {
      case Terminated(ref) =>
        refs -= ref
      case Register(ref) =>
        refs += context.watch(ref)
      case KillAll if refs.isEmpty =>
        // Nothing registered: acknowledge immediately
        sender() ! Done
      case KillAll =>
        refs.foreach(context.stop)
        context.become(terminating(replyTo = sender()))
    }

    // Waits for all watched actors to terminate, then acknowledges and returns to `ready`
    def terminating(replyTo: ActorRef): Receive = {
      case Terminated(ref) =>
        refs -= ref
        if (refs.isEmpty) {
          replyTo ! Done
          context.become(ready)
        }
    }
  }
}
49 |
trait ActorSpec extends WordSpecLike with Matchers with BeforeAndAfterEach with ImplicitSender with DefaultTimeout {
  self: TestKit =>
  import ActorSpec._

  /**
   * (ported from akka.remote.testkit.MultiNodeSpec)
   *
   * Enrich `.await()` onto all Awaitables, using remaining duration from the innermost
   * enclosing `within` block or QueryTimeout.
   */
  implicit class AwaitHelper[T](w: Awaitable[T]) {
    def await: T = Await.result(w, timeout.duration)
  }

  /**
   * Automatically kills actors registered via [[planAutoKill]] when each test case ends.
   *
   * Convenient when all actors created inside a test case should be killed in one step
   * at the end of the case.
   */
  private[this] lazy val autoKillManager: ActorRef =
    system.actorOf(TestActorAutoKillManager.props, "TestActorAutoKillManager")

  /** Registers `ref` to be killed automatically after the current test case; returns `ref` for chaining. */
  protected def planAutoKill(ref: ActorRef): ActorRef = {
    autoKillManager ! TestActorAutoKillManager.Register(ref)
    ref
  }

  override def beforeEach(): Unit = {
    super.beforeEach()
    // The Identify round trip ensures the manager has started before the test begins
    (autoKillManager ? Identify("to wait for start-up")).await

    // Ignoring all messages sent in the previous unit test case
    ignoreAllMessagesSentBefore()
  }

  override def afterEach(): Unit = {
    (autoKillManager ? TestActorAutoKillManager.KillAll).await
    super.afterEach()
  }

  // Drains the testActor mailbox by sending a fresh sentinel and fishing until it arrives
  private def ignoreAllMessagesSentBefore(): Unit = {
    case object SentinelMessage
    testActor.tell(SentinelMessage, ActorRef.noSender)
    fishForMessage(hint = "ignoring all messages sent before")(_ == SentinelMessage)
  }

}
97 |
--------------------------------------------------------------------------------
/core/src/test/scala/lerna/akka/entityreplication/raft/RaftTestProbe.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.raft
2 |
3 | import lerna.akka.entityreplication.raft.RaftActor._
4 |
// For testing purposes
// FIXME: Write tests that do not depend on this class
object RaftTestProbe {

  /** Replaces the Raft actor's state name and data; acknowledged with [[StateChanged]]. */
  case class SetState(stateName: State, stateData: RaftMemberData)
  case object StateChanged

  /** Requests the current state; answered with [[RaftState]]. */
  case object GetState
  case class RaftState(stateName: State, stateData: RaftMemberData)

  /** Domain event that swaps in the given data without persisting it. */
  case class Using(data: RaftMemberData) extends NonPersistEventLike

  /** Mixin intercepting test-only control messages in the unhandled path. */
  trait RaftTestProbeSupport extends RaftActorBase {

    override def unhandled(message: Any): Unit =
      message match {
        case SetState(stateName, stateData) =>
          applyDomainEvent(Using(stateData)) { _ =>
            become(stateName)
            sender() ! StateChanged
          }
        case GetState =>
          sender() ! RaftState(currentState, currentData)
        case msg =>
          // Everything else goes through the normal unhandled path
          super.unhandled(msg)
      }

    // `abstract override` stacks on the real updateState: Using short-circuits, the rest delegate
    abstract override def updateState(domainEvent: DomainEvent): RaftMemberData =
      domainEvent match {
        case Using(data) => data
        case _ => super.updateState(domainEvent)
      }
  }
}
39 |
--------------------------------------------------------------------------------
/core/src/test/scala/lerna/akka/entityreplication/raft/model/ClientContextSpec.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.raft.model
2 |
3 | import akka.actor.ActorSystem
4 | import akka.testkit.{ TestKit, TestProbe }
5 | import lerna.akka.entityreplication.raft.ActorSpec
6 |
/** Tests ClientContext.forward: the sender observed by the target depends on `originSender`. */
final class ClientContextSpec extends TestKit(ActorSystem("ClientContextSpec")) with ActorSpec {

  "ClientContext.forward should send the given message to the actor, including no sender, if the context doesn't have an original sender" in {
    val probe = TestProbe()
    val clientContext = ClientContext(probe.ref, instanceId = None, originSender = None)
    clientContext.forward("message-1")
    probe.expectMsg("message-1")
    // With no original sender, the message appears to come from deadLetters
    probe.sender() should be(system.deadLetters)
  }

  "ClientContext.forward should send the given message to the actor, including an original sender, if the context has the original sender" in {
    val probe = TestProbe()
    val originalSender = TestProbe().ref
    val clientContext = ClientContext(probe.ref, instanceId = None, originSender = Some(originalSender))
    clientContext.forward("message-1")
    probe.expectMsg("message-1")
    probe.sender() should be(originalSender)
  }

}
27 |
--------------------------------------------------------------------------------
/core/src/test/scala/lerna/akka/entityreplication/raft/model/LogEntryIndexSpec.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.raft.model
2 |
3 | import lerna.akka.entityreplication.raft.model.exception.SeqIndexOutOfBoundsException
4 | import org.scalatest.{ Matchers, WordSpec }
5 |
/** Tests LogEntryIndex conversion from the Long index space to Int-based Seq indexes. */
class LogEntryIndexSpec extends WordSpec with Matchers {

  "LogEntryIndex" should {

    "compute an index of Seq from self and offset" in {
      // Int.MaxValue + 2 with offset 1 lands exactly on Int.MaxValue, the largest valid Seq index
      val index = LogEntryIndex(Int.MaxValue.toLong + 2)
      val offset = LogEntryIndex(1)

      index.toSeqIndex(offset) should be(Int.MaxValue)
    }

    "throw an exception if the index of Seq is out of bounds" in {
      // Offset 0 pushes the resulting Seq index past Int.MaxValue
      val index = LogEntryIndex(Int.MaxValue.toLong + 2)
      val offset = LogEntryIndex(0)

      val caught = intercept[SeqIndexOutOfBoundsException] {
        index.toSeqIndex(offset)
      }

      // The exception carries both operands for diagnostics
      caught.self should be(index)
      caught.offset should be(offset)
    }
  }
}
30 |
--------------------------------------------------------------------------------
/core/src/test/scala/lerna/akka/entityreplication/raft/snapshot/ShardSnapshotStoreSpecBase.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.raft.snapshot
2 |
3 | import akka.persistence.testkit.{ PersistenceTestKitPlugin, PersistenceTestKitSnapshotPlugin }
4 | import com.typesafe.config.{ Config, ConfigFactory }
5 |
object ShardSnapshotStoreSpecBase {

  /**
   * Config routing Raft persistence to the Akka Persistence TestKit journal and
   * snapshot-store plugins, with the default loaded config as the final fallback.
   */
  def configWithPersistenceTestKits: Config = {
    PersistenceTestKitPlugin.config
      .withFallback(PersistenceTestKitSnapshotPlugin.config)
      .withFallback(raftPersistenceConfigWithPersistenceTestKits)
      .withFallback(ConfigFactory.load())
  }

  // Points lerna.akka.entityreplication.raft.persistence at the persistence-testkit plugin IDs
  private val raftPersistenceConfigWithPersistenceTestKits: Config = ConfigFactory.parseString(
    s"""
       |lerna.akka.entityreplication.raft.persistence {
       |  journal.plugin = ${PersistenceTestKitPlugin.PluginId}
       |  snapshot-store.plugin = ${PersistenceTestKitSnapshotPlugin.PluginId}
       |  # Might be possible to use PersistenceTestKitReadJournal
       |  // query.plugin = ""
       |}
       |""".stripMargin,
  )

}
27 |
--------------------------------------------------------------------------------
/core/src/test/scala/lerna/akka/entityreplication/raft/snapshot/SnapshotStoreSpec.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.raft.snapshot
2 |
3 | import akka.actor.ActorSystem
4 | import akka.testkit.TestKit
5 | import lerna.akka.entityreplication.model.{ NormalizedEntityId, TypeName }
6 | import lerna.akka.entityreplication.raft.ActorSpec
7 | import lerna.akka.entityreplication.raft.routing.MemberIndex
8 |
// Unit tests for `SnapshotStore.persistenceId`.
final class SnapshotStoreSpec extends TestKit(ActorSystem("SnapshotStoreSpec")) with ActorSpec {

  "SnapshotStore.persistenceId" should {

    "return a persistence ID for the given type name, entity ID, and member index" in {
      // The persistence ID joins the "SnapshotStore" prefix and the three parts with ":".
      val persistenceId = SnapshotStore.persistenceId(
        TypeName.from("test-type-name"),
        NormalizedEntityId.from("test-entity-id"),
        MemberIndex("test-member-index"),
      )
      assert(persistenceId === "SnapshotStore:test-type-name:test-entity-id:test-member-index")
    }

  }

}
25 |
--------------------------------------------------------------------------------
/core/src/test/scala/lerna/akka/entityreplication/testkit/CustomTestProbe.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.testkit
2 |
3 | import akka.testkit.TestProbe
4 |
object CustomTestProbe {

  /** Enriches [[akka.testkit.TestProbe]] with an N-message fishing helper. */
  implicit class CustomTestProbe(testProbe: TestProbe) {

    /** Keeps fishing until `messages` received messages have matched `f`,
      * silently skipping every message `f` is not defined at.
      * Returns the values produced by `f`, in the order the messages matched.
      */
    def fishForMessageN[T](messages: Int)(f: PartialFunction[Any, T]): Seq[T] = {
      var collected = Vector.empty[T]
      testProbe.fishForMessage() {
        case message =>
          f.lift(message) match {
            case Some(result) =>
              collected :+= result
              collected.sizeIs >= messages // stop fishing once enough matches were seen
            case None =>
              false // not a match: keep fishing
          }
      }
      collected
    }
  }
}
20 |
--------------------------------------------------------------------------------
/core/src/test/scala/lerna/akka/entityreplication/testkit/CustomTestProbeSpec.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.testkit
2 |
3 | import akka.actor.ActorSystem
4 | import akka.testkit.{ TestKit, TestProbe }
5 | import lerna.akka.entityreplication.raft.ActorSpec
6 |
// Unit tests for `CustomTestProbe.fishForMessageN`.
class CustomTestProbeSpec extends TestKit(ActorSystem("CustomTestProbeSpec")) with ActorSpec {
  import CustomTestProbe._

  "CustomTestProbe.fishForMessageN" should {

    "pass when the probe receives messages that match the condition" in {
      val probe = TestProbe()

      probe.ref ! "match"

      var called = false
      probe.fishForMessageN(messages = 1) {
        case "match" =>
          called = true
      }
      called should be(true)
    }

    "throw AssertionError when the probe doesn't receive any messages that match the condition" in {
      val probe = TestProbe()

      probe.ref ! "invalid"

      // The underlying fishForMessage times out, which surfaces as an AssertionError.
      intercept[AssertionError] {
        probe.fishForMessageN(messages = 1) {
          case "match" =>
        }
      }
    }

    "ignore messages that doesn't match the condition" in {
      val probe = TestProbe()

      probe.ref ! "ignore"
      probe.ref ! "ignore"
      probe.ref ! "match"

      var called = false
      probe.fishForMessageN(messages = 1) {
        case "match" =>
          called = true
      }
      called should be(true)
    }

    "call the function until the count of matched messages reaches 'messages' parameter" in {
      val probe = TestProbe()

      probe.ref ! "match"
      probe.ref ! "match"

      var count = 0
      probe.fishForMessageN(messages = 2) {
        case "match" =>
          count = count + 1
      }
      count should be(2)
    }

    "return the values the PartialFunction provided to verify coverage of the patterns matched" in {
      val probe = TestProbe()

      probe.ref ! "first"
      probe.ref ! "second"

      // The returned Seq preserves the order in which the messages matched.
      probe.fishForMessageN(messages = 2) {
        case msg @ "first" =>
          msg
        case msg @ "second" =>
          msg
      } should contain theSameElementsInOrderAs Seq("first", "second")
    }
  }
}
81 |
--------------------------------------------------------------------------------
/core/src/test/scala/lerna/akka/entityreplication/testkit/KryoSerializable.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.testkit
2 |
// Marker trait for test messages; presumably bound to a Kryo serializer in the
// test serialization config — verify against the test configuration files.
trait KryoSerializable
4 |
--------------------------------------------------------------------------------
/core/src/test/scala/lerna/akka/entityreplication/typed/ClusterReplicationSpec.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.typed
2 |
3 | import akka.NotUsed
4 | import akka.actor.testkit.typed.scaladsl.ActorTestKit
5 | import akka.actor.typed.scaladsl.Behaviors
6 | import org.scalatest.{ BeforeAndAfterAll, FlatSpec, Matchers }
7 | import org.scalatest.concurrent.ScalaFutures
8 |
// Unit tests for the typed ClusterReplication extension:
// `init`, `entityRefFor`, and `shardIdOf`.
class ClusterReplicationSpec extends FlatSpec with Matchers with ScalaFutures with BeforeAndAfterAll {

  private[this] val actorTestKit = ActorTestKit()

  private[this] val clusterReplication = ClusterReplication(actorTestKit.system)

  // Terminate the test ActorSystem once all tests have run.
  override def afterAll(): Unit = {
    actorTestKit.shutdownTestKit()
    super.afterAll()
  }

  behavior of "ClusterReplication.init"

  it should "provide a same ActorRef instance even if it was called multiple time" in {
    val typeKey = ReplicatedEntityTypeKey[NotUsed]("InitMultipleTime")
    val entity  = ReplicatedEntity(typeKey)(_ => Behaviors.empty)

    val region1 = clusterReplication.init(entity)
    val region2 = clusterReplication.init(entity)

    // init is expected to be idempotent: the same region instance is returned.
    region1 should be theSameInstanceAs region2
  }

  behavior of "ClusterReplication.entityRefFor"

  it should "throw an exception if the typeKey has not initialized" in {
    val typeKey = ReplicatedEntityTypeKey[NotUsed]("NotInitialized")

    val exception =
      intercept[IllegalStateException] {
        clusterReplication.entityRefFor(typeKey, "dummy")
      }
    exception.getMessage should be(
      "The type [ReplicatedEntityTypeKey[akka.NotUsed](NotInitialized)] must be init first",
    )
  }

  it should "provide ReplicatedEntityRef after the region was initialized" in {
    val typeKey = ReplicatedEntityTypeKey[NotUsed]("ProvideReplicatedEntityRef")
    val entity  = ReplicatedEntity(typeKey)(_ => Behaviors.empty)

    clusterReplication.init(entity)

    clusterReplication.entityRefFor(typeKey, "test") shouldBe a[ReplicatedEntityRef[_]]
  }

  behavior of "ClusterReplication.shardIdOf"

  it should "throw an exception if the typeKey has not initialized" in {
    val typeKey  = ReplicatedEntityTypeKey[NotUsed]("NotInitialized")
    val entityId = "entity-id"
    val exception = intercept[IllegalStateException] {
      clusterReplication.shardIdOf(typeKey, entityId)
    }
    exception.getMessage should be(
      "The type [ReplicatedEntityTypeKey[akka.NotUsed](NotInitialized)] must be init first",
    )
  }

  it should "extract shardId from given entityId" in {
    val typeKey = ReplicatedEntityTypeKey[NotUsed]("ExtractShardId")
    val entity  = ReplicatedEntity(typeKey)(_ => Behaviors.empty)
    clusterReplication.init(entity)

    val entityId = "entity-id"
    val shardId  = clusterReplication.shardIdOf(typeKey, entityId)
    val settings = ClusterReplicationSettings(actorTestKit.system)
    // The extracted shard ID must fall inside the configured shard range.
    assert(shardId.toInt >= 0 && shardId.toInt < settings.raftSettings.numberOfShards)
  }
}
79 |
--------------------------------------------------------------------------------
/core/src/test/scala/lerna/akka/entityreplication/typed/EffectSpec.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.typed
2 |
import akka.actor.testkit.typed.scaladsl.ActorTestKit
import org.scalatest.{ BeforeAndAfterAll, FlatSpec, Matchers }
5 |
object EffectSpec {
  // Test fixtures: a sample event, a sample reply, and an opaque state type.
  final case object Event
  final case object Reply
  trait State
}
11 |
// Unit tests for the Effect factory methods.
class EffectSpec extends FlatSpec with Matchers with BeforeAndAfterAll {

  import EffectSpec._

  private[this] val actorTestKit = ActorTestKit()

  private[this] val replyTo = actorTestKit.createTestProbe[Reply.type]()

  // Shut down the ActorTestKit so its underlying ActorSystem doesn't leak
  // beyond this suite. Without this, every run left the system alive.
  override def afterAll(): Unit = {
    actorTestKit.shutdownTestKit()
    super.afterAll()
  }

  "Effect when replicate() is called" should "return the event" in {
    val effect = Effect.replicate(Event).thenNoReply()
    effect.event should contain(Event)
  }

  "Effect when none is called" should behave like anEffectHasNoEvent {
    Effect.none.thenReply(replyTo.ref)(_ => Reply)
  }
  "Effect when unhandled is called" should behave like anEffectHasNoEvent {
    Effect.unhandled.thenReply(replyTo.ref)(_ => Reply)
  }
  "Effect when passivate() is called" should behave like anEffectHasNoEvent {
    Effect.passivate().thenNoReply()
  }
  "Effect when stopLocally() is called" should behave like anEffectHasNoEvent {
    Effect.stopLocally()
  }
  "Effect when unstashAll() is called" should behave like anEffectHasNoEvent {
    Effect.unstashAll().thenNoReply()
  }
  "Effect when stash() is called" should behave like anEffectHasNoEvent {
    Effect.stash()
  }
  "Effect when reply() is called" should behave like anEffectHasNoEvent {
    Effect.reply(replyTo.ref)(Reply)
  }
  "Effect when noReply is called" should behave like anEffectHasNoEvent {
    Effect.noReply
  }

  // Shared behavior: the effect must not carry any event to replicate.
  def anEffectHasNoEvent(effect: => Effect[Event.type, State]): Unit = {
    it should s"return None from event method" in {
      effect.event should be(empty)
    }
  }
}
56 |
--------------------------------------------------------------------------------
/core/src/test/scala/lerna/akka/entityreplication/util/ActorIdsSpec.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.util
2 |
3 | import java.net.URLEncoder
4 |
5 | import org.scalatest.{ Matchers, WordSpec }
6 |
// Unit tests for ActorIds: actor-name and persistence-ID construction.
class ActorIdsSpec extends WordSpec with Matchers {

  // URL-encodes a value so it can safely contain the ":" delimiter.
  private[this] def urlEncode(value: String) = URLEncoder.encode(value, "utf-8")

  "ActorIds" when {

    "creates actorName" should {

      "return a name for an actor" in {
        ActorIds.actorName("test", "actor") should be("test:actor")
      }

      "be passed encoded elements with URLEncoder if they can contain the delimiter" in {
        ActorIds.actorName(urlEncode("test:a"), urlEncode("actor:b")) should be("test%3Aa:actor%3Ab")
      }

      "throw an exception when it gets invalid element which contains the delimiter" in {
        // Raw ":" in an element is rejected to keep the name unambiguous.
        val exception = intercept[IllegalArgumentException] {
          ActorIds.actorName("test:a", "actor:b")
        }
        exception.getMessage should be("requirement failed: Not URL encoded value found: (1: test:a), (2: actor:b)")
      }
    }

    "creates persistenceId" should {

      "return an id for a PersistentActor" in {
        ActorIds.persistenceId("test", "actor") should be("test:actor")
      }

      "be passed encoded elements with URLEncoder if they can contain the delimiter" in {
        ActorIds.persistenceId(urlEncode("test:a"), urlEncode("actor:b")) should be("test%3Aa:actor%3Ab")
      }

      "throw an exception when it gets invalid element which contains the delimiter" in {
        val exception = intercept[IllegalArgumentException] {
          ActorIds.persistenceId("test:a", "actor:b")
        }
        exception.getMessage should be("requirement failed: Not URL encoded value found: (1: test:a), (2: actor:b)")
      }
    }
  }
}
50 |
--------------------------------------------------------------------------------
/core/src/test/scala/lerna/akka/entityreplication/util/EventStore.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.util
2 |
3 | import akka.Done
4 | import akka.actor.{ Actor, Props }
5 | import akka.persistence.{ PersistentActor, RuntimePluginConfig }
6 | import com.typesafe.config.{ Config, ConfigFactory }
7 | import lerna.akka.entityreplication.ClusterReplicationSettings
8 |
object EventStore {
  /** Creates Props for an [[EventStore]] with the given settings. */
  def props(settings: ClusterReplicationSettings): Props = Props(new EventStore(settings))
  /** Command: persist the given events in order, then reply with [[akka.Done]]. */
  final case class PersistEvents(events: Seq[Any])

  // Resolves to this companion object's class name — stable as long as the object isn't moved/renamed.
  def persistenceId: String = getClass.getCanonicalName
}
15 |
/** Test utility actor that persists arbitrary events and replies with `Done` when all are stored. */
class EventStore(settings: ClusterReplicationSettings) extends PersistentActor with RuntimePluginConfig {
  import EventStore._

  // Persist through the same plugins as the Raft persistence settings under test.
  override def journalPluginId: String = settings.raftSettings.journalPluginId

  override def journalPluginConfig: Config = settings.raftSettings.journalPluginAdditionalConfig

  override def snapshotPluginId: String = settings.raftSettings.snapshotStorePluginId

  override def snapshotPluginConfig: Config = ConfigFactory.empty()

  override def persistenceId: String = EventStore.persistenceId

  // Nothing to recover: this actor only writes events for test preparation.
  override def receiveRecover: Receive = Actor.emptyBehavior

  // Count of events whose persistence is still in flight.
  private[this] var remainingEvents: Int = 0

  override def receiveCommand: Receive = {
    case PersistEvents(events) if events.isEmpty =>
      sender() ! Done
    case PersistEvents(events) =>
      val replyTo = sender()
      remainingEvents = events.size
      persistAll(events.toVector) { _ =>
        remainingEvents -= 1
        if (remainingEvents == 0) {
          replyTo ! Done
        }
      }
  }
}
46 |
--------------------------------------------------------------------------------
/docs/example/read-side.sc:
--------------------------------------------------------------------------------
1 | import akka.Done
2 | import akka.actor.typed.{ ActorRef, ActorSystem }
3 | import akka.cluster.typed.{ ClusterSingleton, SingletonActor }
4 | import akka.persistence.query.Offset
5 | import akka.projection.eventsourced.EventEnvelope
6 | import akka.projection.scaladsl.ExactlyOnceProjection
7 | import akka.projection.slick.{ SlickHandler, SlickProjection }
8 | import akka.projection.{ ProjectionBehavior, ProjectionId }
9 | import slick.basic.DatabaseConfig
10 | import slick.dbio.DBIO
11 | import slick.jdbc.JdbcProfile
12 |
// Domain events of the example bank-account entity.
sealed trait Event
final case class Deposited(amount: Int) extends Event
final case class Withdrawed(amount: Int) extends Event

// Database actions the read-side projection uses to record statistics.
trait StatisticsActions {
  def insertWithdrawalRecord(amount: Int): DBIO[Done]
  def insertDepositRecord(amount: Int): DBIO[Done]
}
21 |
// Maps each replicated event to the corresponding database action for the read model.
class EventHandler(actions: StatisticsActions) extends SlickHandler[EventEnvelope[Event]] {
  override def process(envelope: EventEnvelope[Event]): DBIO[Done] = {
    envelope.event match {
      case Deposited(amount) =>
        actions.insertDepositRecord(amount)
      case Withdrawed(amount) =>
        actions.insertWithdrawalRecord(amount)
    }
  }
}
32 |
33 | import lerna.akka.entityreplication.raft.eventsourced.EntityReplicationEventSource
34 |
object EventHandler {
  /** Starts a projection that consumes replicated events and writes statistics
    * through the given actions; runs as a cluster singleton.
    */
  def start(
      actions: StatisticsActions,
      databaseConfig: DatabaseConfig[JdbcProfile],
  )(implicit
      system: ActorSystem[_],
  ): ActorRef[ProjectionBehavior.Command] = {
    // Builds an exactly-once Slick projection fed by the entity-replication event source.
    def generateProjection(): ExactlyOnceProjection[Offset, EventEnvelope[Event]] =
      SlickProjection.exactlyOnce(
        projectionId = ProjectionId(name = "BankAccount", key = "aggregate"),
        sourceProvider = EntityReplicationEventSource.sourceProvider,
        databaseConfig = databaseConfig,
        handler = () => new EventHandler(actions),
      )

    val projection = generateProjection()
    // A cluster singleton ensures only one instance processes the event stream.
    ClusterSingleton(system).init(SingletonActor(ProjectionBehavior(projection), projection.projectionId.id))
  }
}
54 |
--------------------------------------------------------------------------------
/docs/images/demo.apng:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:e1ef0ed3cb021c42ea29eb2c5592f955edb254204ef081703d57aff0593bb8fa
3 | size 4667291
4 |
--------------------------------------------------------------------------------
/docs/migration_guide.md:
--------------------------------------------------------------------------------
1 | # Migration Guide
2 |
3 | ## 2.1.0 from 2.0.0
4 |
5 | ### Configure a snapshot store on the query side
6 | *akka-entity-replication 2.1.0* introduces an efficient recovery on the query side.
7 | We've achieved this efficient recovery by using a snapshot feature of Akka persistence.
8 | This efficient recovery requires you to configure a snapshot store like the following:
9 | ```hocon
10 | lerna.akka.entityreplication.raft.eventsourced.persistence {
11 | snapshot-store.plugin = "Specify your snapshot store plugin ID to use"
12 | }
13 | ```
14 | Note that this snapshot store is mandatory.
15 | You have to configure the snapshot store.
16 |
This efficient recovery also introduces a new setting named `lerna.akka.entityreplication.raft.eventsourced.persistence.snapshot-every`.
18 | *akka-entity-replication 2.1.0* saves a snapshot every `snapshot-every` events.
19 | The default value of `snapshot-every` is 1000.
20 | You can override this setting according to your requirements.
21 |
--------------------------------------------------------------------------------
/docs/testing_guide.md:
--------------------------------------------------------------------------------
1 | # Testing Guide
2 |
3 | ---
4 | **Warning**
5 |
6 | This API has been deprecated.
7 | Use [Typed API](./typed/implementation_guide.md) instead to build new applications.
8 |
9 | ---
10 |
11 | ## Testing ReplicationActors
12 |
13 | `akka-entity-replication` requires implementing your Entities with special trait `ReplicationActor`.
14 |
15 | `TestReplicationActorProps` allows testing behavior of an entity with `akka-testkit`.
16 | First, you need to add a dependency to your project.
17 |
18 | For more details for `akka-testkit`, see the following page.
19 |
20 | [Testing Classic Actors • Akka Documentation](https://doc.akka.io/docs/akka/2.6/testing.html)
21 |
The `TestReplicationActorProps` allows you to test a `ReplicationActor` like a normal Actor.
For more information on the behavior of `TestReplicationActorProps`, please see [TestReplicationActorPropsSpec](/core/src/test/scala/lerna/akka/entityreplication/testkit/TestReplicationActorPropsSpec.scala).
24 |
25 | ```scala
26 | import akka.actor.ActorSystem
27 | import akka.testkit.{ ImplicitSender, TestActors, TestKit }
28 | import org.scalatest.{ Matchers, WordSpecLike, BeforeAndAfterAll }
29 | import lerna.akka.entityreplication.testkit.TestReplicationActorProps
30 |
31 | class WordCountReplicationActorSpec
32 | extends TestKit(ActorSystem("WordCountReplicationActorSpec"))
33 | with ImplicitSender
34 | with WordSpecLike
35 | with Matchers
36 | with BeforeAndAfterAll {
37 |
38 | override def afterAll(): Unit = {
39 | TestKit.shutdownActorSystem(system)
40 | }
41 |
42 | import WordCountReplicationActor._
43 |
44 | "A WordCountReplicationActorSpec" should {
45 |
46 | "send back a Counted event after sending a CountWord command" in {
47 |
48 | val actor = system.actorOf(TestReplicationActorProps(WordCountReplicationActor.props))
49 |
50 | actor ! CountWord("hello")
51 | expectMsg(Counted(wordCount = "hello".length))
52 | }
53 | }
54 | }
55 | ```
56 |
--------------------------------------------------------------------------------
/project/build.properties:
--------------------------------------------------------------------------------
1 | sbt.version=1.3.13
2 |
--------------------------------------------------------------------------------
/project/plugins.sbt:
--------------------------------------------------------------------------------
1 | addSbtPlugin("com.geirsson" % "sbt-ci-release" % "1.5.5")
2 |
3 | addSbtPlugin("org.scalameta" % "sbt-scalafmt" % "2.4.2")
4 |
5 | addSbtPlugin("com.typesafe.sbt" % "sbt-multi-jvm" % "0.4.0")
6 |
7 | addSbtPlugin("ch.epfl.scala" % "sbt-scalafix" % "0.9.25")
8 |
9 | addSbtPlugin("org.scoverage" % "sbt-scoverage" % "1.6.1")
10 |
11 | addSbtPlugin("com.eed3si9n" % "sbt-unidoc" % "0.4.3")
12 |
13 | addSbtPlugin("com.typesafe.sbt" % "sbt-site" % "1.4.1")
14 |
15 | addSbtPlugin("com.typesafe.sbt" % "sbt-ghpages" % "0.6.3")
16 |
17 | addSbtPlugin("com.typesafe" % "sbt-mima-plugin" % "0.9.0")
18 |
--------------------------------------------------------------------------------
/project/scalapb.sbt:
--------------------------------------------------------------------------------
1 | addSbtPlugin("com.thesamet" % "sbt-protoc" % "1.0.2")
2 | libraryDependencies += "com.thesamet.scalapb" %% "compilerplugin" % "0.11.3"
3 |
--------------------------------------------------------------------------------
/publish.sbt:
--------------------------------------------------------------------------------
1 | ThisBuild / organization := "com.lerna-stack"
2 | ThisBuild / organizationName := "Lerna Project"
3 | ThisBuild / organizationHomepage := Some(url("https://lerna-stack.github.io/"))
4 |
5 | ThisBuild / developers := List(
6 | Developer(
7 | id = "lerna",
8 | name = "Lerna Team",
9 | email = "go-reactive@tis.co.jp",
10 | url = url("https://lerna-stack.github.io/"),
11 | ),
12 | )
13 |
14 | ThisBuild / description := "Akka extension for fast recovery from failure with replicating stateful entity on multiple nodes in Cluster."
15 | ThisBuild / licenses := List("Apache 2" -> new URL("http://www.apache.org/licenses/LICENSE-2.0.txt"))
16 | ThisBuild / homepage := Some(url("https://github.com/lerna-stack/akka-entity-replication"))
17 |
18 | // Remove all additional repository other than Maven Central from POM
19 | ThisBuild / pomIncludeRepository := { _ => false }
20 |
--------------------------------------------------------------------------------
/rollback-tool-cassandra/src/main/mima-filters/2.2.0.backwards.excludes/pr-203-rollback-deletes-only-target-tagged-events.excludes:
--------------------------------------------------------------------------------
1 | # CassandraPersistentActorRollback is a private class.
2 | ProblemFilters.exclude[DirectMissingMethodProblem]("lerna.akka.entityreplication.rollback.cassandra.CassandraPersistentActorRollback.deleteTagView")
3 |
--------------------------------------------------------------------------------
/rollback-tool-cassandra/src/main/mima-filters/2.2.0.backwards.excludes/pr-209-cassandra-persistence-queries-handles-deleted-partitions.excludes:
--------------------------------------------------------------------------------
1 | # CassandraJournalSettings is a private class.
2 | ProblemFilters.exclude[DirectMissingMethodProblem]("lerna.akka.entityreplication.rollback.cassandra.CassandraJournalSettings.this")
3 |
--------------------------------------------------------------------------------
/rollback-tool-cassandra/src/main/mima-filters/2.2.0.backwards.excludes/pr-210-rollback-preparation-fails-if-required-data-have-been-deleted:
--------------------------------------------------------------------------------
1 | # PersistenceQueries#TaggedEventEnvelope is private:
2 | ProblemFilters.exclude[DirectMissingMethodProblem]("lerna.akka.entityreplication.rollback.PersistenceQueries#TaggedEventEnvelope.copy")
3 | ProblemFilters.exclude[IncompatibleResultTypeProblem]("lerna.akka.entityreplication.rollback.PersistenceQueries#TaggedEventEnvelope.copy$default$5")
4 | ProblemFilters.exclude[IncompatibleResultTypeProblem]("lerna.akka.entityreplication.rollback.PersistenceQueries#TaggedEventEnvelope.copy$default$6")
5 | ProblemFilters.exclude[DirectMissingMethodProblem]("lerna.akka.entityreplication.rollback.PersistenceQueries#TaggedEventEnvelope.this")
6 | ProblemFilters.exclude[MissingTypesProblem]("lerna.akka.entityreplication.rollback.PersistenceQueries$TaggedEventEnvelope$")
7 | ProblemFilters.exclude[DirectMissingMethodProblem]("lerna.akka.entityreplication.rollback.PersistenceQueries#TaggedEventEnvelope.apply")
8 | ProblemFilters.exclude[IncompatibleSignatureProblem]("lerna.akka.entityreplication.rollback.PersistenceQueries#TaggedEventEnvelope.unapply")
9 |
10 | # PersistentActorRollback is private:
11 | ProblemFilters.exclude[ReversedMissingMethodProblem]("lerna.akka.entityreplication.rollback.PersistentActorRollback.findRollbackRequirements")
12 |
13 | # RaftEventSourcedPersistence is private:
14 | ProblemFilters.exclude[DirectMissingMethodProblem]("lerna.akka.entityreplication.rollback.RaftEventSourcedPersistence.this")
15 |
16 | # RaftPersistence is private:
17 | ProblemFilters.exclude[DirectMissingMethodProblem]("lerna.akka.entityreplication.rollback.RaftPersistence.this")
18 |
19 | # CassandraSnapshotSettings is private:
20 | ProblemFilters.exclude[DirectMissingMethodProblem]("lerna.akka.entityreplication.rollback.cassandra.CassandraSnapshotSettings.this")
21 |
--------------------------------------------------------------------------------
/rollback-tool-cassandra/src/main/resources/reference.conf:
--------------------------------------------------------------------------------
1 | # Settings for the Rollback tool
2 | lerna.akka.entityreplication.rollback {
3 | # The rollback tool executes no write operations by default (`true`). Instead, it logs messages at the INFO level.
  # Use `false` to execute the write operations (deletes, inserts, updates).
5 | # Note that the tool runs read operations even if this value is true.
6 | dry-run = true
7 |
8 | # The rollback tool logs its progress every this number of rollback operations.
  # It logs each rollback operation if this setting value is 1.
10 | log-progress-every = 100
11 |
12 | # This setting value must be greater than the clock synchronization gap between all Akka nodes.
13 | # If this value is higher, the rollback tool requires more persistence operations.
14 | clock-out-of-sync-tolerance = 10s
15 |
  # How many read queries are executed in parallel
17 | read-parallelism = 1
18 |
  # How many write queries are executed in parallel
20 | write-parallelism = 1
21 |
22 | # Full configuration path of Akka Persistence Cassandra plugin to use for
23 | # `lerna.akka.entityreplication.raft.persistence`
24 | cassandra.raft-persistence-plugin-location = "akka.persistence.cassandra"
25 |
26 | # Full configuration path of Akka Persistence Cassandra plugin to use for
27 | # `lerna.akka.entityreplication.raft.eventsourced.persistence`
28 | cassandra.raft-eventsourced-persistence-plugin-location = "akka.persistence.cassandra"
29 |
30 | }
31 |
--------------------------------------------------------------------------------
/rollback-tool-cassandra/src/main/scala/akka/persistence/cassandra/lerna/CassandraReadJournalExt.scala:
--------------------------------------------------------------------------------
1 | package akka.persistence.cassandra.lerna
2 |
3 | import akka.NotUsed
4 | import akka.actor.{ ActorSystem, ExtendedActorSystem }
5 | import akka.persistence.cassandra.PluginSettings
6 | import akka.persistence.cassandra.journal.TimeBucket
7 | import akka.persistence.cassandra.query.scaladsl.CassandraReadJournal
8 | import akka.persistence.query.Offset
9 | import akka.stream.scaladsl.Source
10 |
11 | import java.util.UUID
12 |
object CassandraReadJournalExt {

  /** Event envelope that also exposes internal tag-view information
    * (`timeBucket` and `tagPidSequenceNr`) of a tagged event.
    */
  final case class CassandraEventEnvelope(
      persistenceId: String,
      sequenceNr: Long,
      timeBucket: Long,
      timestamp: UUID,
      tagPidSequenceNr: Long,
  )

  /** Creates a [[CassandraReadJournalExt]] from the given system and config path
    *
    * It resolves a config at the given path from the system's config. The resolved config should have the same structure
    * as the one of Akka Persistence Cassandra plugin (`akka.persistence.cassandra`).
    */
  def apply(system: ActorSystem, configPath: String): CassandraReadJournalExt = {
    val pluginConfig = system.settings.config.getConfig(configPath)
    new CassandraReadJournalExt(
      new CassandraReadJournal(system.asInstanceOf[ExtendedActorSystem], pluginConfig, configPath),
      new PluginSettings(system, pluginConfig),
    )
  }

}
36 |
/** Provides Akka Persistence Cassandra Queries ([[akka.persistence.cassandra.query.scaladsl.CassandraReadJournal]])
  *
  * Since it depends on internal APIs of Akka Persistence Cassandra, this class is under namespace `akka.persistence.cassandra`.
  */
final class CassandraReadJournalExt private (
    queries: CassandraReadJournal,
    settings: PluginSettings,
) {
  import CassandraReadJournalExt._

  /** Returns an event source that emits events with the given tag
    *
    * It's an alternative version of the public API [[akka.persistence.cassandra.query.scaladsl.CassandraReadJournal.currentEventsByTag]].
    * This method returns some internal information (`timebucket` and `tag_pid_sequence_nr`) that the public API doesn't provide.
    */
  def currentEventsByTag(tag: String, offset: Offset): Source[CassandraEventEnvelope, NotUsed] =
    queries.currentEventsByTagInternal(tag, offset).map { internalEnvelope =>
      val repr = internalEnvelope.persistentRepr
      CassandraEventEnvelope(
        persistenceId = repr.persistenceId,
        sequenceNr = repr.sequenceNr,
        timeBucket = TimeBucket(internalEnvelope.offset, settings.eventsByTagSettings.bucketSize).key,
        timestamp = internalEnvelope.offset,
        tagPidSequenceNr = internalEnvelope.tagPidSequenceNr,
      )
    }

}
65 |
--------------------------------------------------------------------------------
/rollback-tool-cassandra/src/main/scala/akka/persistence/cassandra/lerna/Extractor.scala:
--------------------------------------------------------------------------------
1 | package akka.persistence.cassandra.lerna
2 |
3 | import akka.actor.ActorSystem
4 | import akka.persistence.PersistentRepr
5 | import akka.persistence.cassandra.Extractors
6 | import akka.persistence.cassandra.journal.CassandraJournal
7 | import akka.persistence.cassandra.lerna.Extractor.TaggedPersistentRepr
8 | import akka.persistence.query.TimeBasedUUID
9 | import akka.serialization.{ Serialization, SerializationExtension }
10 | import com.datastax.oss.driver.api.core.cql.Row
11 |
12 | import scala.concurrent.{ ExecutionContext, Future }
13 |
object Extractor {
  /** A persistent event representation together with its time-based offset and tags. */
  final case class TaggedPersistentRepr(repr: PersistentRepr, offset: TimeBasedUUID, tags: Set[String])
}
17 |
/** Provides extractor from rows to event envelopes
  *
  * Since it depends on some internal APIs of Akka Persistence Cassandra, this class is under namespace
  * `akka.persistence.cassandra`.
  */
final class Extractor(system: ActorSystem) {

  // Built once per instance and reused for every extracted row.
  private val eventDeserializer: CassandraJournal.EventDeserializer =
    new CassandraJournal.EventDeserializer(system)
  private val serialization: Serialization =
    SerializationExtension(system)
  private val rowExtractor: Extractors.Extractor[Extractors.TaggedPersistentRepr] =
    Extractors.taggedPersistentRepr(eventDeserializer, serialization)

  /** Returns `TaggedPersistentRepr` extracted from the given row
    *
    * If `async` is true, internal deserialization is executed asynchronously.
    */
  def extract(row: Row, async: Boolean)(implicit executionContext: ExecutionContext): Future[TaggedPersistentRepr] =
    rowExtractor.extract(row, async).map { extracted =>
      TaggedPersistentRepr(extracted.pr, TimeBasedUUID(extracted.offset), extracted.tags)
    }

}
45 |
--------------------------------------------------------------------------------
/rollback-tool-cassandra/src/main/scala/lerna/akka/entityreplication/rollback/LinearRollbackTimestampHintFinder.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.rollback
2 | import akka.actor.{ ActorSystem, ClassicActorSystemProvider }
3 | import akka.stream.scaladsl.Sink
4 |
5 | import java.time.Instant
6 | import scala.concurrent.Future
7 |
/** @inheritdoc */
private final class LinearRollbackTimestampHintFinder(
    systemProvider: ClassicActorSystemProvider,
    queries: PersistenceQueries,
) extends RollbackTimestampHintFinder {

  private implicit val system: ActorSystem =
    systemProvider.classicSystem

  import system.dispatcher

  /** @inheritdoc */
  override def findTimestampHint(
      requirements: PersistentActorRollback.RollbackRequirements,
  ): Future[RollbackTimestampHintFinder.TimestampHint] = {
    // NOTE: In most cases, an event with `lowestSequenceNr` or `lowestSequenceNr+1` exists.
    // TODO Search a timestamp hint from snapshots since the persistent actor can delete the event with `lowestSequenceNr`.
    val firstEventOption =
      queries
        .currentEventsAfter(requirements.persistenceId, requirements.lowestSequenceNr)
        .runWith(Sink.headOption)
    firstEventOption.flatMap {
      case None =>
        // No event at or after the lowest sequence number: no hint can be derived.
        val reason =
          s"no events of persistenceId=[${requirements.persistenceId}] with a sequence number" +
          s" greater than or equal to lowestSequenceNr=[${requirements.lowestSequenceNr.value}]"
        Future.failed(new RollbackTimestampHintNotFound(reason))
      case Some(firstEvent) =>
        Future.successful(
          RollbackTimestampHintFinder.TimestampHint(
            requirements.persistenceId,
            firstEvent.sequenceNr,
            Instant.ofEpochMilli(firstEvent.timestamp),
          ),
        )
    }
  }

}
47 |
--------------------------------------------------------------------------------
/rollback-tool-cassandra/src/main/scala/lerna/akka/entityreplication/rollback/LinearSequenceNrSearchStrategy.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.rollback
2 |
3 | import akka.actor.{ ActorSystem, ClassicActorSystemProvider }
4 | import akka.stream.scaladsl.{ Sink, Source }
5 | import lerna.akka.entityreplication.rollback.PersistenceQueries.TaggedEventEnvelope
6 |
7 | import java.time.Instant
8 | import scala.concurrent.Future
9 |
/** @inheritdoc
  *
  * Searches for the highest sequence number by scanning events in descending order of sequence number. While it can
  * work even when clocks are out of sync, it performs a linear scan, so it is suitable for searching the sequence
  * number for near-past timestamps.
  */
private final class LinearSequenceNrSearchStrategy(
    systemProvider: ClassicActorSystemProvider,
    queries: PersistenceQueries,
) extends SequenceNrSearchStrategy {

  private implicit val system: ActorSystem = systemProvider.classicSystem

  import system.dispatcher

  /** @inheritdoc */
  override def findUpperBound(persistenceId: String, timestamp: Instant): Future[Option[SequenceNr]] = {
    val timestampMillis = timestamp.toEpochMilli
    // Emits events in descending sequence-number order, starting from the highest sequence number (if any).
    val eventsDescending = Source.futureSource {
      queries.findHighestSequenceNrAfter(persistenceId, SequenceNr(1)).map {
        case Some(highest) => queries.currentEventsBefore(persistenceId, highest)
        case None          => Source.empty[TaggedEventEnvelope]
      }
    }
    eventsDescending
      .dropWhile(envelope => envelope.timestamp > timestampMillis)
      .map(envelope => envelope.sequenceNr)
      .runWith(Sink.headOption)
  }

}
41 |
--------------------------------------------------------------------------------
/rollback-tool-cassandra/src/main/scala/lerna/akka/entityreplication/rollback/PersistenceQueries.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.rollback
2 |
3 | import akka.NotUsed
4 | import akka.persistence.query.Offset
5 | import akka.stream.scaladsl.Source
6 |
7 | import scala.concurrent.Future
8 |
private object PersistenceQueries {

  /** Event envelope with tags
    *
    * @param persistenceId persistence ID of the actor that persisted the event
    * @param sequenceNr sequence number of the event
    * @param event the event payload
    * @param offset offset of the event in the journal's query ordering
    * @param timestamp timestamp of the event, in epoch milliseconds
    * @param tags tags attached to the event
    * @param writerUuid UUID of the writer that persisted the event
    */
  final case class TaggedEventEnvelope(
      persistenceId: String,
      sequenceNr: SequenceNr,
      event: Any,
      offset: Offset,
      timestamp: Long,
      tags: Set[String],
      writerUuid: String,
  )

}
23 |
/** Provides Persistence Queries for rollback */
private trait PersistenceQueries {
  import PersistenceQueries._

  /** Finds the highest sequence number after the given sequence number inclusive
    *
    * If there are no events whose sequence numbers are greater than or equal to the given sequence number, this method
    * returns `Future.successful(None)`.
    *
    * @param persistenceId persistence ID to inspect
    * @param from lower bound (inclusive) of the sequence numbers to consider
    */
  def findHighestSequenceNrAfter(
      persistenceId: String,
      from: SequenceNr,
  ): Future[Option[SequenceNr]]

  /** Returns a `Source` that emits current events after the given sequence number inclusive
    *
    * The source will emit events in ascending order of sequence number.
    *
    * @param persistenceId persistence ID whose events are emitted
    * @param from lower bound (inclusive) of the sequence numbers to emit
    */
  def currentEventsAfter(
      persistenceId: String,
      from: SequenceNr,
  ): Source[TaggedEventEnvelope, NotUsed]

  /** Returns a `Source` that emits current events before the given sequence number inclusive
    *
    * The source will emit events in descending order of sequence number.
    *
    * @param persistenceId persistence ID whose events are emitted
    * @param from upper bound (inclusive) of the sequence numbers to emit
    */
  def currentEventsBefore(
      persistenceId: String,
      from: SequenceNr,
  ): Source[TaggedEventEnvelope, NotUsed]

}
57 |
--------------------------------------------------------------------------------
/rollback-tool-cassandra/src/main/scala/lerna/akka/entityreplication/rollback/PersistentActorRollback.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.rollback
2 |
3 | import akka.Done
4 |
5 | import scala.concurrent.Future
6 |
/** Provides rolling back the persistent actor to the specific sequence number
  *
  * Note that actors related to the persistence ID should not run simultaneously while this tool is running. If there
  * is an event subscriber, compensation might be needed depending on the content of the events. Since this tool
  * doesn't provide such compensation, tool users might have to conduct such compensation themselves.
  */
private trait PersistentActorRollback {
  import PersistentActorRollback._

  /** Returns `true` if this rollback is running in dry-run mode, `false` otherwise */
  def isDryRun: Boolean

  /** Returns [[PersistenceQueries]] this rollback uses */
  def persistenceQueries: PersistenceQueries

  /** Finds rollback requirements for the persistent actor
    *
    * If any rollback is impossible, this method returns a failed `Future` containing a [[RollbackRequirementsNotFound]].
    */
  def findRollbackRequirements(persistenceId: String): Future[RollbackRequirements]

  /** Rolls back the persistent actor to the given sequence number
    *
    * This method doesn't verify that the rollback is actually possible. Use [[findRollbackRequirements]] to confirm that.
    *
    * Since restrictions depend on concrete implementations, see documents of the concrete implementation to use.
    */
  def rollbackTo(persistenceId: String, to: SequenceNr): Future[Done]

  /** Deletes all data for the persistent actor
    *
    * Since restrictions depend on concrete implementations, see documents of the concrete implementation to use.
    */
  def deleteAll(persistenceId: String): Future[Done]

}
43 |
private object PersistentActorRollback {

  /** Rollback requirements for the persistent actor with `persistenceId`
    *
    * The persistent actor can be rolled back to a sequence number greater than or equal to `lowestSequenceNr`.
    *
    * @param persistenceId persistence ID of the actor these requirements apply to
    * @param lowestSequenceNr the lowest sequence number the actor can be rolled back to
    */
  final case class RollbackRequirements(
      persistenceId: String,
      lowestSequenceNr: SequenceNr,
  )

}
56 |
--------------------------------------------------------------------------------
/rollback-tool-cassandra/src/main/scala/lerna/akka/entityreplication/rollback/RaftEventSourcedPersistence.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.rollback
2 |
/** Persistence operations for persistence plugin `lerna.akka.entityreplication.raft.eventsourced.persistence`
  *
  * @param persistentActorRollback rollback operations for the plugin's persistent actors
  * @param requirementsVerifier verifies that rollback requests meet rollback requirements
  */
private class RaftEventSourcedPersistence(
    val persistentActorRollback: PersistentActorRollback,
    val requirementsVerifier: RollbackRequirementsVerifier,
)
8 |
--------------------------------------------------------------------------------
/rollback-tool-cassandra/src/main/scala/lerna/akka/entityreplication/rollback/RaftPersistence.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.rollback
2 |
/** Persistence operations for persistence plugin `lerna.akka.entityreplication.raft.persistence`
  *
  * @param persistentActorRollback rollback operations for the plugin's persistent actors
  * @param raftShardPersistenceQueries persistence queries specific to Raft shards
  * @param sequenceNrSearchStrategy searches for sequence numbers meeting a timestamp requirement
  * @param requirementsVerifier verifies that rollback requests meet rollback requirements
  */
private class RaftPersistence(
    val persistentActorRollback: PersistentActorRollback,
    val raftShardPersistenceQueries: RaftShardPersistenceQueries,
    val sequenceNrSearchStrategy: SequenceNrSearchStrategy,
    val requirementsVerifier: RollbackRequirementsVerifier,
)
10 |
--------------------------------------------------------------------------------
/rollback-tool-cassandra/src/main/scala/lerna/akka/entityreplication/rollback/RaftShardPersistenceQueries.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.rollback
2 |
3 | import akka.NotUsed
4 | import akka.stream.Materializer
5 | import akka.stream.scaladsl.{ Sink, Source }
6 | import lerna.akka.entityreplication.model.NormalizedEntityId
7 | import lerna.akka.entityreplication.raft.RaftActor.{ AppendedEvent, CompactionCompleted, SnapshotSyncCompleted }
8 | import lerna.akka.entityreplication.raft.model.LogEntryIndex
9 | import lerna.akka.entityreplication.rollback.setup.RaftActorId
10 |
11 | import scala.concurrent.Future
12 |
/** Provides Persistence Queries for Raft shard */
private class RaftShardPersistenceQueries(
    queries: PersistenceQueries,
) {

  /** Returns a `Source` that emits all involved entity IDs after the given RaftActor's sequence number inclusive
    *
    * Note that the source might emit the same entity ID more than once.
    *
    * The following events contain involved entities:
    *  - [[lerna.akka.entityreplication.raft.RaftActor.AppendedEvent]]
    */
  def entityIdsAfter(
      raftActorId: RaftActorId,
      from: SequenceNr,
  ): Source[NormalizedEntityId, NotUsed] = {
    queries
      .currentEventsAfter(raftActorId.persistenceId, from)
      .mapConcat { envelope =>
        envelope.event match {
          case appended: AppendedEvent => appended.event.entityId.toSeq
          case _                       => Seq.empty
        }
      }
  }

  /** Finds the last `LogEntryIndex` with which the given `RaftActor` has truncated its `ReplicatedLog` entries
    *
    * The following events indicate that the RaftActor truncates its ReplicatedLog entries:
    *  - [[lerna.akka.entityreplication.raft.RaftActor.CompactionCompleted]]
    *  - [[lerna.akka.entityreplication.raft.RaftActor.SnapshotSyncCompleted]]
    */
  def findLastTruncatedLogEntryIndex(
      raftActorId: RaftActorId,
      from: SequenceNr,
  )(implicit materializer: Materializer): Future[Option[LogEntryIndex]] = {
    // `currentEventsBefore` emits events in descending sequence-number order,
    // so the first matching event corresponds to the last truncation.
    queries
      .currentEventsBefore(raftActorId.persistenceId, from)
      .map(envelope => envelope.event)
      .collect {
        case completed: CompactionCompleted       => completed.snapshotLastLogIndex
        case syncCompleted: SnapshotSyncCompleted => syncCompleted.snapshotLastLogIndex
      }
      .runWith(Sink.headOption)
  }

}
62 |
--------------------------------------------------------------------------------
/rollback-tool-cassandra/src/main/scala/lerna/akka/entityreplication/rollback/RaftShardRollbackParameters.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.rollback
2 |
3 | import lerna.akka.entityreplication.model.{ NormalizedShardId, TypeName }
4 | import lerna.akka.entityreplication.raft.routing.MemberIndex
5 |
6 | import java.time.Instant
7 |
/** Parameters for rolling back a Raft shard
  *
  * @param typeName type name of the replicated entity
  * @param shardId ID of the shard to roll back
  * @param allMemberIndices all member indices of the shard; must contain `leaderMemberIndex`
  * @param leaderMemberIndex the member index treated as the leader (presumably the leader after the rollback — confirm with callers)
  * @param toTimestamp the timestamp to roll the shard back to
  */
private final case class RaftShardRollbackParameters(
    typeName: TypeName,
    shardId: NormalizedShardId,
    allMemberIndices: Set[MemberIndex],
    leaderMemberIndex: MemberIndex,
    toTimestamp: Instant,
) {
  require(
    allMemberIndices.contains(leaderMemberIndex),
    s"allMemberIndices [$allMemberIndices] should contain the leader member index [$leaderMemberIndex]",
  )
}
20 |
--------------------------------------------------------------------------------
/rollback-tool-cassandra/src/main/scala/lerna/akka/entityreplication/rollback/RaftShardRollbackSettings.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.rollback
2 |
3 | import akka.actor.ActorSystem
4 | import com.typesafe.config.Config
5 |
6 | import scala.concurrent.duration.FiniteDuration
7 | import scala.jdk.DurationConverters._
8 |
private object RaftShardRollbackSettings {

  /** Creates a [[RaftShardRollbackSettings]] from the given system
    *
    * The root config instance for the settings is extracted from the config of the given system,
    * using config path `lerna.akka.entityreplication.rollback`.
    *
    * @throws java.lang.IllegalArgumentException if the config contains an invalid setting value
    */
  def apply(system: ActorSystem): RaftShardRollbackSettings =
    apply(system.settings.config.getConfig("lerna.akka.entityreplication.rollback"))

  /** Creates a [[RaftShardRollbackSettings]] from the given config
    *
    * @throws java.lang.IllegalArgumentException if the config contains an invalid setting value
    */
  def apply(config: Config): RaftShardRollbackSettings =
    new RaftShardRollbackSettings(
      dryRun = config.getBoolean("dry-run"),
      logProgressEvery = config.getInt("log-progress-every"),
      clockOutOfSyncTolerance = config.getDuration("clock-out-of-sync-tolerance").toScala,
      readParallelism = config.getInt("read-parallelism"),
      writeParallelism = config.getInt("write-parallelism"),
    )

}
47 |
/** Settings for rolling back a Raft shard
  *
  * Instances are created via the companion object's `apply` methods; the constructor is private.
  *
  * @param dryRun whether the rollback runs in dry-run mode
  * @param logProgressEvery interval, in number of processed elements, at which progress is logged (config `log-progress-every`) — must be greater than 0
  * @param clockOutOfSyncTolerance tolerance for clock out-of-sync between nodes (config `clock-out-of-sync-tolerance`)
  * @param readParallelism parallelism for reads (config `read-parallelism`) — must be greater than 0
  * @param writeParallelism parallelism for writes (config `write-parallelism`) — must be greater than 0
  */
final class RaftShardRollbackSettings private (
    val dryRun: Boolean,
    val logProgressEvery: Int,
    val clockOutOfSyncTolerance: FiniteDuration,
    val readParallelism: Int,
    val writeParallelism: Int,
) {
  require(logProgressEvery > 0, s"log-progress-every [$logProgressEvery] should be greater than 0")
  require(readParallelism > 0, s"read-parallelism [$readParallelism] should be greater than 0")
  require(writeParallelism > 0, s"write-parallelism [$writeParallelism] should be greater than 0")
}
59 |
--------------------------------------------------------------------------------
/rollback-tool-cassandra/src/main/scala/lerna/akka/entityreplication/rollback/RollbackException.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.rollback
2 |
/** Exception thrown when a rollback-related operation fails; base class of the rollback tool's exceptions */
class RollbackException private[rollback] (message: String) extends RuntimeException(message)
5 |
/** Exception thrown when rollback requirements are not found (see [[PersistentActorRollback.findRollbackRequirements]]) */
private class RollbackRequirementsNotFound(message: String)
    extends RollbackException(s"Rollback requirements not found: $message")
9 |
/** Exception thrown when a rollback request doesn't fulfill rollback requirements (see [[RollbackRequirementsVerifier]]) */
private class RollbackRequirementsNotFulfilled(message: String)
    extends RollbackException(s"Rollback requirements not fulfilled: $message")
13 |
/** Exception thrown when a rollback timestamp hint is not found (see [[RollbackTimestampHintFinder]]) */
private class RollbackTimestampHintNotFound(message: String)
    extends RollbackException(s"Rollback timestamp hint not found: $message")
17 |
--------------------------------------------------------------------------------
/rollback-tool-cassandra/src/main/scala/lerna/akka/entityreplication/rollback/RollbackRequirementsVerifier.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.rollback
2 |
3 | import akka.Done
4 |
5 | import java.time.Instant
6 | import scala.concurrent.Future
7 |
/** Verifies that a rollback request meets rollback requirements */
private trait RollbackRequirementsVerifier {

  /** Verifies that the given rollback request for `persistenceId` meets rollback requirements
    *
    * - If the request meets requirements, this returns a successful `Future`.
    * - Otherwise, this returns a failed `Future` containing a [[RollbackRequirementsNotFulfilled]].
    *
    * @param persistenceId persistence ID of the actor the request targets
    * @param toSequenceNr
    *   - `None` means deleting all data for rollback.
    *   - `Some(sequenceNr)` means rollback to `sequenceNr`.
    * @param toTimestampHintOpt
    *   - `None` means the request is not timestamp-based.
    *   - `Some(_)` means the request is timestamp-based.
    */
  def verify(
      persistenceId: String,
      toSequenceNr: Option[SequenceNr],
      toTimestampHintOpt: Option[Instant],
  ): Future[Done]

}
30 |
--------------------------------------------------------------------------------
/rollback-tool-cassandra/src/main/scala/lerna/akka/entityreplication/rollback/RollbackTimestampHintFinder.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.rollback
2 |
3 | import lerna.akka.entityreplication.rollback.RollbackTimestampHintFinder.TimestampHint
4 |
5 | import java.time.Instant
6 | import scala.concurrent.Future
7 |
private object RollbackTimestampHintFinder {

  /** Timestamp hint for rollback
    *
    * Timestamp-based rollback for the persistent actor with `persistenceId` requires a rollback timestamp newer than
    * or equal to `timestamp` of data (event or snapshot) with `sequenceNr`. While the timestamp is only a hint and
    * might not be strictly correct, it helps to know why a timestamp-based rollback request doesn't fulfill rollback
    * requirements.
    */
  final case class TimestampHint(
      persistenceId: String,
      sequenceNr: SequenceNr,
      timestamp: Instant,
  )

}
24 |
/** Finds a rollback timestamp hint for the requirements */
private trait RollbackTimestampHintFinder {

  /** Finds a rollback timestamp hint for the given rollback requirements
    *
    * If a hint is not found, this method returns a failed `Future` containing a [[RollbackTimestampHintNotFound]].
    *
    * @param requirements rollback requirements obtained from [[PersistentActorRollback.findRollbackRequirements]]
    * @see [[RollbackTimestampHintFinder.TimestampHint]]
    */
  def findTimestampHint(requirements: PersistentActorRollback.RollbackRequirements): Future[TimestampHint]

}
37 |
--------------------------------------------------------------------------------
/rollback-tool-cassandra/src/main/scala/lerna/akka/entityreplication/rollback/SequenceNr.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.rollback
2 |
/** The sequence number in Akka Persistence; always strictly positive */
private final case class SequenceNr(value: Long) extends Ordered[SequenceNr] {
  require(value > 0, s"value [$value] should be greater than 0")

  /** Returns a SequenceNr whose value is incremented by the given delta
    *
    * @throws java.lang.IllegalArgumentException if the new value will be zero or negative
    */
  def +(delta: Long): SequenceNr = copy(value = value + delta)

  /** @inheritdoc */
  override def compare(that: SequenceNr): Int =
    java.lang.Long.compare(this.value, that.value)

}
18 |
--------------------------------------------------------------------------------
/rollback-tool-cassandra/src/main/scala/lerna/akka/entityreplication/rollback/SequenceNrSearchStrategy.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.rollback
2 |
3 | import java.time.Instant
4 | import scala.concurrent.Future
5 |
/** Searches for the sequence number from events such that it meets a timestamp requirement */
private trait SequenceNrSearchStrategy {

  /** Returns the highest sequence number from events whose timestamp is less than or equal to the given one
    *
    * Returns `Future.successful(None)` if there is no such event.
    */
  def findUpperBound(persistenceId: String, timestamp: Instant): Future[Option[SequenceNr]]

}
13 |
--------------------------------------------------------------------------------
/rollback-tool-cassandra/src/main/scala/lerna/akka/entityreplication/rollback/cassandra/CassandraEventsByTagSettings.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.rollback.cassandra
2 |
3 | import com.typesafe.config.Config
4 |
private object CassandraEventsByTagSettings {

  /** Creates a [[CassandraEventsByTagSettings]] from the given config
    *
    * The given config should have the same structure as the one of Akka Persistence Cassandra plugin (`akka.persistence.cassandra`).
    */
  def apply(pluginConfig: Config): CassandraEventsByTagSettings =
    new CassandraEventsByTagSettings(
      keyspace = pluginConfig.getString("journal.keyspace"),
      table = pluginConfig.getString("events-by-tag.table"),
    )

}
20 |
/** Events-by-tag table settings of the Akka Persistence Cassandra plugin */
private final class CassandraEventsByTagSettings private (
    val keyspace: String,
    val table: String,
) {

  /** The tag_views table name qualified with the keyspace name */
  def tagViewsTableName: String = keyspace + "." + table

  /** The tag_write_progress table name qualified with the keyspace name */
  def tagWriteProgressTableName: String = keyspace + ".tag_write_progress"

  /** The tag_scanning table name qualified with the keyspace name */
  def tagScanningTableName: String = keyspace + ".tag_scanning"

}
36 |
--------------------------------------------------------------------------------
/rollback-tool-cassandra/src/main/scala/lerna/akka/entityreplication/rollback/cassandra/CassandraJournalSettings.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.rollback.cassandra
2 |
3 | import com.typesafe.config.Config
4 |
private object CassandraJournalSettings {

  /** Creates a [[CassandraJournalSettings]] from the given config
    *
    * The given config should have the same structure as the one of Akka Persistence Cassandra plugin
    * (`akka.persistence.cassandra`).
    *
    * @throws java.lang.IllegalArgumentException if the given config contains an invalid setting value
    */
  def apply(pluginConfig: Config): CassandraJournalSettings =
    new CassandraJournalSettings(
      readProfile = pluginConfig.getString("read-profile"),
      writeProfile = pluginConfig.getString("write-profile"),
      keyspace = pluginConfig.getString("journal.keyspace"),
      table = pluginConfig.getString("journal.table"),
      metadataTable = pluginConfig.getString("journal.metadata-table"),
      targetPartitionSize = pluginConfig.getLong("journal.target-partition-size"),
    )

}
38 |
/** Journal table settings of the Akka Persistence Cassandra plugin */
private final class CassandraJournalSettings private (
    val readProfile: String,
    val writeProfile: String,
    val keyspace: String,
    val table: String,
    val metadataTable: String,
    val targetPartitionSize: Long,
) {
  require(
    targetPartitionSize > 0,
    s"journal.target-partition-size [$targetPartitionSize] should be greater than 0",
  )

  /** The journal table name qualified with the keyspace name */
  def tableName: String = keyspace + "." + table

  /** The metadata table name qualified with the keyspace name */
  def metadataTableName: String = keyspace + "." + metadataTable

}
59 |
--------------------------------------------------------------------------------
/rollback-tool-cassandra/src/main/scala/lerna/akka/entityreplication/rollback/cassandra/CassandraPersistenceQueriesSettings.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.rollback.cassandra
2 |
3 | import akka.actor.ActorSystem
4 |
/** Settings of [[CassandraPersistenceQueries]]
  *
  * `pluginLocation` is an absolute config path pointing at an Akka Persistence Cassandra plugin config; each
  * `resolve*Settings` method extracts that config from the given system and derives the respective settings.
  */
private final class CassandraPersistenceQueriesSettings(
    val pluginLocation: String,
) {

  // Extracts the plugin config at `pluginLocation` from the given system's config.
  // Shared by all resolve methods to avoid repeating the lookup logic.
  private def resolvePluginConfig(system: ActorSystem) =
    system.settings.config.getConfig(pluginLocation)

  /** Resolves `pluginLocation` on the given system and then returns the journal plugin settings */
  def resolveJournalSettings(system: ActorSystem): CassandraJournalSettings =
    CassandraJournalSettings(resolvePluginConfig(system))

  /** Resolves `pluginLocation` on the given system and then returns the query plugin settings */
  def resolveQuerySettings(system: ActorSystem): CassandraQuerySettings =
    CassandraQuerySettings(resolvePluginConfig(system))

  /** Resolves `pluginLocation` on the given system and then returns the snapshot plugin settings */
  def resolveSnapshotSettings(system: ActorSystem): CassandraSnapshotSettings =
    CassandraSnapshotSettings(resolvePluginConfig(system))

}
29 |
--------------------------------------------------------------------------------
/rollback-tool-cassandra/src/main/scala/lerna/akka/entityreplication/rollback/cassandra/CassandraPersistenceQueriesStatements.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.rollback.cassandra
2 |
3 | import akka.actor.ActorSystem
4 |
/** CQL statements used for persistence queries
  *
  * Table names are resolved from the journal and snapshot settings of the given
  * [[CassandraPersistenceQueriesSettings]]. Bind parameters are positional, in the order listed in each WHERE clause.
  */
private final class CassandraPersistenceQueriesStatements(
    system: ActorSystem,
    settings: CassandraPersistenceQueriesSettings,
) {

  // Journal table settings resolved from the plugin config on the given system
  private val journalSettings: CassandraJournalSettings =
    settings.resolveJournalSettings(system)
  // Snapshot table settings resolved from the plugin config on the given system
  private val snapshotSettings: CassandraSnapshotSettings =
    settings.resolveSnapshotSettings(system)

  // Selects the highest sequence_nr within one journal partition (binds: persistence_id, partition_nr)
  val selectHighestSequenceNr: String =
    s"""
    SELECT sequence_nr FROM ${journalSettings.tableName}
    WHERE
      persistence_id = ? AND
      partition_nr = ?
    ORDER BY sequence_nr DESC
    LIMIT 1
    """

  // Selects events with sequence_nr >= ? in one partition, ascending (binds: persistence_id, partition_nr, sequence_nr)
  val selectMessagesFromAsc: String =
    s"""
    SELECT * FROM ${journalSettings.tableName}
    WHERE
      persistence_id = ? AND
      partition_nr = ? AND
      sequence_nr >= ?
    ORDER BY sequence_nr ASC
    """

  // Selects events with sequence_nr <= ? in one partition, descending (binds: persistence_id, partition_nr, sequence_nr)
  val selectMessagesFromDesc: String =
    s"""
    SELECT * FROM ${journalSettings.tableName}
    WHERE
      persistence_id = ? AND
      partition_nr = ? AND
      sequence_nr <= ?
    ORDER BY sequence_nr DESC
    """

  // Selects the deleted_to marker from the journal metadata table (binds: persistence_id)
  val selectDeletedTo: String =
    s"""
    SELECT deleted_to FROM ${journalSettings.metadataTableName}
    WHERE
      persistence_id = ?
    """

  // Selects the lowest snapshot sequence_nr at or above the given one (binds: persistence_id, sequence_nr)
  val selectLowestSnapshotSequenceNrFrom: String =
    s"""
    SELECT sequence_nr FROM ${snapshotSettings.tableName}
    WHERE
      persistence_id = ? AND
      sequence_nr >= ?
    ORDER BY sequence_nr ASC
    LIMIT 1
    """

}
63 |
--------------------------------------------------------------------------------
/rollback-tool-cassandra/src/main/scala/lerna/akka/entityreplication/rollback/cassandra/CassandraPersistentActorRollbackSettings.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.rollback.cassandra
2 |
3 | import akka.actor.ActorSystem
4 | import akka.persistence.cassandra.cleanup.CleanupSettings
5 | import akka.persistence.cassandra.reconciler.ReconciliationSettings
6 | import com.typesafe.config.{ Config, ConfigFactory }
7 |
private object CassandraPersistentActorRollbackSettings {

  /** Creates a [[CassandraPersistentActorRollbackSettings]] from the given system
    *
    * `pluginLocation` is an absolute config path to specify an Akka Persistence Cassandra plugin config. The Akka
    * Persistence Cassandra plugin config is resolved by using both `pluginLocation` and the given system.
    */
  def apply(
      system: ActorSystem,
      pluginLocation: String,
      dryRun: Boolean,
  ): CassandraPersistentActorRollbackSettings =
    new CassandraPersistentActorRollbackSettings(system = system, pluginLocation = pluginLocation, dryRun = dryRun)

}
24 |
/** Settings for rolling back persistent actors stored in Cassandra
  *
  * Resolves the Akka Persistence Cassandra plugin config at `pluginLocation` from the given system and derives the
  * journal, query, events-by-tag, snapshot, cleanup, and reconciliation settings from it.
  */
private final class CassandraPersistentActorRollbackSettings private (
    system: ActorSystem,
    val pluginLocation: String,
    val dryRun: Boolean,
) {

  // Akka Persistence Cassandra plugin config resolved at `pluginLocation`
  private val pluginConfig: Config =
    system.settings.config.getConfig(pluginLocation)

  private[cassandra] val journal: CassandraJournalSettings =
    CassandraJournalSettings(pluginConfig)

  private[cassandra] val query: CassandraQuerySettings =
    CassandraQuerySettings(pluginConfig)

  private[cassandra] val eventsByTag: CassandraEventsByTagSettings =
    CassandraEventsByTagSettings(pluginConfig)

  private[cassandra] val snapshot: CassandraSnapshotSettings =
    CassandraSnapshotSettings(pluginConfig)

  // Settings for Akka Persistence Cassandra's Cleanup tool: `plugin-location` and `dry-run` are forced to this
  // instance's values; everything else falls back to the plugin's `cleanup` section.
  private[cassandra] val cleanup: CleanupSettings = {
    val baseConfig = pluginConfig.getConfig("cleanup")
    val cleanupConfig: Config = ConfigFactory
      .parseString(
        s"""
           |plugin-location = "$pluginLocation"
           |dry-run = $dryRun
           |""".stripMargin,
      ).withFallback(baseConfig)
    new CleanupSettings(cleanupConfig)
  }

  // Settings for Akka Persistence Cassandra's Reconciliation tool: profiles are taken from the journal settings and
  // `plugin-location` is forced to this instance's value; everything else falls back to the plugin's `reconciler` section.
  private[cassandra] val reconciliation: ReconciliationSettings = {
    val baseConfig = pluginConfig.getConfig("reconciler")
    val reconciliationConfig: Config = ConfigFactory
      .parseString(
        s"""
           |read-profile = "${journal.readProfile}"
           |write-profile = "${journal.writeProfile}"
           |plugin-location = "$pluginLocation"
           |""".stripMargin,
      ).withFallback(baseConfig)
    new ReconciliationSettings(reconciliationConfig)
  }

  private[cassandra] val queries: CassandraPersistenceQueriesSettings =
    new CassandraPersistenceQueriesSettings(pluginLocation)

}
75 |
--------------------------------------------------------------------------------
/rollback-tool-cassandra/src/main/scala/lerna/akka/entityreplication/rollback/cassandra/CassandraPersistentActorRollbackStatements.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.rollback.cassandra
2 |
3 | private final class CassandraPersistentActorRollbackStatements(settings: CassandraPersistentActorRollbackSettings) {
4 |
5 | object journal {
6 | val deleteMessagesFrom: String =
7 | s"""
8 | DELETE FROM ${settings.journal.tableName}
9 | WHERE
10 | persistence_id = ? AND
11 | partition_nr = ? AND
12 | sequence_nr >= ?
13 | """
14 |
15 | val deleteTagViews: String =
16 | s"""
17 | DELETE FROM ${settings.eventsByTag.tagViewsTableName}
18 | WHERE
19 | tag_name = ? AND
20 | timebucket = ? AND
21 | timestamp = ? AND
22 | persistence_id = ? AND
23 | tag_pid_sequence_nr = ?
24 | """
25 |
26 | val insertTagWriteProgress: String =
27 | s"""
28 | INSERT INTO ${settings.eventsByTag.tagWriteProgressTableName}
29 | (persistence_id, tag, sequence_nr, tag_pid_sequence_nr, offset)
30 | VALUES (?, ?, ?, ?, ?)
31 | """
32 |
33 | val insertTagScanning: String =
34 | s"""
35 | INSERT INTO ${settings.eventsByTag.tagScanningTableName}
36 | (persistence_id, sequence_nr)
37 | VALUES (?, ?)
38 | """
39 |
40 | val deleteTagWriteProgress: String =
41 | s"""
42 | DELETE FROM ${settings.eventsByTag.tagWriteProgressTableName}
43 | WHERE
44 | persistence_id = ? AND
45 | tag = ?
46 | """
47 |
48 | val deleteTagScanning: String =
49 | s"""
50 | DELETE FROM ${settings.eventsByTag.tagScanningTableName}
51 | WHERE
52 | persistence_id = ?
53 | """
54 |
55 | }
56 |
57 | object snapshot {
58 | val deleteSnapshotsFrom: String =
59 | s"""
60 | DELETE FROM ${settings.snapshot.tableName}
61 | WHERE
62 | persistence_id = ? AND
63 | sequence_nr >= ?
64 | """
65 | }
66 |
67 | }
68 |
--------------------------------------------------------------------------------
/rollback-tool-cassandra/src/main/scala/lerna/akka/entityreplication/rollback/cassandra/CassandraQuerySettings.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.rollback.cassandra
2 |
3 | import com.typesafe.config.Config
4 |
private object CassandraQuerySettings {

  /** Creates a [[CassandraQuerySettings]] from the given config
    *
    * The given config should have the same structure as the one of Akka Persistence Cassandra plugin
    * (`akka.persistence.cassandra`).
    *
    * @throws java.lang.IllegalArgumentException if the given config contains an invalid setting value
    */
  def apply(pluginConfig: Config): CassandraQuerySettings =
    new CassandraQuerySettings(
      readProfile = pluginConfig.getString("query.read-profile"),
      maxBufferSize = pluginConfig.getInt("query.max-buffer-size"),
      deserializationParallelism = pluginConfig.getInt("query.deserialization-parallelism"),
    )

}
29 |
/** Settings of the query section of an Akka Persistence Cassandra plugin
  *
  * @param readProfile                value of `query.read-profile` (driver profile for query reads)
  * @param maxBufferSize              value of `query.max-buffer-size`; must be greater than 0
  * @param deserializationParallelism value of `query.deserialization-parallelism`; must be greater than 0
  */
private final class CassandraQuerySettings private (
    val readProfile: String,
    val maxBufferSize: Int,
    val deserializationParallelism: Int,
) {
  require(
    maxBufferSize > 0,
    s"query.max-buffer-size [$maxBufferSize] should be greater than 0",
  )
  require(
    deserializationParallelism > 0,
    s"query.deserialization-parallelism [$deserializationParallelism] should be greater than 0",
  )
}
44 |
--------------------------------------------------------------------------------
/rollback-tool-cassandra/src/main/scala/lerna/akka/entityreplication/rollback/cassandra/CassandraRaftShardRollbackSettings.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.rollback.cassandra
2 |
3 | import akka.actor.{ ActorSystem, ClassicActorSystemProvider }
4 | import com.typesafe.config.Config
5 | import lerna.akka.entityreplication.rollback.RaftShardRollbackSettings
6 |
object CassandraRaftShardRollbackSettings {

  /** Creates a [[CassandraRaftShardRollbackSettings]] from the given system
    *
    * The root config instance for the settings is extracted from the config of the given system.
    * Config path `lerna.akka.entityreplication.rollback` is used for the extraction.
    *
    * @throws java.lang.IllegalArgumentException if the config contains an invalid setting value
    */
  def apply(system: ClassicActorSystemProvider): CassandraRaftShardRollbackSettings =
    CassandraRaftShardRollbackSettings(
      system,
      system.classicSystem.settings.config.getConfig("lerna.akka.entityreplication.rollback"),
    )

  /** Creates a [[CassandraRaftShardRollbackSettings]] from the given system and config
    *
    * The given config should have the same structure at config path `lerna.akka.entityreplication.rollback`.
    *
    * @throws java.lang.IllegalArgumentException if the config contains an invalid setting value
    */
  def apply(system: ClassicActorSystemProvider, config: Config): CassandraRaftShardRollbackSettings =
    new CassandraRaftShardRollbackSettings(
      system.classicSystem,
      RaftShardRollbackSettings(config),
      config.getString("cassandra.raft-persistence-plugin-location"),
      config.getString("cassandra.raft-eventsourced-persistence-plugin-location"),
    )

}
43 |
44 | /** Settings for [[CassandraRaftShardRollback]] */
/** Settings for [[CassandraRaftShardRollback]]
  *
  * @param system                                      actor system used to resolve plugin configs
  * @param rollbackSettings                            general (plugin-independent) rollback settings
  * @param raftPersistencePluginLocation               config path of the Raft persistence plugin
  * @param raftEventSourcedPersistencePluginLocation   config path of the Raft event-sourced persistence plugin
  */
final class CassandraRaftShardRollbackSettings private (
    system: ActorSystem,
    val rollbackSettings: RaftShardRollbackSettings,
    val raftPersistencePluginLocation: String,
    val raftEventSourcedPersistencePluginLocation: String,
) {

  // Rollback settings resolved from the Raft persistence plugin location
  private[cassandra] val raftPersistenceRollbackSettings =
    CassandraPersistentActorRollbackSettings(
      system,
      raftPersistencePluginLocation,
      rollbackSettings.dryRun,
    )

  // Rollback settings resolved from the Raft event-sourced persistence plugin location
  private[cassandra] val raftEventSourcedPersistenceRollbackSettings =
    CassandraPersistentActorRollbackSettings(
      system,
      raftEventSourcedPersistencePluginLocation,
      rollbackSettings.dryRun,
    )

}
67 |
--------------------------------------------------------------------------------
/rollback-tool-cassandra/src/main/scala/lerna/akka/entityreplication/rollback/cassandra/CassandraSnapshotSettings.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.rollback.cassandra
2 |
3 | import com.typesafe.config.Config
4 |
private object CassandraSnapshotSettings {

  /** Creates a [[CassandraSnapshotSettings]] from the given config
    *
    * The given config should have the same structure as the one of Akka Persistence Cassandra plugin
    * (`akka.persistence.cassandra`).
    */
  def apply(pluginConfig: Config): CassandraSnapshotSettings =
    new CassandraSnapshotSettings(
      readProfile = pluginConfig.getString("snapshot.read-profile"),
      writeProfile = pluginConfig.getString("snapshot.write-profile"),
      keyspace = pluginConfig.getString("snapshot.keyspace"),
      table = pluginConfig.getString("snapshot.table"),
    )

}
30 |
/** Settings of the snapshot section of an Akka Persistence Cassandra plugin */
private final class CassandraSnapshotSettings private (
    val readProfile: String,
    val writeProfile: String,
    val keyspace: String,
    val table: String,
) {

  /** The table name qualified with the keyspace name */
  def tableName: String = keyspace + "." + table

}
42 |
--------------------------------------------------------------------------------
/rollback-tool-cassandra/src/main/scala/lerna/akka/entityreplication/rollback/cassandra/PartitionNr.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.rollback.cassandra
2 |
3 | import lerna.akka.entityreplication.rollback.SequenceNr
4 |
private object PartitionNr {

  /** Returns a PartitionNr that the given sequence number belongs to
    *
    * @throws java.lang.IllegalArgumentException if the given partition size is less than 1
    */
  def fromSequenceNr(sequenceNr: SequenceNr, partitionSize: Long): PartitionNr = {
    require(partitionSize > 0, s"partitionSize [$partitionSize] should be greater than 0")
    // Sequence numbers start at 1, so shift to a zero-based value before dividing.
    val zeroBasedSequenceNr = sequenceNr.value - 1L
    PartitionNr(zeroBasedSequenceNr / partitionSize)
  }

}
17 |
18 | /** The partition number in Akka Persistence Cassandra */
/** The partition number in Akka Persistence Cassandra */
private final case class PartitionNr(value: Long) extends Ordered[PartitionNr] {
  require(value >= 0, s"value [$value] should be greater than or equal to 0")

  /** Returns a PartitionNr whose value is incremented by the given delta
    *
    * NOTE(review): `value + delta` can overflow Long for extreme inputs; the constructor's
    * require only rejects negative results — confirm callers stay within range.
    *
    * @throws java.lang.IllegalArgumentException if the new value will be negative
    */
  def +(delta: Long): PartitionNr = PartitionNr(value + delta)

  /** @inheritdoc */
  override def compare(that: PartitionNr): Int =
    this.value.compare(that.value)

}
33 |
--------------------------------------------------------------------------------
/rollback-tool-cassandra/src/main/scala/lerna/akka/entityreplication/rollback/setup/CommitLogStoreActorId.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.rollback.setup
2 |
3 | import lerna.akka.entityreplication.model.{ NormalizedShardId, TypeName }
4 | import lerna.akka.entityreplication.raft.eventsourced.CommitLogStoreActor
5 |
/** Identifies a CommitLogStoreActor by its type name and shard ID */
private[rollback] final case class CommitLogStoreActorId(
    typeName: TypeName,
    shardId: NormalizedShardId,
) {

  // Persistence ID derived the same way CommitLogStoreActor derives its own
  lazy val persistenceId: String =
    CommitLogStoreActor.persistenceId(typeName, shardId.raw)

}
15 |
--------------------------------------------------------------------------------
/rollback-tool-cassandra/src/main/scala/lerna/akka/entityreplication/rollback/setup/CommitLogStoreActorRollbackSetup.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.rollback.setup
2 |
3 | import lerna.akka.entityreplication.rollback.SequenceNr
4 |
/** Rollback setup for a CommitLogStoreActor
  *
  * `to` is the sequence number to roll back to; `None` means all data will be deleted.
  */
private[rollback] final case class CommitLogStoreActorRollbackSetup(
    id: CommitLogStoreActorId,
    to: Option[SequenceNr],
)
9 |
--------------------------------------------------------------------------------
/rollback-tool-cassandra/src/main/scala/lerna/akka/entityreplication/rollback/setup/RaftActorId.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.rollback.setup
2 |
3 | import lerna.akka.entityreplication.model.{ NormalizedShardId, TypeName }
4 | import lerna.akka.entityreplication.raft.routing.MemberIndex
5 | import lerna.akka.entityreplication.util.ActorIds
6 |
/** Identifies a RaftActor by its type name, shard ID, and member index */
private[rollback] final case class RaftActorId(
    typeName: TypeName,
    shardId: NormalizedShardId,
    memberIndex: MemberIndex,
) {

  // Persistence ID built with the same scheme a RaftActor uses ("raft" prefix)
  lazy val persistenceId: String =
    ActorIds.persistenceId("raft", typeName.underlying, shardId.underlying, memberIndex.role)

}
17 |
--------------------------------------------------------------------------------
/rollback-tool-cassandra/src/main/scala/lerna/akka/entityreplication/rollback/setup/RaftActorRollbackSetup.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.rollback.setup
2 |
3 | import lerna.akka.entityreplication.rollback.SequenceNr
4 |
/** Rollback setup for a RaftActor
  *
  * `to` is the sequence number to roll back to; `None` means all data will be deleted.
  */
private[rollback] final case class RaftActorRollbackSetup(
    id: RaftActorId,
    to: Option[SequenceNr],
)
9 |
--------------------------------------------------------------------------------
/rollback-tool-cassandra/src/main/scala/lerna/akka/entityreplication/rollback/setup/RaftShardRollbackSetup.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.rollback.setup
2 |
private[rollback] object RaftShardRollbackSetup {

  /** Builds a [[RaftShardRollbackSetup]] from the per-actor setups of one Raft shard
    *
    * A `to` of `None` is translated to sequence number 0, which means all data
    * of that persistent actor will be deleted.
    */
  def apply(
      raftActorRollbackSetups: Seq[RaftActorRollbackSetup],
      snapshotStoreRollbackSetups: Seq[SnapshotStoreRollbackSetup],
      snapshotSyncManagerRollbackSetups: Seq[SnapshotSyncManagerRollbackSetup],
      commitLogStoreActorRollbackSetup: CommitLogStoreActorRollbackSetup,
  ): RaftShardRollbackSetup = {
    // Collect (persistenceId, to) pairs first so the conversion to RollbackSetup is written once.
    val raftPersistencePairs =
      raftActorRollbackSetups.map(s => (s.id.persistenceId, s.to)) ++
      snapshotStoreRollbackSetups.map(s => (s.id.persistenceId, s.to)) ++
      snapshotSyncManagerRollbackSetups.map(s => (s.id.persistenceId, s.to))
    val raftPersistenceRollbackSetups: Seq[RollbackSetup] =
      raftPersistencePairs.map { case (persistenceId, to) =>
        RollbackSetup(persistenceId, to.fold(0L)(_.value))
      }
    val raftEventSourcedPersistenceRollbackSetups: Seq[RollbackSetup] = Seq(
      RollbackSetup(
        commitLogStoreActorRollbackSetup.id.persistenceId,
        commitLogStoreActorRollbackSetup.to.fold(0L)(_.value),
      ),
    )
    new RaftShardRollbackSetup(raftPersistenceRollbackSetups, raftEventSourcedPersistenceRollbackSetups)
  }

}
26 |
27 | /** Rollback setup for one Raft shard */
28 | final class RaftShardRollbackSetup private[rollback] (
29 | val raftPersistenceRollbackSetups: Seq[RollbackSetup],
30 | val raftEventSourcedPersistenceRollbackSetups: Seq[RollbackSetup],
31 | )
32 |
--------------------------------------------------------------------------------
/rollback-tool-cassandra/src/main/scala/lerna/akka/entityreplication/rollback/setup/RollbackSetup.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.rollback.setup
2 |
3 | /** Rollback setup for the persistent actor
4 | *
5 | * The persistent actor (id = `persistenceId`) will be rolled back to the sequence number (`toSequenceNr`).
6 | * If `toSequenceNr` is equal to `0`, all data for the persistent actor will be deleted.
7 | */
/** Rollback setup for the persistent actor
  *
  * The persistent actor (id = `persistenceId`) will be rolled back to the sequence number (`toSequenceNr`).
  * If `toSequenceNr` is equal to `0`, all data for the persistent actor will be deleted.
  *
  * @throws java.lang.IllegalArgumentException if `toSequenceNr` is negative
  */
final case class RollbackSetup(persistenceId: String, toSequenceNr: Long) {
  require(toSequenceNr >= 0, s"toSequenceNr [$toSequenceNr] should be greater than or equal to 0")
}
11 |
--------------------------------------------------------------------------------
/rollback-tool-cassandra/src/main/scala/lerna/akka/entityreplication/rollback/setup/SnapshotStoreId.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.rollback.setup
2 |
3 | import lerna.akka.entityreplication.model.{ NormalizedEntityId, TypeName }
4 | import lerna.akka.entityreplication.raft.routing.MemberIndex
5 | import lerna.akka.entityreplication.raft.snapshot.SnapshotStore
6 |
/** Identifies a SnapshotStore by its type name, member index, and entity ID */
private[rollback] final case class SnapshotStoreId(
    typeName: TypeName,
    memberIndex: MemberIndex,
    entityId: NormalizedEntityId,
) {

  // Persistence ID derived the same way SnapshotStore derives its own
  lazy val persistenceId: String =
    SnapshotStore.persistenceId(typeName, entityId, memberIndex)

}
17 |
--------------------------------------------------------------------------------
/rollback-tool-cassandra/src/main/scala/lerna/akka/entityreplication/rollback/setup/SnapshotStoreRollbackSetup.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.rollback.setup
2 |
3 | import lerna.akka.entityreplication.rollback.SequenceNr
4 |
/** Rollback setup for a SnapshotStore
  *
  * `to` is the sequence number to roll back to; `None` means all data will be deleted.
  */
private[rollback] final case class SnapshotStoreRollbackSetup(
    id: SnapshotStoreId,
    to: Option[SequenceNr],
)
9 |
--------------------------------------------------------------------------------
/rollback-tool-cassandra/src/main/scala/lerna/akka/entityreplication/rollback/setup/SnapshotSyncManagerId.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.rollback.setup
2 |
3 | import lerna.akka.entityreplication.model.{ NormalizedShardId, TypeName }
4 | import lerna.akka.entityreplication.raft.routing.MemberIndex
5 | import lerna.akka.entityreplication.raft.snapshot.sync.SnapshotSyncManager
6 |
/** Identifies a SnapshotSyncManager by its type name, shard ID, and the source/destination member indices */
private[rollback] final case class SnapshotSyncManagerId(
    typeName: TypeName,
    shardId: NormalizedShardId,
    sourceMemberIndex: MemberIndex,
    destinationMemberIndex: MemberIndex,
) {

  // Persistence ID derived the same way SnapshotSyncManager derives its own
  lazy val persistenceId: String =
    SnapshotSyncManager.persistenceId(typeName, sourceMemberIndex, destinationMemberIndex, shardId)

}
18 |
--------------------------------------------------------------------------------
/rollback-tool-cassandra/src/main/scala/lerna/akka/entityreplication/rollback/setup/SnapshotSyncManagerRollbackSetup.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.rollback.setup
2 |
3 | import lerna.akka.entityreplication.rollback.SequenceNr
4 |
/** Rollback setup for a SnapshotSyncManager
  *
  * `to` is the sequence number to roll back to; `None` means all data will be deleted.
  */
private[rollback] final case class SnapshotSyncManagerRollbackSetup(
    id: SnapshotSyncManagerId,
    to: Option[SequenceNr],
)
9 |
--------------------------------------------------------------------------------
/rollback-tool-cassandra/src/multi-jvm/resources/application.conf:
--------------------------------------------------------------------------------
1 | akka {
2 | loggers = ["akka.event.slf4j.Slf4jLogger"]
3 | loglevel = "DEBUG"
4 | logging-filter = "akka.event.slf4j.Slf4jLoggingFilter"
5 | extensions = ["lerna.akka.entityreplication.ClusterReplication"]
6 | actor {
7 | provider = cluster
8 | serialize-messages = off
9 | }
10 | }
11 | akka.remote.artery.canonical.port = 0
12 | akka.cluster {
13 | jmx.enabled = off
14 | }
15 |
16 | lerna.akka.entityreplication.raft {
17 | heartbeat-interval = 300ms
18 | election-timeout = 2000ms
19 | number-of-shards = 5
20 | raft-actor-auto-start {
21 | frequency = 200ms
22 | number-of-actors = 2
23 | retry-interval = 500ms
24 | }
25 | persistence {
26 | journal.plugin = "akka.persistence.cassandra.journal"
27 | snapshot-store.plugin = "akka.persistence.cassandra.snapshot"
28 | query.plugin = "akka.persistence.cassandra.query"
29 | }
30 | eventsourced {
31 | committed-log-entries-check-interval = 300ms
32 | persistence {
33 | journal.plugin = "akka.persistence.cassandra.journal"
34 | snapshot-store.plugin = "akka.persistence.cassandra.snapshot"
35 | }
36 | }
37 | }
38 |
39 | // Needed for initializing the default journal plugin with event adapters by PersistenceInitializationAwaiter
40 | akka.persistence.cassandra.journal = ${akka.persistence.cassandra.journal} ${lerna.akka.entityreplication.raft.persistence.journal-plugin-additional}
41 |
--------------------------------------------------------------------------------
/rollback-tool-cassandra/src/multi-jvm/resources/logback.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | %level\t%logger\t%X{akkaSource}\t%msg%n
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
--------------------------------------------------------------------------------
/rollback-tool-cassandra/src/multi-jvm/scala/lerna/akka/entityreplication/rollback/cassandra/STMultiNodeSpec.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.rollback.cassandra
2 |
3 | import akka.Done
4 | import akka.cluster.Cluster
5 | import akka.cluster.ClusterEvent.{ CurrentClusterState, MemberRemoved, MemberUp }
6 | import akka.remote.testconductor.RoleName
7 | import akka.remote.testkit.{ MultiNodeSpec, MultiNodeSpecCallbacks }
8 | import akka.testkit.TestProbe
9 | import org.scalatest.{ BeforeAndAfterAll, Matchers, WordSpecLike }
10 |
11 | import scala.concurrent.duration.DurationInt
12 |
13 | /** ScalaTest with MultiNodeSpec */
/** ScalaTest with MultiNodeSpec */
trait STMultiNodeSpec extends MultiNodeSpecCallbacks with WordSpecLike with Matchers with BeforeAndAfterAll {
  this: MultiNodeSpec =>

  val cluster: Cluster = Cluster(system)

  override def beforeAll(): Unit = {
    super.beforeAll()
    multiNodeSpecBeforeAll()
  }

  override def afterAll(): Unit = {
    try multiNodeSpecAfterAll()
    finally super.afterAll()
  }

  /** Creates a test probe to subscribe cluster events */
  def clusterEventProbe(): TestProbe = {
    val probe = TestProbe()
    cluster.subscribe(probe.ref, classOf[MemberUp], classOf[MemberRemoved])
    probe
  }

  /** Forms a new cluster consisting of the given nodes.
    *
    * Each node joins the seed node, then waits (up to 30 seconds) until its own member is seen
    * as Up — either in the initial [[CurrentClusterState]] snapshot or via a [[MemberUp]] event.
    */
  def newCluster(seedNode: RoleName, nodes: RoleName*): Unit = {
    val allNodes = seedNode +: nodes
    runOn(allNodes: _*) {
      val probe = clusterEventProbe()
      cluster.join(node(seedNode).address)
      probe.fishForSpecificMessage(max = 30.seconds) {
        // FIX: the previous guard compared the Set of member addresses with a single address
        // (`members.map(_.address) == myAddress`), which is always false, so the snapshot case
        // could never match. Use `contains` to detect that this node is already a member.
        case currentState: CurrentClusterState if currentState.members.map(_.address).contains(myAddress) => Done
        case memberUp: MemberUp if memberUp.member.address == myAddress                                   => Done
      }
    }
  }

}
50 |
--------------------------------------------------------------------------------
/rollback-tool-cassandra/src/test/resources/application.conf:
--------------------------------------------------------------------------------
1 | akka {
2 | loggers = ["akka.event.slf4j.Slf4jLogger"]
3 | loglevel = "DEBUG"
4 | logging-filter = "akka.event.slf4j.Slf4jLoggingFilter"
5 | }
6 | akka.actor.serialize-messages = on
7 | akka.actor.serialization-bindings {
8 | "lerna.akka.entityreplication.rollback.JsonSerializable" = jackson-json
9 | }
10 |
11 | akka.test.single-expect-default = 15s
12 | akka.test.filter-leeway = 15s
13 | akka.test.default-timeout = 15s
--------------------------------------------------------------------------------
/rollback-tool-cassandra/src/test/resources/logback.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | %level\t%logger\t%X{akkaSource}\t%msg%n
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
--------------------------------------------------------------------------------
/rollback-tool-cassandra/src/test/scala/lerna/akka/entityreplication/rollback/JsonSerializable.scala:
--------------------------------------------------------------------------------
package lerna.akka.entityreplication.rollback

/** Marker trait, indicates an instance can be serialized as a JSON format
  *
  * Bound to the `jackson-json` serializer in the test configuration
  * (`akka.actor.serialization-bindings`).
  */
trait JsonSerializable
5 |
--------------------------------------------------------------------------------
/rollback-tool-cassandra/src/test/scala/lerna/akka/entityreplication/rollback/RaftShardRollbackParametersSpec.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.rollback
2 |
3 | import lerna.akka.entityreplication.model.{ NormalizedShardId, TypeName }
4 | import lerna.akka.entityreplication.raft.routing.MemberIndex
5 | import org.scalatest.{ Matchers, WordSpec }
6 |
7 | import java.time.Instant
8 |
/** Tests the validation of [[RaftShardRollbackParameters]] */
final class RaftShardRollbackParametersSpec extends WordSpec with Matchers {

  "RaftShardRollbackParameters" should {

    "throw an IllegalArgumentException if the given allMemberIndices doesn't contain the leader member index" in {
      val exception = intercept[IllegalArgumentException] {
        RaftShardRollbackParameters(
          TypeName.from("example"),
          NormalizedShardId.from("1"),
          // The leader member index below is deliberately absent from this set.
          Set(
            MemberIndex("replica-group-1"),
            MemberIndex("replica-group-2"),
            MemberIndex("replica-group-3"),
          ),
          MemberIndex("replica-group-4"),
          Instant.now(),
        )
      }
      exception.getMessage should be(
        "requirement failed: allMemberIndices [Set(replica-group-1, replica-group-2, replica-group-3)] " +
        "should contain the leader member index [replica-group-4]",
      )
    }

  }

}
36 |
--------------------------------------------------------------------------------
/rollback-tool-cassandra/src/test/scala/lerna/akka/entityreplication/rollback/cassandra/CassandraEventsByTagSettingsSpec.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.rollback.cassandra
2 |
3 | import com.typesafe.config.{ Config, ConfigFactory }
4 | import org.scalatest.{ Matchers, WordSpec }
5 |
/** Tests config loading and table-name derivation of [[CassandraEventsByTagSettings]] */
final class CassandraEventsByTagSettingsSpec extends WordSpec with Matchers {

  // Default plugin config of Akka Persistence Cassandra (from reference.conf)
  private val defaultPluginConfig: Config =
    ConfigFactory.load().getConfig("akka.persistence.cassandra")

  // Overrides only the journal keyspace and the tag-views table name
  private val customPluginConfig: Config = ConfigFactory.parseString("""
    |journal {
    |  keyspace = "custom_akka"
    |}
    |events-by-tag {
    |  table = "custom_tag_views"
    |}
    |""".stripMargin)

  "CassandraEventsByTagSettings" should {

    "load the default config" in {
      val settings = CassandraEventsByTagSettings(defaultPluginConfig)
      settings.keyspace should be("akka")
      settings.table should be("tag_views")
    }

    "load the given custom config" in {
      val settings = CassandraEventsByTagSettings(customPluginConfig)
      settings.keyspace should be("custom_akka")
      settings.table should be("custom_tag_views")
    }

  }

  "CassandraEventsByTagSettings.tagViewsTableName" should {

    "return the tag_views table name qualified with the keyspace name" in {
      val settings = CassandraEventsByTagSettings(customPluginConfig)
      settings.keyspace should be("custom_akka")
      settings.table should be("custom_tag_views")
      settings.tagViewsTableName should be("custom_akka.custom_tag_views")
    }

  }

  "CassandraEventsByTagSettings.tagWriteProgressTableName" should {

    "return the tag_write_progress table name qualified with the keyspace name" in {
      val settings = CassandraEventsByTagSettings(customPluginConfig)
      settings.keyspace should be("custom_akka")
      settings.tagWriteProgressTableName should be("custom_akka.tag_write_progress")
    }

  }

  "CassandraEventsByTagSettings.tagScanningTableName" should {

    "return the tag_scanning table name qualified with the keyspace name" in {
      val settings = CassandraEventsByTagSettings(customPluginConfig)
      settings.keyspace should be("custom_akka")
      settings.tagScanningTableName should be("custom_akka.tag_scanning")
    }

  }

}
68 |
--------------------------------------------------------------------------------
/rollback-tool-cassandra/src/test/scala/lerna/akka/entityreplication/rollback/cassandra/CassandraPersistenceQueriesSettingsSpec.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.rollback.cassandra
2 |
3 | import akka.actor.ActorSystem
4 | import akka.testkit.TestKitBase
5 | import com.typesafe.config.{ Config, ConfigFactory }
6 | import org.scalatest.{ BeforeAndAfterAll, Matchers, WordSpecLike }
7 |
/** Tests plugin-location resolution of [[CassandraPersistenceQueriesSettings]] */
final class CassandraPersistenceQueriesSettingsSpec
    extends TestKitBase
    with WordSpecLike
    with BeforeAndAfterAll
    with Matchers {

  // Defines a custom plugin at a non-default location, derived from the default plugin config,
  // so that resolution by config path can be verified.
  private val config: Config = ConfigFactory
    .parseString(s"""
    |custom.akka.persistence.cassandra = $${akka.persistence.cassandra} {
    |  read-profile = "custom_akka-persistence-cassandra-read-profile"
    |  write-profile = "custom_akka-persistence-cassandra-write-profile"
    |  query {
    |    read-profile = "custom_akka-persistence-cassandra-query-profile"
    |  }
    |  snapshot {
    |    read-profile = "custom_akka-persistence-cassandra-snapshot-read-profile"
    |    write-profile = "custom_akka-persistence-cassandra-snapshot-write-profile"
    |  }
    |}
    |""".stripMargin)
    .withFallback(ConfigFactory.load())
    .resolve()

  override implicit val system: ActorSystem =
    ActorSystem(getClass.getSimpleName, config)

  override def afterAll(): Unit = {
    shutdown(system)
    super.afterAll()
  }

  "CassandraPersistenceQueriesSettings.resolveJournalSettings" should {

    "resolve the plugin location and then return the journal plugin settings" in {
      val settings        = new CassandraPersistenceQueriesSettings("custom.akka.persistence.cassandra")
      val journalSettings = settings.resolveJournalSettings(system)
      journalSettings.readProfile should be("custom_akka-persistence-cassandra-read-profile")
      journalSettings.writeProfile should be("custom_akka-persistence-cassandra-write-profile")
    }

  }

  "CassandraPersistenceQueriesSettings.resolveQuerySettings" should {

    "resolve the plugin location and then return the query plugin settings" in {
      val settings      = new CassandraPersistenceQueriesSettings("custom.akka.persistence.cassandra")
      val querySettings = settings.resolveQuerySettings(system)
      querySettings.readProfile should be("custom_akka-persistence-cassandra-query-profile")
    }

  }

  "CassandraPersistenceQueriesSettings.resolveSnapshotSettings" should {

    "resolve the plugin location and then return the snapshot plugin settings" in {
      val settings         = new CassandraPersistenceQueriesSettings("custom.akka.persistence.cassandra")
      val snapshotSettings = settings.resolveSnapshotSettings(system)
      snapshotSettings.readProfile should be("custom_akka-persistence-cassandra-snapshot-read-profile")
      snapshotSettings.writeProfile should be("custom_akka-persistence-cassandra-snapshot-write-profile")
    }

  }

}
72 |
--------------------------------------------------------------------------------
/rollback-tool-cassandra/src/test/scala/lerna/akka/entityreplication/rollback/cassandra/CassandraSnapshotSettingsSpec.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.rollback.cassandra
2 |
3 | import com.typesafe.config.{ Config, ConfigFactory }
4 | import org.scalatest.{ Matchers, WordSpec }
5 |
/** Tests config loading and table-name derivation of [[CassandraSnapshotSettings]] */
final class CassandraSnapshotSettingsSpec extends WordSpec with Matchers {

  // Default plugin config of Akka Persistence Cassandra (from reference.conf)
  private val defaultPluginConfig: Config =
    ConfigFactory.load().getConfig("akka.persistence.cassandra")

  // Overrides every snapshot setting this class reads
  private val customPluginConfig: Config = ConfigFactory.parseString("""
    |snapshot {
    |  read-profile = "custom_akka-persistence-cassandra-snapshot-read-profile"
    |  write-profile = "custom_akka-persistence-cassandra-snapshot-write-profile"
    |  keyspace = "custom_akka_snapshot"
    |  table = "custom_snapshots"
    |}
    |""".stripMargin)

  "CassandraSnapshotSettings" should {

    "load the default config" in {
      val settings = CassandraSnapshotSettings(defaultPluginConfig)
      settings.readProfile should be("akka-persistence-cassandra-snapshot-profile")
      settings.writeProfile should be("akka-persistence-cassandra-snapshot-profile")
      settings.keyspace should be("akka_snapshot")
      settings.table should be("snapshots")
    }

    "load the given custom config" in {
      val settings = CassandraSnapshotSettings(customPluginConfig)
      settings.readProfile should be("custom_akka-persistence-cassandra-snapshot-read-profile")
      settings.writeProfile should be("custom_akka-persistence-cassandra-snapshot-write-profile")
      settings.keyspace should be("custom_akka_snapshot")
      settings.table should be("custom_snapshots")
    }

  }

  "CassandraSnapshotSettings.tableName" should {

    "return the table name qualified with the keyspace name" in {
      val settings = CassandraSnapshotSettings(customPluginConfig)
      settings.keyspace should be("custom_akka_snapshot")
      settings.table should be("custom_snapshots")
      settings.tableName should be("custom_akka_snapshot.custom_snapshots")
    }

  }

}
52 |
--------------------------------------------------------------------------------
/rollback-tool-cassandra/src/test/scala/lerna/akka/entityreplication/rollback/cassandra/CassandraSpecBase.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.rollback.cassandra
2 |
3 | import akka.actor.ActorSystem
4 | import akka.persistence.cassandra.testkit.CassandraLauncher
5 | import akka.testkit.TestKitBase
6 | import com.typesafe.config.{ Config, ConfigFactory }
7 | import lerna.akka.entityreplication.rollback.cassandra.testkit.PersistenceCassandraConfigProvider
8 | import lerna.akka.entityreplication.rollback.testkit.{
9 | PatienceConfigurationForTestKitBase,
10 | PersistenceInitializationAwaiter,
11 | }
12 | import org.scalatest.concurrent.{ Eventually, ScalaFutures }
13 | import org.scalatest.{ BeforeAndAfterAll, Matchers, WordSpecLike }
14 |
15 | import java.util.concurrent.atomic.AtomicLong
16 |
/** Base class for tests that run against an embedded Cassandra instance
  *
  * Launches Cassandra once in `beforeAll` and waits for the persistence plugins to
  * initialize. Provides helpers to generate unique persistence IDs and tag names so
  * tests do not interfere with each other.
  *
  * @param name           used as the actor-system name and as the default keyspace name
  * @param overrideConfig extra config with the highest precedence for the actor system
  */
abstract class CassandraSpecBase(
    name: String,
    overrideConfig: Config = ConfigFactory.empty,
) extends TestKitBase
    with WordSpecLike
    with Matchers
    with ScalaFutures
    with Eventually
    with BeforeAndAfterAll
    with PatienceConfigurationForTestKitBase
    with PersistenceCassandraConfigProvider {

  /** Keyspace the journal plugin use */
  protected def journalKeyspace: String = name

  /** Keyspace the snapshot plugin use */
  protected def snapshotKeyspace: String = name

  override implicit lazy val system: ActorSystem = {
    val config =
      overrideConfig
        .withFallback(persistenceCassandraConfig(journalKeyspace, snapshotKeyspace, autoCreate = true))
        .withFallback(ConfigFactory.load())
        .resolve()
    ActorSystem(name, config)
  }

  override def beforeAll(): Unit = {
    super.beforeAll()
    // NOTE: Cassandra is running until all tests are done.
    CassandraLauncher.main(Array(s"${cassandraPort}", "true"))
    PersistenceInitializationAwaiter(system).awaitInit()
  }

  override def afterAll(): Unit = {
    shutdown(system)
    super.afterAll()
  }

  // Counter backing nextPersistenceId()
  private val currentPersistenceId: AtomicLong = new AtomicLong(0)

  /** Returns the next (unique) persistence ID */
  protected def nextPersistenceId(): String = {
    s"$name-${currentPersistenceId.incrementAndGet()}"
  }

  // Counter backing nextUniqueTag()
  private val currentTagId: AtomicLong = new AtomicLong(0)

  /** Returns the next unique tag name */
  protected def nextUniqueTag(): String = {
    s"$name-tag-${currentTagId.incrementAndGet()}"
  }

}
71 |
--------------------------------------------------------------------------------
/rollback-tool-cassandra/src/test/scala/lerna/akka/entityreplication/rollback/cassandra/testkit/FirstTimeBucket.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.rollback.cassandra.testkit
2 |
3 | import java.time.{ LocalDateTime, ZoneOffset, ZonedDateTime }
4 | import java.time.format.DateTimeFormatter
5 |
object FirstTimeBucket {

  // Pattern reference:
  // https://docs.oracle.com/javase/8/docs/api/java/time/format/DateTimeFormatter.html
  private val formatter: DateTimeFormatter =
    DateTimeFormatter.ofPattern("yyyyMMdd'T'HH:mm")

  /** Returns a formatted string for first-time-bucket of Akka Persistence Cassandra
    *
    * The given time is converted to the UTC timezone before formatting.
    *
    * @see https://doc.akka.io/docs/akka-persistence-cassandra/1.0.5/events-by-tag.html#first-time-bucket
    */
  def format(time: ZonedDateTime): String = {
    val utcTime: LocalDateTime = LocalDateTime.ofInstant(time.toInstant, ZoneOffset.UTC)
    formatter.format(utcTime)
  }

}
24 |
--------------------------------------------------------------------------------
/rollback-tool-cassandra/src/test/scala/lerna/akka/entityreplication/rollback/cassandra/testkit/FirstTimeBucketSpec.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.rollback.cassandra.testkit
2 |
3 | import org.scalatest.{ Matchers, WordSpec }
4 |
5 | import java.time.{ ZoneOffset, ZonedDateTime }
6 |
final class FirstTimeBucketSpec extends WordSpec with Matchers {

  "FirstTimeBucket" should {

    "return a formatted string for first-time-bucket of Akka Persistence Cassandra" in {
      // 14:58 at UTC+2 is 12:58 in UTC.
      val timeAtUtcPlus2 = ZonedDateTime.of(2022, 11, 3, 14, 58, 0, 0, ZoneOffset.ofHours(2))
      FirstTimeBucket.format(timeAtUtcPlus2) shouldBe "20221103T12:58"

      // 14:07 at UTC+5 is 09:07 in UTC.
      val timeAtUtcPlus5 = ZonedDateTime.of(2021, 4, 1, 14, 7, 0, 0, ZoneOffset.ofHours(5))
      FirstTimeBucket.format(timeAtUtcPlus5) shouldBe "20210401T09:07"
    }

  }

}
24 |
--------------------------------------------------------------------------------
/rollback-tool-cassandra/src/test/scala/lerna/akka/entityreplication/rollback/cassandra/testkit/PersistenceCassandraConfigProvider.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.rollback.cassandra.testkit
2 |
3 | import com.typesafe.config.{ Config, ConfigFactory }
4 |
5 | import java.time.ZonedDateTime
6 |
trait PersistenceCassandraConfigProvider {

  // One minute before now — presumably so events persisted during a test run
  // fall within the first time bucket; verify against the test flow if changed.
  private def firstTimeBucket: String =
    FirstTimeBucket.format(ZonedDateTime.now().minusMinutes(1))

  /** Port the Cassandra instance listens on */
  def cassandraPort: Int = 9042

  /** Builds an Akka Persistence Cassandra configuration using the given keyspaces
    *
    * Throws an `IllegalArgumentException` if a keyspace name exceeds 48 characters.
    */
  def persistenceCassandraConfig(
      journalKeyspace: String,
      snapshotKeyspace: String,
      autoCreate: Boolean = false,
  ): Config = {
    requireKeyspaceLengthWithinLimit("journalKeyspace", journalKeyspace)
    requireKeyspaceLengthWithinLimit("snapshotKeyspace", snapshotKeyspace)
    ConfigFactory.parseString(s"""
      |akka.persistence.journal.plugin = akka.persistence.cassandra.journal
      |akka.persistence.snapshot-store.plugin = akka.persistence.cassandra.snapshot
      |
      |akka.persistence.cassandra.journal.keyspace = "$journalKeyspace"
      |akka.persistence.cassandra.journal.keyspace-autocreate = $autoCreate
      |akka.persistence.cassandra.journal.tables-autocreate = $autoCreate
      |
      |akka.persistence.cassandra.snapshot.keyspace = "$snapshotKeyspace"
      |akka.persistence.cassandra.snapshot.keyspace-autocreate = $autoCreate
      |akka.persistence.cassandra.snapshot.tables-autocreate = $autoCreate
      |
      |akka.persistence.cassandra.events-by-tag.eventual-consistency-delay = 1000ms
      |akka.persistence.cassandra.events-by-tag.first-time-bucket = "$firstTimeBucket"
      |
      |datastax-java-driver {
      |  advanced.reconnect-on-init = true
      |  basic.contact-points = ["127.0.0.1:$cassandraPort"]
      |  basic.load-balancing-policy.local-datacenter = "datacenter1"
      |}
      |""".stripMargin)
  }

  // Cassandra limits keyspace names to 48 characters.
  private def requireKeyspaceLengthWithinLimit(label: String, keyspace: String): Unit =
    require(
      keyspace.length <= 48,
      s"$label.length [${keyspace.length}] should be less than or equal to 48. See https://docs.datastax.com/en/cql-oss/3.3/cql/cql_reference/refLimits.html.",
    )

}
53 |
--------------------------------------------------------------------------------
/rollback-tool-cassandra/src/test/scala/lerna/akka/entityreplication/rollback/setup/CommitLogStoreActorIdSpec.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.rollback.setup
2 |
3 | import lerna.akka.entityreplication.model.{ NormalizedShardId, TypeName }
4 | import org.scalatest.{ Matchers, WordSpec }
5 |
final class CommitLogStoreActorIdSpec extends WordSpec with Matchers {

  "CommitLogStoreActorId.persistenceId" should {

    "return the persistence ID for the CommitLogStoreActor" in {
      val actorId =
        CommitLogStoreActorId(TypeName.from("example"), NormalizedShardId.from("shard1"))
      actorId.persistenceId shouldBe "CommitLogStore:example:shard1"
    }

  }

}
18 |
--------------------------------------------------------------------------------
/rollback-tool-cassandra/src/test/scala/lerna/akka/entityreplication/rollback/setup/RaftActorIdSpec.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.rollback.setup
2 |
3 | import lerna.akka.entityreplication.model.{ NormalizedShardId, TypeName }
4 | import lerna.akka.entityreplication.raft.routing.MemberIndex
5 | import org.scalatest.{ Matchers, WordSpec }
6 |
final class RaftActorIdSpec extends WordSpec with Matchers {

  "RaftActorId.persistenceId" should {

    "return the persistence ID for the RaftActor" in {
      val actorId =
        RaftActorId(TypeName.from("example"), NormalizedShardId.from("shard1"), MemberIndex("member1"))
      actorId.persistenceId shouldBe "raft:example:shard1:member1"
    }

  }

}
19 |
--------------------------------------------------------------------------------
/rollback-tool-cassandra/src/test/scala/lerna/akka/entityreplication/rollback/setup/RollbackSetupSpec.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.rollback.setup
2 |
3 | import org.scalatest.{ Matchers, WordSpec }
4 |
final class RollbackSetupSpec extends WordSpec with Matchers {

  "RollbackSetup" should {

    "not throw an IllegalArgumentException if the given toSequenceNr is 0" in {
      // Zero is the lowest allowed sequence number.
      noException should be thrownBy RollbackSetup("persistence-id-1", 0)
    }

    "throw an IllegalArgumentException if the given toSequenceNr is negative" in {
      val thrown = intercept[IllegalArgumentException] {
        RollbackSetup("persistence-id-1", -1)
      }
      thrown.getMessage shouldBe "requirement failed: toSequenceNr [-1] should be greater than or equal to 0"
    }

  }

}
23 |
--------------------------------------------------------------------------------
/rollback-tool-cassandra/src/test/scala/lerna/akka/entityreplication/rollback/setup/SnapshotStoreIdSpec.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.rollback.setup
2 |
3 | import lerna.akka.entityreplication.model.{ NormalizedEntityId, TypeName }
4 | import lerna.akka.entityreplication.raft.routing.MemberIndex
5 | import org.scalatest.{ Matchers, WordSpec }
6 |
final class SnapshotStoreIdSpec extends WordSpec with Matchers {

  "SnapshotStoreId.persistenceId" should {

    "return the persistence ID for the SnapshotStore" in {
      val storeId =
        SnapshotStoreId(TypeName.from("example"), MemberIndex("member1"), NormalizedEntityId("entity1"))
      storeId.persistenceId shouldBe "SnapshotStore:example:entity1:member1"
    }

  }

}
19 |
--------------------------------------------------------------------------------
/rollback-tool-cassandra/src/test/scala/lerna/akka/entityreplication/rollback/setup/SnapshotSyncManagerIdSpec.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.rollback.setup
2 |
3 | import lerna.akka.entityreplication.model.{ NormalizedShardId, TypeName }
4 | import lerna.akka.entityreplication.raft.routing.MemberIndex
5 | import org.scalatest.{ Matchers, WordSpec }
6 |
final class SnapshotSyncManagerIdSpec extends WordSpec with Matchers {

  "SnapshotSyncManagerId.persistenceId" should {

    "return the persistence ID for the SnapshotSyncManager" in {
      val managerId = SnapshotSyncManagerId(
        TypeName.from("example"),
        NormalizedShardId.from("shard1"),
        sourceMemberIndex = MemberIndex("member1"),
        destinationMemberIndex = MemberIndex("member2"),
      )
      managerId.persistenceId shouldBe "SnapshotSyncManager:example:member1:member2:shard1"
    }

  }

}
24 |
--------------------------------------------------------------------------------
/rollback-tool-cassandra/src/test/scala/lerna/akka/entityreplication/rollback/testkit/ConstantPersistenceQueries.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.rollback.testkit
2 |
3 | import akka.NotUsed
4 | import akka.stream.scaladsl.Source
5 | import lerna.akka.entityreplication.rollback.{ PersistenceQueries, SequenceNr }
6 |
7 | import scala.concurrent.Future
8 |
/** [[PersistenceQueries]] backed by a fixed, in-memory sequence of event envelopes */
private[rollback] final class ConstantPersistenceQueries(
    eventEnvelopes: IndexedSeq[PersistenceQueries.TaggedEventEnvelope],
) extends PersistenceQueries {

  // Requirement: every envelope belongs to the same persistent actor.
  require(
    eventEnvelopes.map(_.persistenceId).toSet.sizeIs <= 1,
    "All event envelopes should have the same persistence ID, but they didn't. " +
      s"Persistence IDs were [${eventEnvelopes.map(_.persistenceId).toSet}].",
  )
  // Requirement: sequence numbers are in non-decreasing order.
  for (index <- 1 until eventEnvelopes.size) {
    require(
      eventEnvelopes(index).sequenceNr >= eventEnvelopes(index - 1).sequenceNr,
      s"eventEnvelopes($index).sequenceNr [${eventEnvelopes(index).sequenceNr.value}] should be " +
        s"greater than or equal to eventEnvelopes(${index - 1}).sequenceNr [${eventEnvelopes(index - 1).sequenceNr.value}]",
    )
  }

  /** @inheritdoc */
  override def findHighestSequenceNrAfter(persistenceId: String, from: SequenceNr): Future[Option[SequenceNr]] = {
    // The envelopes are sorted, so the last matching one carries the highest sequence number.
    val highestSequenceNr =
      eventEnvelopes
        .filter(_.persistenceId == persistenceId)
        .lastOption
        .map(_.sequenceNr)
        .filter(_ >= from)
    Future.successful(highestSequenceNr)
  }

  /** @inheritdoc */
  override def currentEventsAfter(
      persistenceId: String,
      from: SequenceNr,
  ): Source[PersistenceQueries.TaggedEventEnvelope, NotUsed] = {
    val matching = eventEnvelopes.filter(_.persistenceId == persistenceId)
    // Envelopes are in ascending order; skip everything below `from`.
    Source(matching.dropWhile(_.sequenceNr < from))
  }

  /** @inheritdoc */
  override def currentEventsBefore(
      persistenceId: String,
      from: SequenceNr,
  ): Source[PersistenceQueries.TaggedEventEnvelope, NotUsed] = {
    val matchingInReverse = eventEnvelopes.reverse.filter(_.persistenceId == persistenceId)
    // Reversed envelopes are in descending order; skip everything above `from`.
    Source(matchingInReverse.dropWhile(_.sequenceNr > from))
  }

}
60 |
--------------------------------------------------------------------------------
/rollback-tool-cassandra/src/test/scala/lerna/akka/entityreplication/rollback/testkit/PatienceConfigurationForTestKitBase.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.rollback.testkit
2 |
3 | import akka.testkit.TestKitBase
4 | import org.scalatest.TestSuite
5 | import org.scalatest.concurrent.PatienceConfiguration
6 | import org.scalatest.time.{ Millis, Span }
7 |
trait PatienceConfigurationForTestKitBase { this: TestKitBase with TestSuite with PatienceConfiguration =>

  /** Patience configuration using the TestKit default timeout and a 100 ms polling interval */
  lazy val customPatienceConfig: PatienceConfig = {
    val pollingInterval = Span(100, Millis)
    PatienceConfig(timeout = testKitSettings.DefaultTimeout.duration, interval = pollingInterval)
  }

  override implicit def patienceConfig: PatienceConfig = customPatienceConfig

}
17 |
--------------------------------------------------------------------------------
/rollback-tool-cassandra/src/test/scala/lerna/akka/entityreplication/rollback/testkit/PersistenceInitializationAwaiter.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.rollback.testkit
2 |
3 | import akka.actor.{ ActorSystem, ClassicActorSystemProvider, PoisonPill }
4 | import akka.testkit.TestProbe
5 |
6 | import java.util.UUID
7 | import scala.concurrent.duration.{ DurationInt, FiniteDuration }
8 |
object PersistenceInitializationAwaiter {
  def apply(systemProvider: ClassicActorSystemProvider): PersistenceInitializationAwaiter =
    new PersistenceInitializationAwaiter(systemProvider)
}

final class PersistenceInitializationAwaiter private (systemProvider: ClassicActorSystemProvider) {
  implicit val system: ActorSystem = systemProvider.classicSystem

  /** Waits for the initialization completions of the default journal plugin and snapshot plugin */
  def awaitInit(
      max: FiniteDuration = 60.seconds,
      interval: FiniteDuration = 3.seconds,
  ): Unit = {
    val probe = TestProbe()
    probe.within(max) {
      // Retry the persist/snapshot round trip until both plugins answer.
      probe.awaitAssert(
        verifyPluginsReady(probe),
        probe.remainingOrDefault,
        interval,
      )
    }
  }

  // Runs one persist + snapshot round trip against a fresh persistent actor,
  // then stops the actor regardless of the outcome.
  private def verifyPluginsReady(probe: TestProbe): Unit = {
    val persistenceId   = getClass.getSimpleName + UUID.randomUUID().toString
    val persistentActor = system.actorOf(TestPersistentActor.props(persistenceId))
    probe.watch(persistentActor)
    persistentActor ! TestPersistentActor.PersistEvent(probe.ref)
    persistentActor ! TestPersistentActor.SaveSnapshot(probe.ref)
    try {
      probe.expectMsgType[TestPersistentActor.Ack]
      probe.expectMsgType[TestPersistentActor.Ack]
    } finally {
      persistentActor ! PoisonPill
      probe.expectTerminated(persistentActor)
    }
  }

}
48 |
--------------------------------------------------------------------------------
/rollback-tool-cassandra/src/test/scala/lerna/akka/entityreplication/rollback/testkit/TimeBasedUuids.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.rollback.testkit
2 |
3 | import akka.persistence.query.TimeBasedUUID
4 | import com.datastax.oss.driver.api.core.uuid.Uuids
5 |
6 | import java.time.Instant
7 |
final class TimeBasedUuids(val baseTimestamp: Instant) {

  /** Creates a [[TimeBasedUUID]] whose timestamp is `baseTimestamp` shifted by `deltaMillis` milliseconds */
  def create(deltaMillis: Long): TimeBasedUUID = {
    val shiftedTimestamp = baseTimestamp.plusMillis(deltaMillis)
    TimeBasedUUID(Uuids.startOf(shiftedTimestamp.toEpochMilli))
  }

}
17 |
--------------------------------------------------------------------------------
/rollback-tool-cassandra/src/test/scala/lerna/akka/entityreplication/rollback/testkit/TimeBasedUuidsSpec.scala:
--------------------------------------------------------------------------------
1 | package lerna.akka.entityreplication.rollback.testkit
2 |
3 | import com.datastax.oss.driver.api.core.uuid.Uuids
4 | import org.scalatest.{ Matchers, WordSpec }
5 |
6 | import java.time.{ ZoneOffset, ZonedDateTime }
7 |
final class TimeBasedUuidsSpec extends WordSpec with Matchers {

  "TimeBasedUuids.create" should {

    "return a TimeBasedUUID containing the timestamp calculated from the base timestamp and the given delta" in {
      val baseTimestamp =
        ZonedDateTime.of(2021, 7, 12, 11, 7, 0, 0, ZoneOffset.UTC).toInstant
      val timeBasedUuids =
        new TimeBasedUuids(baseTimestamp)

      // Test deltas on both sides of zero:
      for (deltaMillis <- -2L to 2L) {
        val timeBasedUuid = timeBasedUuids.create(deltaMillis)
        Uuids.unixTimestamp(timeBasedUuid.value) shouldBe baseTimestamp.plusMillis(deltaMillis).toEpochMilli
      }
    }

  }

}
29 |
--------------------------------------------------------------------------------
/scripts/run-multijvm-test.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
# NOTE: `env bash` is used because bash is not at /usr/bin/bash on every
# platform (e.g. it is /bin/bash on macOS and many Linux distributions).

# Usage
# ./run-multijvm-test.sh
#
# You can specify max attempts(retry).
# ./run-multijvm-test.sh 10

# Set MaxRetry (default: 3, overridable via the first argument)
MAX_RETRY=3
if [ $# -ge 1 ]; then
  MAX_RETRY="$1"
fi

# Run multi-jvm:test until it is success or it's attempts reach to MAX_RETRY.
# The exit code of the last sbt run is propagated to the caller.
code=0
for i in $(seq 1 "$MAX_RETRY"); do
  echo "begin ($i/$MAX_RETRY)"
  sbt coverage multi-jvm:test
  code=$?
  if [ "$code" -eq 0 ]; then
    break
  fi
  # Only announce a retry when attempts remain.
  if [ "$i" -ne "$MAX_RETRY" ]; then
    echo "fail, and then try it again..."
    sleep 1
  fi
done

exit "$code"
--------------------------------------------------------------------------------
/src/site/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | Project Documentation
6 |
16 |
17 |
18 | Go to the project documentation
19 |
20 |
21 |
--------------------------------------------------------------------------------