├── core ├── .gitignore └── src │ ├── test │ ├── resources │ │ ├── log4j.properties │ │ ├── application.conf │ │ └── logback-test.xml │ └── scala │ │ ├── akka │ │ └── persistence │ │ │ └── cassandra │ │ │ ├── RequiresThree.scala │ │ │ ├── WithLogCapturing.scala │ │ │ ├── reconciler │ │ │ ├── BuildTagViewForPersistenceIdSpec.scala │ │ │ ├── RebuildAllPersisetceIdsSpec.scala │ │ │ ├── TagQuerySpec.scala │ │ │ ├── TruncateAllSpec.scala │ │ │ └── DeleteTagViewForPersistenceIdSpec.scala │ │ │ ├── query │ │ │ ├── TestEventAdapter.scala │ │ │ ├── TestActor.scala │ │ │ ├── EventsByTagPubsubSpec.scala │ │ │ ├── EventsByPersistenceIdFastForwardSpec.scala │ │ │ ├── CassandraQueryJournalOverrideSpec.scala │ │ │ ├── javadsl │ │ │ │ └── CassandraReadJournalSpec.scala │ │ │ └── scaladsl │ │ │ │ └── CassandraReadJournalSpec.scala │ │ │ ├── EventsByTagCrashSpec.scala │ │ │ ├── RetriesSpec.scala │ │ │ ├── journal │ │ │ ├── TagScanningSpec.scala │ │ │ ├── PubSubThrottlerSpec.scala │ │ │ ├── TimeBucketSpec.scala │ │ │ ├── StartupLoadSpec.scala │ │ │ ├── PersistAllSpec.scala │ │ │ ├── CassandraSerializationSpec.scala │ │ │ ├── CassandraEventUpdateSpec.scala │ │ │ └── CassandraJournalSpec.scala │ │ │ ├── TestTaggingActor.scala │ │ │ ├── PrintCreateStatements.scala │ │ │ ├── Persister.scala │ │ │ ├── EventsByTagStressSpec.scala │ │ │ ├── healthcheck │ │ │ └── CassandraHealthCheckSpec.scala │ │ │ └── sharding │ │ │ └── ClusterShardingQuickTerminationSpec.scala │ │ └── doc │ │ └── reconciler │ │ ├── AllPersistenceIdsMigrationCompileOnly.scala │ │ └── ReconciliationCompileOnly.scala │ └── main │ ├── scala │ └── akka │ │ └── persistence │ │ └── cassandra │ │ ├── journal │ │ ├── package.scala │ │ ├── CassandraJournalUnexpectedError.scala │ │ ├── PubSubThrottler.scala │ │ ├── TaggedPreparedStatements.scala │ │ ├── JournalSettings.scala │ │ └── TimeBucket.scala │ │ ├── compaction │ │ ├── CassandraCompactionStrategyConfig.scala │ │ ├── CassandraCompactionStrategy.scala │ │ ├── LeveledCompactionStrategy.scala │ │ ├── TimeWindowCompactionStrategy.scala │ │ ├── SizeTieredCompactionStrategy.scala │ │ └── BaseCompactionStrategy.scala │ │ ├── reconciler │ │ ├── ReconciliationSettings.scala │ │ ├── AllTags.scala │ │ ├── DeleteTagViewForPersistenceId.scala │ │ └── BuildTagViewForPersistenceId.scala │ │ ├── query │ │ ├── UUIDPersistentRepr.scala │ │ ├── CassandraReadJournalProvider.scala │ │ ├── MissingTaggedEventException.scala │ │ ├── QuerySettings.scala │ │ ├── CassandraReadStatements.scala │ │ └── package.scala │ │ ├── cleanup │ │ └── CleanupSettings.scala │ │ ├── healthcheck │ │ ├── HealthCheckSettings.scala │ │ └── CassandraHealthCheck.scala │ │ ├── CachedPreparedStatement.scala │ │ ├── snapshot │ │ └── SnapshotSettings.scala │ │ ├── Retries.scala │ │ ├── KeyspaceAndTableStatements.scala │ │ └── CassandraStatements.scala │ └── resources │ └── test-embedded-cassandra.yaml ├── project ├── build.properties ├── Scala3.scala ├── link-validator.conf ├── plugins.sbt ├── project-info.conf ├── Publish.scala └── Dependencies.scala ├── .jvmopts-travis ├── example ├── src │ └── main │ │ ├── scala │ │ └── akka │ │ │ └── persistence │ │ │ └── cassandra │ │ │ └── example │ │ │ ├── CborSerializable.scala │ │ │ ├── ReadSideTopic.scala │ │ │ ├── Reporter.scala │ │ │ ├── ConfigurablePersistentActor.scala │ │ │ ├── LoadGenerator.scala │ │ │ ├── Main.scala │ │ │ └── ReadSide.scala │ │ └── resources │ │ ├── logback.xml │ │ ├── kubernetes.conf │ │ ├── local.conf │ │ └── common.conf ├── kubernetes │ ├── rbac.yaml │ ├── cassandra.yaml │ ├── 
load.yaml │ ├── read.yaml │ └── write.yaml └── README.md ├── docs ├── src │ ├── main │ │ └── paradox │ │ │ ├── serialization.md │ │ │ ├── .htaccess │ │ │ ├── _template │ │ │ └── projectSpecificFooter.st │ │ │ ├── cqrs.md │ │ │ ├── healthcheck.md │ │ │ ├── scylladb.md │ │ │ ├── astra.md │ │ │ ├── index.md │ │ │ ├── assets │ │ │ └── js │ │ │ │ └── warnOldVersion.js │ │ │ ├── read-journal.md │ │ │ ├── server.md │ │ │ ├── testing.md │ │ │ ├── cosmosdb.md │ │ │ ├── keyspaces.md │ │ │ ├── cleanup.md │ │ │ ├── configuration.md │ │ │ ├── snapshots.md │ │ │ └── overview.md │ └── test │ │ ├── scala │ │ └── doc │ │ │ └── cleanup │ │ │ └── CleanupDocExample.scala │ │ └── java │ │ └── jdoc │ │ └── cleanup │ │ └── CleanupDocExample.java └── release-train-issue-template.md ├── .fossa.yml ├── dse-test └── src │ └── test │ ├── resources │ └── application.conf │ └── scala │ ├── your │ └── pack │ │ └── DseSessionProvider.scala │ └── akka │ └── persistence │ └── cassandra │ └── JournalDseSpec.scala ├── .gitignore ├── cassandra-bundle └── src │ └── main │ └── resources │ ├── log4j.properties │ └── logback.xml ├── .scala-steward.conf ├── scripts └── create-release-issue.sh ├── .idea └── runConfigurations │ ├── Main_2551_write.xml │ ├── Main_2552_write.xml │ ├── Main_2553_load_.xml │ └── Main_2554_read.xml ├── RELEASING.md ├── .scalafmt.conf ├── .github └── workflows │ ├── fossa.yml │ ├── link-validator.yml │ ├── publish.yml │ └── check-build-test.yml ├── docker-compose.yml └── README.md /core/.gitignore: -------------------------------------------------------------------------------- 1 | /.target/ 2 | -------------------------------------------------------------------------------- /project/build.properties: -------------------------------------------------------------------------------- 1 | sbt.version=1.11.7 2 | -------------------------------------------------------------------------------- /.jvmopts-travis: -------------------------------------------------------------------------------- 1 | # This is used to configure the sbt instance that Travis launches 2 | 3 | -Dfile.encoding=UTF8 4 | -Dsbt.color=always 5 | -------------------------------------------------------------------------------- /example/src/main/scala/akka/persistence/cassandra/example/CborSerializable.scala: -------------------------------------------------------------------------------- 1 | package akka.persistence.cassandra.example 2 | 3 | trait CborSerializable 4 | -------------------------------------------------------------------------------- /docs/src/main/paradox/serialization.md: -------------------------------------------------------------------------------- 1 | # Serialization 2 | 3 | The events and snapshots are serialized with @extref:[Akka Serialization](akka:serialization.html). 
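For example, an application's event classes can be bound to a serializer in configuration. A minimal sketch — the `com.example.MyEvent` class name and the `jackson-json` binding (from the separate `akka-serialization-jackson` module) are illustrative, not something this plugin mandates: ``` akka.actor { serialization-bindings { # illustrative binding: events and snapshot payloads are resolved by type, like any other message "com.example.MyEvent" = jackson-json } } ```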
4 | 5 | -------------------------------------------------------------------------------- /project/Scala3.scala: -------------------------------------------------------------------------------- 1 | import sbt.Keys.crossScalaVersions 2 | 3 | object Scala3 { 4 | 5 | val settings = Seq(crossScalaVersions := Dependencies.ScalaVersions) 6 | 7 | } 8 | -------------------------------------------------------------------------------- /.fossa.yml: -------------------------------------------------------------------------------- 1 | version: 3 2 | 3 | # https://github.com/fossas/fossa-cli/blob/master/docs/references/files/fossa-yml.md 4 | 5 | paths: 6 | exclude: 7 | - ./example 8 | - ./dse-test 9 | -------------------------------------------------------------------------------- /dse-test/src/test/resources/application.conf: -------------------------------------------------------------------------------- 1 | datastax-java-driver { 2 | basic.contact-points = [ "127.0.0.1:9043"] 3 | basic.load-balancing-policy.local-datacenter = "dc1" 4 | } 5 | 6 | 7 | -------------------------------------------------------------------------------- /docs/src/main/paradox/.htaccess: -------------------------------------------------------------------------------- 1 | # This file can be used for redirects like: 2 | # RedirectMatch 301 ^/docs/alpakka/([^/]+)/data-transformations/recordio.html https://doc.akka.io/docs/alpakka/$1/data-transformations/simple-codecs.html 3 | -------------------------------------------------------------------------------- /core/src/test/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | log4j.rootLogger=ERROR, stdout 2 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 3 | log4j.appender.stdout.layout=org.apache.log4j.SimpleLayout 4 | log4j.logger.org.apache.cassandra.service.CassandraDaemon=OFF 5 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .idea* 2 | .bloop 3 | *.env 4 | *.log 5 | *.iml 6 | target/ 7 | /.target/ 8 | .DS_Store 9 | .cache* 10 | .classpath 11 | .project 12 | .settings 13 | .tmpBin/ 14 | *.sublime-project 15 | /bin/ 16 | ext-lib-src/ 17 | .classpath_nb 18 | .bsp 19 | -------------------------------------------------------------------------------- /cassandra-bundle/src/main/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | log4j.rootLogger=ERROR, stdout 2 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 3 | log4j.appender.stdout.layout=org.apache.log4j.SimpleLayout 4 | log4j.logger.org.apache.cassandra.service.CassandraDaemon=OFF 5 | -------------------------------------------------------------------------------- /.scala-steward.conf: -------------------------------------------------------------------------------- 1 | pullRequests.frequency = "@monthly" 2 | 3 | updates.ignore = [ 4 | // explicit updates 5 | { groupId = "com.typesafe.akka" } 6 | ] 7 | 8 | commits.message = "bump: ${artifactName} ${nextVersion} (was ${currentVersion})" 9 | 10 | updatePullRequests = never 11 | -------------------------------------------------------------------------------- /core/src/test/scala/akka/persistence/cassandra/RequiresThree.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2016-2025 Lightbend Inc. 
3 |  */ 4 | 5 | package akka.persistence.cassandra 6 | 7 | import org.scalatest.Tag 8 | 9 | object RequiresCassandraThree extends Tag("RequiresCassandraThree") 10 | -------------------------------------------------------------------------------- /docs/src/main/paradox/_template/projectSpecificFooter.st: -------------------------------------------------------------------------------- 1 | 2 | 5 | -------------------------------------------------------------------------------- /core/src/test/resources/application.conf: -------------------------------------------------------------------------------- 1 | datastax-java-driver { 2 |   basic { 3 |     request { 4 |       timeout = 10s # dropping keyspaces takes some time 5 |     } 6 |   } 7 | 8 |   # Enable a few metrics to test that they work 9 |   advanced.metrics { 10 |     session.enabled = [ "bytes-sent", "cql-requests"] 11 |     node.enabled = [ "pool.open-connections", "pool.in-flight" ] 12 |   } 13 | 14 | } -------------------------------------------------------------------------------- /scripts/create-release-issue.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | VERSION=$1 4 | if [ -z $VERSION ] 5 | then 6 |   echo specify the version name to be released, e.g. 1.0.0 7 | else 8 |   sed -e 's/\$VERSION\$/'$VERSION'/g' docs/release-train-issue-template.md > /tmp/release-$VERSION.md 9 |   echo Created $(gh issue create --title "Release $VERSION" --body-file /tmp/release-$VERSION.md --milestone $VERSION --web) 10 | fi 11 | -------------------------------------------------------------------------------- /docs/src/main/paradox/cqrs.md: -------------------------------------------------------------------------------- 1 | # Event sourcing and CQRS 2 | 3 | `EventSourcedBehavior`s with the `PersistenceQuery`'s `eventsByTag` query can be used to do Event sourcing with Command and 4 | Query Responsibility Segregation (CQRS). 5 | 6 | A full sample showing how to do this with Cassandra, including scaling the read side, is in the [Akka samples repository](https://github.com/akka/akka-samples/tree/2.6/akka-sample-cqrs-scala). 7 | -------------------------------------------------------------------------------- /core/src/main/scala/akka/persistence/cassandra/journal/package.scala: -------------------------------------------------------------------------------- 1 | /* 2 |  * Copyright (C) 2016-2025 Lightbend Inc. 3 |  */ 4 | 5 | package akka.persistence.cassandra 6 | 7 | import akka.annotation.InternalApi 8 | 9 | package object journal { 10 | 11 |   /** INTERNAL API */ 12 |   @InternalApi private[akka] def partitionNr(sequenceNr: Long, partitionSize: Long): Long = 13 |     (sequenceNr - 1L) / partitionSize 14 | } 15 | -------------------------------------------------------------------------------- /core/src/main/scala/akka/persistence/cassandra/journal/CassandraJournalUnexpectedError.scala: -------------------------------------------------------------------------------- 1 | /* 2 |  * Copyright (C) 2016-2025 Lightbend Inc.
3 |  */ 4 | 5 | package akka.persistence.cassandra.journal 6 | 7 | import akka.actor.CoordinatedShutdown.Reason 8 | 9 | /** 10 |  * Cassandra Journal unexpected error with akka.persistence.cassandra.coordinated-shutdown-on-error set to true 11 |  */ 12 | case object CassandraJournalUnexpectedError extends Reason 13 | -------------------------------------------------------------------------------- /core/src/main/scala/akka/persistence/cassandra/compaction/CassandraCompactionStrategyConfig.scala: -------------------------------------------------------------------------------- 1 | /* 2 |  * Copyright (C) 2016-2025 Lightbend Inc. 3 |  */ 4 | 5 | package akka.persistence.cassandra.compaction 6 | 7 | import com.typesafe.config.Config 8 | 9 | trait CassandraCompactionStrategyConfig[CSS <: CassandraCompactionStrategy] { 10 |   val ClassName: String 11 | 12 |   def propertyKeys: List[String] 13 | 14 |   def fromConfig(config: Config): CSS 15 | } 16 | -------------------------------------------------------------------------------- /core/src/main/scala/akka/persistence/cassandra/reconciler/ReconciliationSettings.scala: -------------------------------------------------------------------------------- 1 | /* 2 |  * Copyright (C) 2016-2025 Lightbend Inc. 3 |  */ 4 | 5 | package akka.persistence.cassandra.reconciler 6 | 7 | import com.typesafe.config.Config 8 | 9 | final class ReconciliationSettings(config: Config) { 10 | 11 |   val readProfile: String = config.getString("read-profile") 12 |   val writeProfile: String = config.getString("write-profile") 13 |   val pluginLocation: String = config.getString("plugin-location") 14 | 15 | } 16 | -------------------------------------------------------------------------------- /cassandra-bundle/src/main/resources/logback.xml: -------------------------------------------------------------------------------- 1 | <?xml version="1.0" encoding="UTF-8"?> 2 | <configuration> 3 |     <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender"> 4 |         <target>System.out</target> 5 |         <encoder> 6 |             <pattern>%date{MM/dd HH:mm:ss} %-5level[%thread] %logger{1} - %m%n%xException</pattern> 7 |         </encoder> 8 |     </appender> 9 |     <!-- only the target and pattern are recoverable; root level assumed --> 10 |     <root level="INFO"> 11 |         <appender-ref ref="STDOUT"/> 12 |     </root> 13 | </configuration> -------------------------------------------------------------------------------- /example/src/main/scala/akka/persistence/cassandra/example/ReadSideTopic.scala: -------------------------------------------------------------------------------- 1 | package akka.persistence.cassandra.example 2 | 3 | import akka.actor.typed.pubsub.Topic 4 | import akka.actor.typed.scaladsl.ActorContext 5 | import akka.actor.typed.ActorRef 6 | 7 | object ReadSideTopic { 8 | 9 |   final case class ReadSideMetrics(count: Long, maxValue: Long, p99: Long, p50: Long) 10 | 11 |   def init(context: ActorContext[_]): ActorRef[Topic.Command[ReadSideMetrics]] = { 12 |     context.spawn(Topic[ReadSideMetrics]("read-side-metrics"), "read-side-metrics") 13 |   } 14 | 15 | } 16 | -------------------------------------------------------------------------------- /core/src/main/scala/akka/persistence/cassandra/compaction/CassandraCompactionStrategy.scala: -------------------------------------------------------------------------------- 1 | /* 2 |  * Copyright (C) 2016-2025 Lightbend Inc.
3 |  */ 4 | 5 | package akka.persistence.cassandra.compaction 6 | 7 | import com.typesafe.config.Config 8 | 9 | /* 10 |  * http://docs.datastax.com/en/cql/3.1/cql/cql_reference/compactSubprop.html 11 |  */ 12 | trait CassandraCompactionStrategy { 13 |   def asCQL: String 14 | } 15 | 16 | object CassandraCompactionStrategy { 17 |   def apply(config: Config): CassandraCompactionStrategy = 18 |     BaseCompactionStrategy.fromConfig(config) 19 | } 20 | -------------------------------------------------------------------------------- /example/src/main/resources/logback.xml: -------------------------------------------------------------------------------- 1 | <?xml version="1.0" encoding="UTF-8"?> 2 | <configuration> 3 |     <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender"> 4 |         <target>System.out</target> 5 |         <encoder> 6 |             <pattern>%date{MM/dd HH:mm:ss} %-5level[%thread] %logger{1} [%X{akkaSource}] - %m%n%xException</pattern> 7 |         </encoder> 8 |     </appender> 9 |     <!-- only the target and pattern are recoverable; root level assumed --> 10 |     <root level="INFO"> 11 |         <appender-ref ref="STDOUT"/> 12 |     </root> 13 | </configuration> -------------------------------------------------------------------------------- /example/src/main/resources/kubernetes.conf: -------------------------------------------------------------------------------- 1 | include "common.conf" 2 | 3 | akka.management { 4 |   cluster.bootstrap { 5 |     contact-point-discovery { 6 |       discovery-method = kubernetes-api 7 |       # have multiple deployments join the same cluster. One deployment per role 8 |     } 9 |   } 10 | } 11 | 12 | akka.cluster { 13 |   roles = [${ROLE}] 14 | } 15 | 16 | akka.discovery.kubernetes-api { 17 |   pod-label-selector = "cluster=%s" 18 | } 19 | 20 | datastax-java-driver { 21 |   # cassandra service defined in kubernetes/cassandra.yaml 22 |   basic.contact-points = ["cassandra:9042"] 23 |   basic.load-balancing-policy.local-datacenter = datacenter1 24 | } 25 | 26 | 27 | 28 | -------------------------------------------------------------------------------- /example/kubernetes/rbac.yaml: -------------------------------------------------------------------------------- 1 | kind: Role 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | metadata: 4 |   name: pod-reader 5 | rules: 6 |   - apiGroups: [""] # "" indicates the core API group 7 |     resources: ["pods"] 8 |     verbs: ["get", "watch", "list"] 9 | --- 10 | kind: RoleBinding 11 | apiVersion: rbac.authorization.k8s.io/v1 12 | metadata: 13 |   name: read-pods 14 | subjects: 15 |   # Uses the default service account. 16 |   # Consider creating a dedicated service account to run your 17 |   # Akka Cluster services and binding the role to that one. 18 |   - kind: ServiceAccount 19 |     name: default 20 | roleRef: 21 |   kind: Role 22 |   name: pod-reader 23 |   apiGroup: rbac.authorization.k8s.io -------------------------------------------------------------------------------- /docs/src/main/paradox/healthcheck.md: -------------------------------------------------------------------------------- 1 | # Health check 2 | 3 | A [health check for Akka Management](https://doc.akka.io/libraries/akka-management/current/healthchecks.html) 4 | is provided. To enable it you need to add the following configuration: 5 | 6 | ``` 7 | akka.management { 8 |   health-checks { 9 |     readiness-checks { 10 |       akka-persistence-cassandra = "akka.persistence.cassandra.healthcheck.CassandraHealthCheck" 11 |     } 12 |   } 13 | } 14 | ``` 15 | 16 | By default it will try to query the `system.local` table.
The query can be configured with: 17 | 18 | ``` 19 | akka.persistence.cassandra.healthcheck { 20 |   health-check-cql = "SELECT now() FROM system.local" 21 | } 22 | ``` 23 | 24 | 25 | -------------------------------------------------------------------------------- /dse-test/src/test/scala/your/pack/DseSessionProvider.scala: -------------------------------------------------------------------------------- 1 | package your.pack 2 | 3 | import akka.stream.alpakka.cassandra.CqlSessionProvider 4 | import com.datastax.dse.driver.api.core.DseSession 5 | import com.datastax.oss.driver.api.core.CqlSession 6 | 7 | import scala.jdk.FutureConverters._ 8 | import scala.concurrent.{ ExecutionContext, Future } 9 | 10 | //#dse-session-provider 11 | class DseSessionProvider extends CqlSessionProvider { 12 |   override def connect()(implicit ec: ExecutionContext): Future[CqlSession] = { 13 |     DseSession 14 |       .builder() 15 |       // .withAuthProvider() can add any DSE specific authentication here 16 |       .buildAsync() 17 |       .asScala 18 |   } 19 | } 20 | //#dse-session-provider 21 | -------------------------------------------------------------------------------- /core/src/test/resources/logback-test.xml: -------------------------------------------------------------------------------- 1 | <?xml version="1.0" encoding="UTF-8"?> 2 | <configuration> 3 |     <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender"> 4 |         <encoder> 5 |             <pattern>[%date{ISO8601}] [%level] [%logger] [%marker] [%thread] - %msg MDC: {%mdc}%n</pattern> 6 |         </encoder> 7 |     </appender> 8 |     <!-- only the pattern is recoverable; appender and root level assumed --> 9 |     <root level="INFO"> 10 |         <appender-ref ref="STDOUT"/> 11 |     </root> 12 | </configuration> -------------------------------------------------------------------------------- /docs/src/main/paradox/scylladb.md: -------------------------------------------------------------------------------- 1 | # ScyllaDB 2 | 3 | @@@ warning 4 | 5 | **This project is not continuously tested against ScyllaDB, and ScyllaDB is not supported as part of the [Lightbend Subscription](https://www.lightbend.com/lightbend-subscription).** 6 | 7 | @@@ 8 | 9 | [ScyllaDB](https://www.scylladb.com) is a wire-compatible replacement for Cassandra. Initial testing shows that 10 | there are issues when using ScyllaDB with persistence queries, which are also used for recovery. 11 | 12 | Users with ScyllaDB experience and test environments are very welcome contributors! 13 | You can run the tests against a ScyllaDB instance accessible on localhost port 9042, or adjust the 14 | `datastax-java-driver.basic.contact-points` and other configuration to connect to your ScyllaDB. 15 | -------------------------------------------------------------------------------- /.idea/runConfigurations/Main_2551_write.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 16 | -------------------------------------------------------------------------------- /core/src/main/scala/akka/persistence/cassandra/query/UUIDPersistentRepr.scala: -------------------------------------------------------------------------------- 1 | /* 2 |  * Copyright (C) 2016-2025 Lightbend Inc. 3 |  */ 4 | 5 | package akka.persistence.cassandra.query 6 | 7 | import java.util.UUID 8 | 9 | import akka.persistence.PersistentRepr 10 | import akka.annotation.InternalApi 11 | import akka.persistence.cassandra.journal.CassandraJournal.TagPidSequenceNr 12 | 13 | /** 14 |  * INTERNAL API: Wrap the [[PersistentRepr]] to add the UUID for 15 |  * `eventsByTag` query, or similar queries.
16 | */ 17 | @InternalApi private[akka] final case class UUIDPersistentRepr( 18 | offset: UUID, 19 | // tags: Set[String], this isn't in the tags table, may need to be added 20 | tagPidSequenceNr: TagPidSequenceNr, 21 | persistentRepr: PersistentRepr) 22 | -------------------------------------------------------------------------------- /core/src/main/scala/akka/persistence/cassandra/cleanup/CleanupSettings.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2016-2025 Lightbend Inc. 3 | */ 4 | 5 | package akka.persistence.cassandra.cleanup 6 | 7 | import scala.concurrent.duration._ 8 | import java.util.concurrent.TimeUnit 9 | 10 | import scala.concurrent.duration.FiniteDuration 11 | 12 | import akka.annotation.ApiMayChange 13 | import com.typesafe.config.Config 14 | 15 | @ApiMayChange 16 | class CleanupSettings(config: Config) { 17 | val pluginLocation: String = config.getString("plugin-location") 18 | val operationTimeout: FiniteDuration = config.getDuration("operation-timeout", TimeUnit.MILLISECONDS).millis 19 | val logProgressEvery: Int = config.getInt("log-progress-every") 20 | val dryRun: Boolean = config.getBoolean("dry-run") 21 | } 22 | -------------------------------------------------------------------------------- /.idea/runConfigurations/Main_2552_write.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 16 | -------------------------------------------------------------------------------- /.idea/runConfigurations/Main_2553_load_.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 16 | -------------------------------------------------------------------------------- /.idea/runConfigurations/Main_2554_read.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 16 | -------------------------------------------------------------------------------- /example/src/main/scala/akka/persistence/cassandra/example/Reporter.scala: -------------------------------------------------------------------------------- 1 | package akka.persistence.cassandra.example 2 | 3 | import akka.actor.typed.{ ActorRef, Behavior } 4 | import akka.actor.typed.pubsub.Topic 5 | import akka.actor.typed.scaladsl.Behaviors 6 | import akka.persistence.cassandra.example.ReadSideTopic.ReadSideMetrics 7 | 8 | object Reporter { 9 | def apply(topic: ActorRef[Topic.Command[ReadSideTopic.ReadSideMetrics]]): Behavior[ReadSideMetrics] = 10 | Behaviors.setup { ctx => 11 | ctx.log.info("Subscribing to latency stats") 12 | topic ! Topic.Subscribe(ctx.self) 13 | Behaviors.receiveMessage[ReadSideMetrics] { 14 | case ReadSideMetrics(count, max, p99, p50) => 15 | ctx.log.info("Read side Count: {} Max: {} p99: {} p50: {}", count, max, p99, p50) 16 | Behaviors.same 17 | } 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /docs/src/main/paradox/astra.md: -------------------------------------------------------------------------------- 1 | # DataStax Astra 2 | 3 | You can spin up a free Apache Cassandra cluster in the cloud using [DataStax 4 | Astra](https://www.datastax.com/products/datastax-astra). 
To use a Cassandra-as-a-Service cluster as Akka persistence, 5 | specify your secure connect bundle and credentials in your configuration instead of the contact points: 6 | 7 | ``` 8 | datastax-java-driver { 9 | basic { 10 | session-keyspace = my_keyspace 11 | cloud { 12 | secure-connect-bundle = /path/to/secure-connect-database_name.zip 13 | } 14 | } 15 | advanced { 16 | auth-provider { 17 | class = PlainTextAuthProvider 18 | # CLIENT ID and SECRET from Application Token that can be generated 19 | # from Token Management in Astra console. 20 | username = 21 | password = 22 | } 23 | } 24 | } 25 | ``` 26 | -------------------------------------------------------------------------------- /dse-test/src/test/scala/akka/persistence/cassandra/JournalDseSpec.scala: -------------------------------------------------------------------------------- 1 | package akka.persistence.cassandra 2 | 3 | import akka.persistence.journal.JournalSpec 4 | import com.typesafe.config.ConfigFactory 5 | 6 | object JournalDseSpec { 7 | val config = ConfigFactory.parseString(s""" 8 | akka.persistence.cassandra.journal.keyspace=JournalDseSpec 9 | akka.persistence.cassandra.snapshot.keyspace=JournalDseSpec 10 | 11 | //# override-session-provider 12 | akka.persistence.cassandra { 13 | session-provider = "your.pack.DseSessionProvider" 14 | } 15 | //# override-session-provider 16 | """).withFallback(CassandraLifecycle.config) 17 | 18 | } 19 | 20 | class JournalDseSpec extends JournalSpec(JournalDseSpec.config) with CassandraLifecycle { 21 | override def systemName: String = "JournalDseSpec" 22 | override def supportsRejectingNonSerializableObjects = false 23 | } 24 | -------------------------------------------------------------------------------- /core/src/main/scala/akka/persistence/cassandra/healthcheck/HealthCheckSettings.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2016-2025 Lightbend Inc. 3 | */ 4 | 5 | package akka.persistence.cassandra.healthcheck 6 | 7 | import akka.actor.{ ActorSystem, NoSerializationVerificationNeeded } 8 | import akka.annotation.InternalApi 9 | import com.typesafe.config.Config 10 | 11 | import scala.concurrent.duration._ 12 | 13 | @InternalApi 14 | private[akka] final class HealthCheckSettings(system: ActorSystem, config: Config) 15 | extends NoSerializationVerificationNeeded { 16 | 17 | private val healthCheckConfig = config.getConfig("healthcheck") 18 | 19 | val pluginLocation: String = healthCheckConfig.getString("plugin-location") 20 | 21 | val timeout: FiniteDuration = healthCheckConfig.getDuration("timeout", MILLISECONDS).millis 22 | 23 | val healthCheckCql: String = healthCheckConfig.getString("health-check-cql") 24 | 25 | } 26 | -------------------------------------------------------------------------------- /core/src/main/scala/akka/persistence/cassandra/query/CassandraReadJournalProvider.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2016-2025 Lightbend Inc. 
3 | */ 4 | 5 | package akka.persistence.cassandra.query 6 | 7 | import akka.actor.ExtendedActorSystem 8 | import akka.persistence.query.ReadJournalProvider 9 | import com.typesafe.config.Config 10 | 11 | class CassandraReadJournalProvider(system: ExtendedActorSystem, config: Config, configPath: String) 12 | extends ReadJournalProvider { 13 | 14 | private val scaladslReadJournalInstance = new scaladsl.CassandraReadJournal(system, config, configPath) 15 | 16 | override def scaladslReadJournal(): scaladsl.CassandraReadJournal = scaladslReadJournalInstance 17 | 18 | private val javadslReadJournalInstance = new javadsl.CassandraReadJournal(scaladslReadJournalInstance) 19 | 20 | override def javadslReadJournal(): javadsl.CassandraReadJournal = javadslReadJournalInstance 21 | 22 | } 23 | -------------------------------------------------------------------------------- /docs/src/main/paradox/index.md: -------------------------------------------------------------------------------- 1 | # Akka Persistence Cassandra 2 | 3 | The Akka Persistence Cassandra plugin allows for using [Apache Cassandra](https://cassandra.apache.org) as a backend for @extref:[Akka Persistence](akka:persistence.html) and @extref:[Akka Persistence Query](akka:persistence-query.html). It uses @extref:[Alpakka Cassandra](alpakka:cassandra.html) for Cassandra access which is based on the @extref:[Datastax Java Driver](java-driver:). 4 | 5 | @@toc { depth=2 } 6 | 7 | @@@ index 8 | 9 | * [overview](overview.md) 10 | * [Journal Plugin](journal.md) 11 | * [Query Plugin](read-journal.md) 12 | * [Events by tag](events-by-tag.md) 13 | * [Snapshot Plugin](snapshots.md) 14 | * [Serialization](serialization.md) 15 | * [Testing](testing.md) 16 | * [CQRS](cqrs.md) 17 | * [Health check](healthcheck.md) 18 | * [Configuration](configuration.md) 19 | * [Cassandra server support](server.md) 20 | * [Migrations](migrations.md) 21 | * [Cleanup](cleanup.md) 22 | 23 | @@@ 24 | -------------------------------------------------------------------------------- /example/kubernetes/cassandra.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | labels: 5 | app: cassandra 6 | name: cassandra 7 | spec: 8 | replicas: 1 9 | selector: 10 | matchLabels: 11 | app: cassandra 12 | 13 | template: 14 | metadata: 15 | labels: 16 | app: cassandra 17 | spec: 18 | containers: 19 | - name: cassandra 20 | image: cassandra:3.11 21 | ports: 22 | - name: native 23 | containerPort: 9042 24 | protocol: TCP 25 | volumeMounts: 26 | - mountPath: /var/lib/cassandra/ 27 | name: data 28 | volumes: 29 | - name: data 30 | emptyDir: {} 31 | --- 32 | apiVersion: v1 33 | kind: Service 34 | metadata: 35 | name: cassandra 36 | spec: 37 | clusterIP: None 38 | selector: 39 | app: cassandra 40 | ports: 41 | - protocol: TCP 42 | port: 9042 43 | targetPort: 9042 44 | 45 | -------------------------------------------------------------------------------- /project/link-validator.conf: -------------------------------------------------------------------------------- 1 | // config for https://github.com/ennru/site-link-validator/ 2 | site-link-validator { 3 | root-dir = "./docs/target/site/" 4 | # relative to `root-dir` 5 | start-file = "libraries/akka-persistence-cassandra/snapshot/index.html" 6 | 7 | # Resolves URLs with the given prefix as local files instead 8 | link-mappings = [ 9 | { 10 | prefix = "https://doc.akka.io/libraries/akka-persistence-cassandra/snapshot/" 11 | replace = 
"/libraries/akka-persistence-cassandra/snapshot/" 12 | } 13 | { 14 | prefix = "https://doc.akka.io/api/akka-persistence-cassandra/snapshot/" 15 | replace = "/api/akka-persistence-cassandra/snapshot/" 16 | } 17 | ] 18 | 19 | ignore-missing-local-files-regex = "" 20 | 21 | ignore-files = [] 22 | 23 | ignore-prefixes = [ 24 | # GitHub will block with "429 Too Many Requests" 25 | "https://github.com/", 26 | "https://repo.akka.io/" 27 | ] 28 | 29 | non-https-whitelist = [ 30 | ] 31 | } 32 | -------------------------------------------------------------------------------- /project/plugins.sbt: -------------------------------------------------------------------------------- 1 | resolvers += "Akka library repository".at("https://repo.akka.io/maven/github_actions") 2 | 3 | addSbtPlugin("de.heikoseeberger" % "sbt-header" % "5.10.0") 4 | addSbtPlugin("org.scalameta" % "sbt-scalafmt" % "2.5.5") 5 | addSbtPlugin("com.eed3si9n" % "sbt-assembly" % "2.3.1") 6 | addSbtPlugin("com.github.sbt" % "sbt-multi-jvm" % "0.6.0") 7 | addSbtPlugin("com.github.sbt" % "sbt-dynver" % "5.1.1") 8 | addSbtPlugin("com.github.sbt" % "sbt-ci-release" % "1.9.3") 9 | 10 | // for dependency analysis 11 | addDependencyTreePlugin 12 | 13 | // Documentation 14 | addSbtPlugin("io.akka" % "sbt-paradox-akka" % "25.10.2") 15 | addSbtPlugin("com.lightbend.paradox" % "sbt-paradox-dependencies" % "0.2.4") 16 | addSbtPlugin("com.lightbend.sbt" % "sbt-publish-rsync" % "0.3") 17 | addSbtPlugin("com.github.sbt" % "sbt-unidoc" % "0.6.0") 18 | addSbtPlugin("com.github.sbt" % "sbt-site-paradox" % "1.7.0") 19 | 20 | // For example 21 | addSbtPlugin("com.github.sbt" % "sbt-native-packager" % "1.11.3") 22 | -------------------------------------------------------------------------------- /core/src/test/scala/doc/reconciler/AllPersistenceIdsMigrationCompileOnly.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2016-2025 Lightbend Inc. 3 | */ 4 | 5 | package doc.reconciler 6 | 7 | //#imports 8 | import scala.util.Failure 9 | import scala.util.Success 10 | 11 | import akka.actor.ActorSystem 12 | import akka.persistence.cassandra.reconciler.Reconciliation 13 | 14 | //#imports 15 | 16 | class AllPersistenceIdsMigrationCompileOnly { 17 | 18 | //#migrate 19 | // System should have the same Cassandra plugin configuration as your application 20 | // but be careful to remove seed nodes so this doesn't join the cluster 21 | val system = ActorSystem() 22 | import system.dispatcher 23 | 24 | val rec = new Reconciliation(system) 25 | val result = rec.rebuildAllPersistenceIds() 26 | 27 | result.onComplete { 28 | case Success(_) => 29 | system.log.info("All persistenceIds migrated.") 30 | system.terminate() 31 | case Failure(e) => 32 | system.log.error(e, "All persistenceIds migration failed.") 33 | system.terminate() 34 | } 35 | //#migrate 36 | } 37 | -------------------------------------------------------------------------------- /core/src/main/scala/akka/persistence/cassandra/query/MissingTaggedEventException.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2016-2025 Lightbend Inc. 3 | */ 4 | 5 | package akka.persistence.cassandra.query 6 | 7 | import java.util.UUID 8 | import akka.annotation.ApiMayChange 9 | 10 | /** 11 | * Events by tag query was unable all the events for some persistence ids. 
12 | * 13 | * Consider restarting the query from the minOffset if downstream processing is idempotent 14 | * as it may re-deliver previously delivered events. 15 | * 16 | * @param tag the tag for the query 17 | * @param missing a map from persistence id to a set of tag pid sequence numbers that could 18 | * not be found 19 | * @param minOffset minimum offset was used when searching 20 | * @param maxOffset maximum offset used when searching 21 | */ 22 | @ApiMayChange 23 | final class MissingTaggedEventException( 24 | val tag: String, 25 | val missing: Map[String, Set[Long]], 26 | val minOffset: UUID, 27 | val maxOffset: UUID) 28 | extends RuntimeException(s"Unable to find tagged events for tag [$tag]: ${missing}") 29 | -------------------------------------------------------------------------------- /example/src/main/resources/local.conf: -------------------------------------------------------------------------------- 1 | include "common.conf" 2 | 3 | // two nodes need to be started to form the cluster, 8551 and 8552 4 | akka.discovery { 5 | config.services = { 6 | local-cluster = { 7 | endpoints = [ 8 | { 9 | host = ${akka.management.http.hostname} 10 | port = 8551 11 | }, 12 | { 13 | host = ${akka.management.http.hostname} 14 | port = 8552 15 | } 16 | ] 17 | } 18 | } 19 | } 20 | 21 | akka.management { 22 | http { 23 | hostname = ${akka.remote.artery.canonical.hostname} 24 | port = 8551 25 | } 26 | 27 | cluster.bootstrap { 28 | contact-point-discovery { 29 | service-name = "local-cluster" 30 | port-name = "akka-management" # work around for making it not group and filter based on hostname 31 | discovery-method = config 32 | required-contact-point-nr = 2 33 | stable-margin = 100 ms 34 | } 35 | } 36 | 37 | } 38 | 39 | akka { 40 | remote.artery { 41 | canonical { 42 | hostname = "127.0.0.1" 43 | port = 2551 44 | } 45 | } 46 | } 47 | -------------------------------------------------------------------------------- /RELEASING.md: -------------------------------------------------------------------------------- 1 | # Releasing 2 | 3 | Create a new issue from the [Release Train Issue Template](docs/release-train-issue-template.md): 4 | 5 | ``` 6 | $ sh ./scripts/create-release-issue.sh 1.x.y 7 | ``` 8 | 9 | ### Releasing only updated docs 10 | 11 | It is possible to release a revised documentation to the already existing release. 12 | 13 | 1. Create a new branch from a release tag. If a revised documentation is for the `v1.0.1` release, then the name of the new branch should be `docs/v1.0.1`. 14 | 1. Add and commit `version.sbt` file that pins the version to the one, that is being revised. Also set `isSnapshot` to `false` for the stable documentation links. For example: 15 | ```scala 16 | ThisBuild / version := "1.0.1" 17 | ThisBuild / isSnapshot := false 18 | ``` 19 | 1. Make all of the required changes to the documentation. 20 | 1. Build documentation locally with: 21 | ```sh 22 | sbt docs/previewSite 23 | ``` 24 | 1. If the generated documentation looks good, send it to Gustav: 25 | ```sh 26 | rm -r docs/target/site 27 | sbt docs/publishRsync 28 | ``` 29 | 1. Do not forget to push the new branch back to GitHub. 
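For the first step of the docs-only release process above, creating the docs branch from an existing release tag can be done with plain git — a sketch using the `v1.0.1` example from that step: ```sh git checkout -b docs/v1.0.1 v1.0.1 && git push origin docs/v1.0.1 ```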
30 | -------------------------------------------------------------------------------- /.scalafmt.conf: -------------------------------------------------------------------------------- 1 | version = 2.1.0 2 | 3 | style = defaultWithAlign 4 | 5 | docstrings = JavaDoc 6 | indentOperator = spray 7 | maxColumn = 120 8 | rewrite.rules = [RedundantParens, SortImports, AvoidInfix] 9 | unindentTopLevelOperators = true 10 | align.tokens = [{code = "=>", owner = "Case"}] 11 | align.openParenDefnSite = false 12 | align.openParenCallSite = false 13 | optIn.breakChainOnFirstMethodDot = false 14 | optIn.configStyleArguments = false 15 | danglingParentheses = false 16 | spaces.inImportCurlyBraces = true 17 | rewrite.neverInfix.excludeFilters = [ 18 |   and 19 |   min 20 |   max 21 |   until 22 |   to 23 |   by 24 |   eq 25 |   ne 26 |   "should.*" 27 |   "contain.*" 28 |   "must.*" 29 |   in 30 |   ignore 31 |   be 32 |   taggedAs 33 |   thrownBy 34 |   synchronized 35 |   have 36 |   when 37 |   size 38 |   only 39 |   noneOf 40 |   oneElementOf 41 |   noElementsOf 42 |   atLeastOneElementOf 43 |   atMostOneElementOf 44 |   allElementsOf 45 |   inOrderElementsOf 46 |   theSameElementsAs 47 | ] 48 | rewriteTokens = { 49 |   "⇒": "=>" 50 |   "→": "->" 51 |   "←": "<-" 52 | } 53 | -------------------------------------------------------------------------------- /core/src/main/scala/akka/persistence/cassandra/CachedPreparedStatement.scala: -------------------------------------------------------------------------------- 1 | /* 2 |  * Copyright (C) 2016-2025 Lightbend Inc. 3 |  */ 4 | 5 | package akka.persistence.cassandra 6 | 7 | import scala.concurrent.ExecutionContext 8 | import scala.concurrent.Future 9 | 10 | import akka.annotation.InternalApi 11 | import akka.util.OptionVal 12 | import com.datastax.oss.driver.api.core.cql.PreparedStatement 13 | 14 | /** 15 |  * INTERNAL API 16 |  */ 17 | @InternalApi private[cassandra] class CachedPreparedStatement(init: () => Future[PreparedStatement]) { 18 |   @volatile private var preparedStatement: OptionVal[Future[PreparedStatement]] = OptionVal.None 19 | 20 |   def get(): Future[PreparedStatement] = 21 |     preparedStatement match { 22 |       case OptionVal.Some(ps) => ps 23 |       case _ => 24 |         // ok to init multiple times in case of concurrent access 25 |         val ps = init() 26 |         ps.foreach { p => 27 |           // only cache successful futures, ok to overwrite 28 |           preparedStatement = OptionVal.Some(Future.successful(p)) 29 |         }(ExecutionContext.parasitic) 30 |         ps 31 |     } 32 | 33 | } 34 | -------------------------------------------------------------------------------- /docs/src/main/paradox/assets/js/warnOldVersion.js: -------------------------------------------------------------------------------- 1 | function initOldVersionWarnings($, thisVersion, projectUrl) { 2 |     if (projectUrl && projectUrl !== "") { 3 |         var schemeLessUrl = projectUrl; 4 |         if (projectUrl.startsWith("http://")) schemeLessUrl = projectUrl.substring(5); 5 |         else if (projectUrl.startsWith("https://")) schemeLessUrl = projectUrl.substring(6); 6 |         const url = schemeLessUrl + (schemeLessUrl.endsWith("\/") ? "" : "/") + "paradox.json"; 7 |         $.get(url, function (versionData) { 8 |             const currentVersion = versionData.version; 9 |             if (thisVersion !== currentVersion) { 10 |                 showVersionWarning(thisVersion, currentVersion, projectUrl); 11 |             } 12 |         }); 13 |     } 14 | } 15 | 16 | function showVersionWarning(thisVersion, currentVersion, projectUrl) { 17 |     $('#docs').prepend(
18 |         '<div class="callout warning">' + 19 |         '<p>This documentation regards version ' + thisVersion + ', ' + 20 |         'however the current version is <a href="' + projectUrl + '">' + currentVersion + '</a>.</p>' + 21 |         '</div>'
22 |     ); 23 | } -------------------------------------------------------------------------------- /core/src/main/scala/akka/persistence/cassandra/reconciler/AllTags.scala: -------------------------------------------------------------------------------- 1 | /* 2 |  * Copyright (C) 2016-2025 Lightbend Inc. 3 |  */ 4 | 5 | package akka.persistence.cassandra.reconciler 6 | 7 | import akka.annotation.InternalApi 8 | import akka.stream.scaladsl.Source 9 | import akka.NotUsed 10 | 11 | /** 12 |  * Calculates all the tags by scanning the tag_write_progress table. 13 |  * 14 |  * This is not an efficient query as it needs to do a full table scan, and while running it will keep 15 |  * all tags in memory. 16 |  * 17 |  * This may not pick up tags that have just been created as the write to the tag progress 18 |  * table is asynchronous. 19 |  * 20 |  * INTERNAL API 21 |  */ 22 | @InternalApi 23 | private[akka] final class AllTags(session: ReconciliationSession) { 24 | 25 |   def execute(): Source[String, NotUsed] = { 26 |     session 27 |       .selectAllTagProgress() 28 |       .map(_.getString("tag")) 29 |       .statefulMapConcat(() => { 30 |         var seen = Set.empty[String] 31 |         tag => 32 |           if (!seen.contains(tag)) { 33 |             seen += tag 34 |             List(tag) 35 |           } else { 36 |             Nil 37 |           } 38 |       }) 39 |   } 40 | 41 | } 42 | -------------------------------------------------------------------------------- /docs/src/main/paradox/read-journal.md: -------------------------------------------------------------------------------- 1 | # Query Plugin 2 | 3 | The query plugin implements the following @extref:[Persistence Queries](akka:persistence-query.html): 4 | 5 | * eventsByPersistenceId, currentEventsByPersistenceId 6 | * eventsByTag, currentEventsByTag 7 | * persistenceIds, currentPersistenceIds 8 | 9 | See API details in @apidoc[CassandraReadJournal] and @ref:[eventsByTag documentation](events-by-tag.md). 10 | 11 | Persistence Query usage example to obtain a stream with all events tagged with "someTag": 12 | 13 |     val queries = PersistenceQuery(system).readJournalFor[CassandraReadJournal](CassandraReadJournal.Identifier) 14 |     queries.eventsByTag("someTag", Offset.noOffset) 15 | 16 | ## Configuration 17 | 18 | The default settings can be changed with the configuration properties defined in 19 | @ref:[reference.conf](configuration.md#default-configuration). 20 | 21 | Query configuration is under `akka.persistence.cassandra.query`. 22 | 23 | Events by tag configuration is under `akka.persistence.cassandra.events-by-tag` and shared 24 | by `journal` and `query`. 25 | 26 | If using the events by tag query it is important to set the @ref:[first time bucket](events-by-tag.md#first-time-bucket). -------------------------------------------------------------------------------- /docs/src/main/paradox/server.md: -------------------------------------------------------------------------------- 1 | # Cassandra server support 2 | 3 | There are several alternative Cassandra server implementations. This project is tested with 4 | [Apache Cassandra](https://cassandra.apache.org) and 5 | [DataStax Enterprise (DSE)](https://www.datastax.com/resources/datasheet/datastax-enterprise). 6 | 7 | [DataStax Astra](https://www.datastax.com/products/datastax-astra) is built on Apache Cassandra and should therefore also work without 8 | surprises even though there are no continuous integration tests for it in this project. See @ref:[DataStax Astra configuration](astra.md).
9 | 10 | [Lightbend support](https://www.lightbend.com/lightbend-subscription) only covers usage with Apache Cassandra, DSE and DataStax Astra. 11 | 12 | Several other alternatives are listed below. Be aware that this project is not continuously tested against those 13 | and that they are currently not covered by Lightbend support. 14 | 15 | * @ref:[CosmosDB](cosmosdb.md) 16 | * @ref:[DataStax Astra](astra.md) 17 | * @ref:[ScyllaDB](scylladb.md) 18 | 19 | ## More details 20 | 21 | @@toc { depth=1 } 22 | 23 | @@@ index 24 | 25 | * [Amazon Keyspaces](keyspaces.md) 26 | * [CosmosDB](cosmosdb.md) 27 | * [DataStax Astra](astra.md) 28 | * [ScyllaDB](scylladb.md) 29 | 30 | @@@ 31 | -------------------------------------------------------------------------------- /example/kubernetes/load.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 |   labels: 5 |     app: apc-load 6 |     cluster: apc-example 7 |   name: apc-load 8 | spec: 9 |   replicas: 1 10 |   selector: 11 |     matchLabels: 12 |       app: apc-load 13 |   strategy: 14 |     rollingUpdate: 15 |       maxSurge: 1 16 |       maxUnavailable: 0 17 |     type: RollingUpdate 18 | 19 |   template: 20 |     metadata: 21 |       labels: 22 |         app: apc-load 23 |         cluster: apc-example 24 |     spec: 25 |       containers: 26 |         - name: apc-load 27 |           # remove for real clusters, useful for minikube 28 |           imagePullPolicy: Never 29 |           image: kubakka/endtoendexample:latest 30 |           ports: 31 |             - name: management 32 |               containerPort: 8558 33 |               protocol: TCP 34 |             - name: http 35 |               containerPort: 8080 36 |               protocol: TCP 37 |           env: 38 |             - name: ROLE 39 |               value: load 40 |           readinessProbe: 41 |             httpGet: 42 |               path: /ready 43 |               port: management 44 |           livenessProbe: 45 |             httpGet: 46 |               path: /alive 47 |               port: management 48 | -------------------------------------------------------------------------------- /example/kubernetes/read.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 |   labels: 5 |     app: apc-read 6 |     cluster: apc-example 7 |   name: apc-read 8 | spec: 9 |   replicas: 1 10 |   selector: 11 |     matchLabels: 12 |       app: apc-read 13 |   strategy: 14 |     rollingUpdate: 15 |       maxSurge: 1 16 |       maxUnavailable: 0 17 |     type: RollingUpdate 18 | 19 |   template: 20 |     metadata: 21 |       labels: 22 |         app: apc-read 23 |         cluster: apc-example 24 |     spec: 25 |       containers: 26 |         - name: apc-read 27 |           # remove for real clusters, useful for minikube 28 |           imagePullPolicy: Never 29 |           image: kubakka/endtoendexample:latest 30 |           ports: 31 |             - name: management 32 |               containerPort: 8558 33 |               protocol: TCP 34 |             - name: http 35 |               containerPort: 8080 36 |               protocol: TCP 37 |           env: 38 |             - name: ROLE 39 |               value: read 40 |           readinessProbe: 41 |             httpGet: 42 |               path: /ready 43 |               port: management 44 |           livenessProbe: 45 |             httpGet: 46 |               path: /alive 47 |               port: management 48 | -------------------------------------------------------------------------------- /core/src/test/scala/doc/reconciler/ReconciliationCompileOnly.scala: -------------------------------------------------------------------------------- 1 | /* 2 |  * Copyright (C) 2016-2025 Lightbend Inc.
3 |  */ 4 | 5 | package doc.reconciler 6 | 7 | //#imports 8 | import akka.persistence.cassandra.reconciler.Reconciliation 9 | import akka.actor.ActorSystem 10 | import scala.concurrent.Future 11 | import akka.Done 12 | 13 | //#imports 14 | 15 | class ReconciliationCompileOnly { 16 | 17 |   //#reconcile 18 |   // System should have the same Cassandra plugin configuration as your application 19 |   // but be careful to remove seed nodes so this doesn't join the cluster 20 |   val system = ActorSystem() 21 |   import system.dispatcher 22 | 23 |   val rec = new Reconciliation(system) 24 | 25 |   // Drop and re-create data for a persistence id 26 |   val pid = "pid1" 27 |   for { 28 |     // do this before dropping the data 29 |     tags <- rec.tagsForPersistenceId(pid) 30 |     // drop the tag view for every tag for this persistence id 31 |     dropData <- Future.traverse(tags)(tag => rec.deleteTagViewForPersistenceIds(Set(pid), tag)) 32 |     // optional: re-build, if this is omitted then it will be re-built next time the pid is started 33 |     _ <- rec.rebuildTagViewForPersistenceIds(pid) 34 |   } yield Done 35 |   //#reconcile 36 | } 37 | -------------------------------------------------------------------------------- /example/kubernetes/write.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 |   labels: 5 |     app: apc-write 6 |     cluster: apc-example 7 |   name: apc-write 8 | spec: 9 |   replicas: 2 10 |   selector: 11 |     matchLabels: 12 |       app: apc-write 13 |   strategy: 14 |     rollingUpdate: 15 |       maxSurge: 1 16 |       maxUnavailable: 0 17 |     type: RollingUpdate 18 | 19 |   template: 20 |     metadata: 21 |       labels: 22 |         app: apc-write 23 |         cluster: apc-example 24 |     spec: 25 |       containers: 26 |         - name: apc-write 27 |           # remove for real clusters, useful for minikube 28 |           imagePullPolicy: Never 29 |           image: kubakka/endtoendexample:latest 30 |           ports: 31 |             - name: management 32 |               containerPort: 8558 33 |               protocol: TCP 34 |             - name: http 35 |               containerPort: 8080 36 |               protocol: TCP 37 |           env: 38 |             - name: ROLE 39 |               value: write 40 |           readinessProbe: 41 |             httpGet: 42 |               path: /ready 43 |               port: management 44 |           livenessProbe: 45 |             httpGet: 46 |               path: /alive 47 |               port: management 48 | -------------------------------------------------------------------------------- /core/src/test/scala/akka/persistence/cassandra/WithLogCapturing.scala: -------------------------------------------------------------------------------- 1 | /* 2 |  * Copyright (C) 2016-2025 Lightbend Inc. 3 |  */ 4 | 5 | package akka.persistence.cassandra 6 | 7 | import akka.event.Logging._ 8 | import akka.testkit.TestEventListener 9 | 10 | /** 11 |  * An adaptation of TestEventListener that never prints debug logs itself. Use together with [[akka.persistence.cassandra.CassandraSpec]]. 12 |  * It allows enabling noisy DEBUG logging for tests and silencing pre/post test DEBUG output completely while still showing 13 |  * test-specific DEBUG output selectively. 14 |  */ 15 | class DebugLogSilencingTestEventListener extends TestEventListener { 16 |   override def print(event: Any): Unit = event match { 17 |     case _: Debug => // ignore 18 |     case _        => super.print(event) 19 |   } 20 | } 21 | 22 | /** 23 |  * An adaptation of TestEventListener that does not print out any logs. Use together with [[akka.persistence.cassandra.CassandraSpec]]. 24 |  * It allows enabling noisy logging for tests and silencing pre/post test log output completely while still showing 25 |  * test-specific log output selectively on failures.
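 * For example, a listener like this is enabled through standard Akka test configuration — an illustrative sketch, not configuration this project ships: {{{ akka.loggers = ["akka.persistence.cassandra.SilenceAllTestEventListener"] }}}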
26 | */ 27 | class SilenceAllTestEventListener extends TestEventListener { 28 | override def print(event: Any): Unit = () 29 | } 30 | -------------------------------------------------------------------------------- /core/src/test/scala/akka/persistence/cassandra/reconciler/BuildTagViewForPersistenceIdSpec.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2016-2025 Lightbend Inc. 3 | */ 4 | 5 | package akka.persistence.cassandra.reconciler 6 | 7 | import akka.persistence.cassandra.CassandraSpec 8 | import org.scalatest.concurrent.Eventually 9 | 10 | class BuildTagViewForPersisetceIdSpec extends CassandraSpec with Eventually { 11 | 12 | "BuildTagViewForPersistenceId" should { 13 | 14 | val tag1 = "tag1" 15 | val pid1 = "pid1" 16 | val pid2 = "pid2" 17 | 18 | "build from scratch" in { 19 | writeEventsFor(tag1, pid1, 2) 20 | writeEventsFor(tag1, pid2, 1) 21 | eventually { 22 | expectEventsForTag(tag1, "pid1 event-1", "pid1 event-2", "pid2 event-1") 23 | } 24 | val reconciliation = new Reconciliation(system) 25 | reconciliation.truncateTagView().futureValue 26 | expectEventsForTag(tag1) 27 | reconciliation.rebuildTagViewForPersistenceIds(pid1).futureValue 28 | eventually { 29 | expectEventsForTag(tag1, "pid1 event-1", "pid1 event-2") 30 | } 31 | reconciliation.rebuildTagViewForPersistenceIds(pid2).futureValue 32 | eventually { 33 | expectEventsForTag(tag1, "pid1 event-1", "pid1 event-2", "pid2 event-1") 34 | } 35 | } 36 | } 37 | } 38 | -------------------------------------------------------------------------------- /project/project-info.conf: -------------------------------------------------------------------------------- 1 | project-info { 2 | version: "current" 3 | scala-versions: ["2.13", "3.3"] 4 | shared-info { 5 | jdk-versions: ["Eclipse Temurin JDK 11", "Eclipse Temurin JDK 17", "Eclipse Temurin JDK 21"] 6 | snapshots: { 7 | url: "https://repo.akka.io/snapshots" 8 | text: "Akka library snapshot repository" 9 | } 10 | issues: { 11 | url: "https://github.com/akka/akka-persistence-cassandra/issues" 12 | text: "GitHub issues" 13 | } 14 | release-notes: { 15 | url: "https://github.com/akka/akka-persistence-cassandra/releases" 16 | text: "GitHub releases" 17 | } 18 | api-docs: [ 19 | { 20 | url: "https://doc.akka.io/api/akka-persistence-cassandra/"${project-info.version}"/akka/persistence/cassandra/" 21 | text: "API (Scaladoc)" 22 | } 23 | ] 24 | forums: [ 25 | { 26 | text: "Lightbend Discuss" 27 | url: "https://discuss.akka.io/c/akka/" 28 | } 29 | ] 30 | } 31 | core: ${project-info.shared-info} { 32 | title: "Akka Persistence Cassandra" 33 | jpms-name: "akka.persistence.cassandra" 34 | levels: [ 35 | { 36 | readiness: Supported 37 | since: "2020-04-30" 38 | since-version: "1.0.0" 39 | }, 40 | { 41 | readiness: CommunityDriven 42 | since: "2017-09-28" 43 | since-version: "0.56" 44 | } 45 | ] 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /core/src/test/scala/akka/persistence/cassandra/query/TestEventAdapter.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2016-2025 Lightbend Inc. 
3 | */ 4 | 5 | package akka.persistence.cassandra.query 6 | 7 | import akka.actor.ExtendedActorSystem 8 | import akka.persistence.journal.{ EventAdapter, EventSeq, Tagged } 9 | 10 | sealed trait TestEvent[T] { 11 | def value: T 12 | } 13 | 14 | class TestEventAdapter(system: ExtendedActorSystem) extends EventAdapter { 15 | 16 | override def manifest(event: Any): String = "" 17 | 18 | override def toJournal(event: Any): Any = event match { 19 | case e: String if e.startsWith("tagged:") => 20 | val taggedEvent = e.stripPrefix("tagged:") 21 | val tags = taggedEvent.takeWhile(_ != ':').split(",").toSet 22 | val payload = taggedEvent.dropWhile(_ != ':').drop(1) 23 | Tagged(payload, tags) 24 | case e => e 25 | } 26 | 27 | override def fromJournal(event: Any, manifest: String): EventSeq = event match { 28 | case e: String if e.contains(":") => 29 | e.split(":").toList match { 30 | case "dropped" :: _ :: Nil => EventSeq.empty 31 | case "duplicated" :: x :: Nil => EventSeq(x, x) 32 | case "prefixed" :: prefix :: x :: Nil => EventSeq.single(s"$prefix-$x") 33 | case _ => throw new IllegalArgumentException(e) 34 | } 35 | case _ => EventSeq.single(event) 36 | } 37 | } 38 | -------------------------------------------------------------------------------- /.github/workflows/fossa.yml: -------------------------------------------------------------------------------- 1 | name: Dependency License Scanning 2 | 3 | on: 4 | workflow_dispatch: 5 | schedule: 6 | - cron: '0 0 * * 0' # At 00:00 on Sunday 7 | 8 | permissions: 9 | contents: read 10 | 11 | jobs: 12 | fossa: 13 | name: Fossa 14 | runs-on: ubuntu-latest 15 | if: github.event.repository.fork == false 16 | steps: 17 | - name: Checkout 18 | # https://github.com/actions/checkout/releases 19 | # v4.1.1 20 | uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 21 | with: 22 | # we don't know what commit the last tag was it's safer to get entire repo so previousStableVersion resolves 23 | fetch-depth: 0 24 | 25 | - name: Set up JDK 11 26 | # https://github.com/coursier/setup-action/releases 27 | # v1.3.5 28 | uses: coursier/setup-action@7bde40eee928896f074dbb76d22dd772eed5c65f 29 | with: 30 | jvm: temurin:1.11 31 | 32 | - name: Cache Coursier cache 33 | # https://github.com/coursier/cache-action/releases 34 | # v6.4.5 35 | uses: coursier/cache-action@1ff273bff02a8787bc9f1877d347948af647956d 36 | 37 | - name: FOSSA policy check 38 | run: |- 39 | curl -H 'Cache-Control: no-cache' https://raw.githubusercontent.com/fossas/fossa-cli/master/install-latest.sh | bash 40 | fossa analyze 41 | env: 42 | FOSSA_API_KEY: "${{secrets.FOSSA_API_KEY}}" 43 | -------------------------------------------------------------------------------- /docs/src/main/paradox/testing.md: -------------------------------------------------------------------------------- 1 | # Testing 2 | 3 | There are a number of options for testing persistent actors when using the Akka Persistence Cassandra plugin. 4 | The two main methods are: 5 | 6 | * Testing using the inmem journal as shown in the [Akka docs](https://doc.akka.io/libraries/akka-core/current/typed/persistence-testing.html). 7 | * Testing against a real Cassandra instance. 8 | 9 | For testing against Cassandra you can: 10 | 11 | * Use an existing cluster in your environment. 
12 | * Install a local Cassandra cluster with your preferred method 13 | * Set up one locally with [Cassandra Cluster Manager](https://github.com/riptano/ccm) 14 | 15 | Then there are options with tighter unit test framework integration: 16 | 17 | * [Cassandra unit](https://github.com/jsevellec/cassandra-unit) 18 | * [Test Containers](https://www.testcontainers.org/modules/databases/cassandra/) 19 | 20 | For testing it can be convenient to enable automatic creation of keyspace and tables with configuration: 21 | 22 | akka.persistence.cassandra { 23 | journal.keyspace-autocreate = on 24 | journal.tables-autocreate = on 25 | snapshot.keyspace-autocreate = on 26 | snapshot.tables-autocreate = on 27 | } 28 | 29 | 30 | It is also recommended not to run tests in parallel, to avoid contention around manipulating the Cassandra keyspace. Using sbt, you can control this with `parallelExecution` (for example `Test / parallelExecution := false`) as described [here](https://www.scala-sbt.org/1.x/docs/Testing.html). 31 | -------------------------------------------------------------------------------- /core/src/main/scala/akka/persistence/cassandra/query/QuerySettings.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2016-2025 Lightbend Inc. 3 | */ 4 | 5 | package akka.persistence.cassandra.query 6 | 7 | import scala.concurrent.duration._ 8 | 9 | import akka.actor.ActorSystem 10 | import akka.actor.NoSerializationVerificationNeeded 11 | import akka.annotation.InternalApi 12 | import akka.annotation.InternalStableApi 13 | import akka.persistence.cassandra.EventsByTagSettings 14 | import com.typesafe.config.Config 15 | 16 | /** 17 | * INTERNAL API 18 | */ 19 | @InternalStableApi 20 | @InternalApi private[akka] class QuerySettings( 21 | system: ActorSystem, 22 | config: Config, 23 | val eventsByTagSettings: EventsByTagSettings) 24 | extends NoSerializationVerificationNeeded { 25 | 26 | private val queryConfig = config.getConfig("query") 27 | 28 | val readProfile: String = queryConfig.getString("read-profile") 29 | 30 | val refreshInterval: FiniteDuration = 31 | queryConfig.getDuration("refresh-interval", MILLISECONDS).millis 32 | 33 | val gapFreeSequenceNumbers: Boolean = queryConfig.getBoolean("gap-free-sequence-numbers") 34 | 35 | val deserializationParallelism: Int = queryConfig.getInt("deserialization-parallelism") 36 | 37 | val pluginDispatcher: String = queryConfig.getString("plugin-dispatcher") 38 | 39 | val eventsByPersistenceIdEventTimeout: FiniteDuration = 40 | queryConfig.getDuration("events-by-persistence-id-gap-timeout", MILLISECONDS).millis 41 | 42 | } 43 | -------------------------------------------------------------------------------- /core/src/main/scala/akka/persistence/cassandra/compaction/LeveledCompactionStrategy.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2016-2025 Lightbend Inc.
3 | */ 4 | 5 | package akka.persistence.cassandra.compaction 6 | 7 | import com.typesafe.config.Config 8 | 9 | /* 10 | * https://github.com/apache/cassandra/blob/cassandra-2.2/src/java/org/apache/cassandra/db/compaction/LeveledCompactionStrategy.java 11 | */ 12 | class LeveledCompactionStrategy(config: Config) 13 | extends BaseCompactionStrategy(config, LeveledCompactionStrategy.ClassName, LeveledCompactionStrategy.propertyKeys) { 14 | val ssTableSizeInMB: Long = 15 | if (config.hasPath("sstable_size_in_mb")) 16 | config.getLong("sstable_size_in_mb") 17 | else 160 18 | 19 | require(ssTableSizeInMB > 0, s"sstable_size_in_mb must be larger than 0, but was $ssTableSizeInMB") 20 | 21 | override def asCQL: String = 22 | s"""{ 23 | |'class' : '${LeveledCompactionStrategy.ClassName}', 24 | |${super.asCQL}, 25 | |'sstable_size_in_mb' : $ssTableSizeInMB 26 | |} 27 | """.stripMargin.trim 28 | } 29 | 30 | object LeveledCompactionStrategy extends CassandraCompactionStrategyConfig[LeveledCompactionStrategy] { 31 | override val ClassName: String = "LeveledCompactionStrategy" 32 | 33 | override def propertyKeys: List[String] = 34 | (BaseCompactionStrategy.propertyKeys ++ List("sstable_size_in_mb")).sorted 35 | 36 | override def fromConfig(config: Config): LeveledCompactionStrategy = 37 | new LeveledCompactionStrategy(config) 38 | } 39 | -------------------------------------------------------------------------------- /docs/src/test/scala/doc/cleanup/CleanupDocExample.scala: -------------------------------------------------------------------------------- 1 | package doc.cleanup 2 | 3 | import java.time.{ LocalDate, ZonedDateTime } 4 | import java.time.temporal.ChronoUnit 5 | 6 | import akka.actor.ActorSystem 7 | import akka.persistence.cassandra.cleanup.Cleanup 8 | import akka.persistence.cassandra.query.scaladsl.CassandraReadJournal 9 | import akka.persistence.query.PersistenceQuery 10 | 11 | object CleanupDocExample { 12 | 13 | implicit val system: ActorSystem = ??? 
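// NOTE: ??? is a compile-only placeholder in this doc example; a real application would supply its own initialized ActorSystem here.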
14 | 15 | //#cleanup 16 | val queries = PersistenceQuery(system).readJournalFor[CassandraReadJournal](CassandraReadJournal.Identifier) 17 | val cleanup = new Cleanup(system) 18 | 19 | // how many persistence ids to operate on in parallel 20 | val persistenceIdParallelism = 10 21 | 22 | // for all persistence ids, keep two snapshots and delete all events before the oldest kept snapshot 23 | queries.currentPersistenceIds().mapAsync(persistenceIdParallelism)(pid => cleanup.cleanupBeforeSnapshot(pid, 2)).run() 24 | 25 | // for all persistence ids, keep everything after the provided unix timestamp; if there aren't enough snapshots after this time, 26 | // go back before the timestamp to find a snapshot to delete before 27 | // this operation is more expensive than the one above 28 | val keepAfter = ZonedDateTime.now().minus(1, ChronoUnit.MONTHS) 29 | queries 30 | .currentPersistenceIds() 31 | .mapAsync(persistenceIdParallelism)(pid => cleanup.cleanupBeforeSnapshot(pid, 2, keepAfter.toInstant.toEpochMilli)) 32 | .run() 33 | 34 | //#cleanup 35 | 36 | } 37 | -------------------------------------------------------------------------------- /example/src/main/resources/common.conf: -------------------------------------------------------------------------------- 1 | cassandra.example { 2 | 3 | # number of processors; each will take 1/n of the tags 4 | processors = 10 5 | 6 | # total number of tags will be processors * this 7 | tags-per-processor = 5 8 | 9 | # number of persistence ids 10 | persistence-ids = 1000 11 | 12 | # load tick duration 13 | load-tick-duration = 1s 14 | 15 | } 16 | 17 | datastax-java-driver { 18 | advanced { 19 | # reconnect to c* if down when app is started 20 | reconnect-on-init = true 21 | } 22 | } 23 | 24 | akka { 25 | loglevel = DEBUG 26 | actor { 27 | provider = "cluster" 28 | serialization-bindings { 29 | "akka.persistence.cassandra.example.CborSerializable" = jackson-cbor 30 | } 31 | } 32 | 33 | persistence.journal { 34 | plugin = "akka.persistence.cassandra.journal" 35 | # to create the schema 36 | auto-start-journals = ["akka.persistence.cassandra.journal"] 37 | } 38 | persistence.snapshot-store.plugin = "akka.persistence.cassandra.snapshot" 39 | 40 | persistence { 41 | cassandra { 42 | journal { 43 | keyspace-autocreate = true 44 | tables-autocreate = true 45 | } 46 | 47 | snapshot { 48 | keyspace-autocreate = true 49 | tables-autocreate = true 50 | } 51 | 52 | query { 53 | refresh-interval = 2s 54 | } 55 | 56 | events-by-tag { 57 | // for lower latency 58 | eventual-consistency-delay = 25ms 59 | flush-interval = 25ms 60 | pubsub-notification = on 61 | } 62 | } 63 | } 64 | } 65 | -------------------------------------------------------------------------------- /core/src/test/scala/akka/persistence/cassandra/reconciler/RebuildAllPersisetceIdsSpec.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2016-2025 Lightbend Inc.
3 | */ 4 | 5 | package akka.persistence.cassandra.reconciler 6 | 7 | import akka.persistence.cassandra.CassandraSpec 8 | import akka.stream.scaladsl.Sink 9 | import org.scalatest.concurrent.Eventually 10 | 11 | class RebuildAllPersisetceIdsSpec extends CassandraSpec with Eventually { 12 | 13 | "RebuildAllPersisetceIds" should { 14 | 15 | val tag1 = "tag1" 16 | val pid1 = "pid1" 17 | val pid2 = "pid2" 18 | val pid3 = "pid3" 19 | val pid4 = "pid4" 20 | val pid5 = "pid5" 21 | 22 | "build from messages table" in { 23 | writeEventsFor(tag1, pid1, 2) 24 | writeEventsFor(tag1, pid2, 1) 25 | writeEventsFor(tag1, pid3, 5) 26 | 27 | val reconciliation = new Reconciliation(system) 28 | reconciliation.rebuildAllPersistenceIds().futureValue 29 | 30 | queries 31 | .currentPersistenceIds() 32 | .runWith(Sink.seq) 33 | .futureValue 34 | .toSet 35 | .filterNot(_.startsWith("persistenceInit")) should ===(Set(pid1, pid2, pid3)) 36 | 37 | // add some more 38 | writeEventsFor(tag1, pid4, 2) 39 | writeEventsFor(tag1, pid5, 4) 40 | 41 | reconciliation.rebuildAllPersistenceIds().futureValue 42 | 43 | queries 44 | .currentPersistenceIds() 45 | .runWith(Sink.seq) 46 | .futureValue 47 | .toSet 48 | .filterNot(_.startsWith("persistenceInit")) should ===(Set(pid1, pid2, pid3, pid4, pid5)) 49 | } 50 | } 51 | } 52 | -------------------------------------------------------------------------------- /.github/workflows/link-validator.yml: -------------------------------------------------------------------------------- 1 | name: Link Validator 2 | 3 | on: 4 | workflow_dispatch: 5 | pull_request: 6 | schedule: 7 | - cron: '0 5 1 * *' 8 | 9 | permissions: 10 | contents: read 11 | 12 | jobs: 13 | documentation: 14 | name: ScalaDoc, Documentation with Paradox 15 | if: github.event.repository.fork == false 16 | runs-on: Akka-Default 17 | env: 18 | JAVA_OPTS: -Xms2G -Xmx2G -Xss2M -XX:ReservedCodeCacheSize=256M -Dfile.encoding=UTF-8 19 | 20 | steps: 21 | - name: Checkout 22 | # https://github.com/actions/checkout/releases 23 | # v4.1.1 24 | uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 25 | with: # https://github.com/olafurpg/setup-scala#faster-checkout-of-big-repos 26 | fetch-depth: 100 27 | 28 | - name: Fetch tags 29 | run: git fetch --depth=100 origin +refs/tags/*:refs/tags/* 30 | 31 | - name: Set up JDK 25 32 | # https://github.com/coursier/setup-action/releases 33 | # v1.3.5 34 | uses: coursier/setup-action@7bde40eee928896f074dbb76d22dd772eed5c65f 35 | with: 36 | jvm: temurin:1.25 37 | 38 | - name: Cache Coursier cache 39 | # https://github.com/coursier/cache-action/releases 40 | # v6.4.5 41 | uses: coursier/cache-action@1ff273bff02a8787bc9f1877d347948af647956d 42 | 43 | - name: "Create all API docs and create site with Paradox" 44 | run: sbt "unidoc; docs/makeSite" 45 | 46 | - name: Run Link Validator 47 | run: cs launch net.runne::site-link-validator:0.2.5 -- project/link-validator.conf 48 | 49 | -------------------------------------------------------------------------------- /core/src/test/scala/akka/persistence/cassandra/reconciler/TagQuerySpec.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2016-2025 Lightbend Inc. 
3 | */ 4 | 5 | package akka.persistence.cassandra.reconciler 6 | 7 | import akka.persistence.cassandra.CassandraSpec 8 | import akka.stream.scaladsl.Sink 9 | import org.scalatest.concurrent.Eventually 10 | 11 | class TagQuerySpec extends CassandraSpec with Eventually { 12 | 13 | private lazy val reconciliation = new Reconciliation(system) 14 | 15 | "Tag querying" should { 16 | "return distinct tags for all tags" in { 17 | val pid1 = "pid1" 18 | val pid2 = "pid2" 19 | val tag1 = "tag1" 20 | val tag2 = "tag2" 21 | val tag3 = "tag3" 22 | reconciliation.allTags().runWith(Sink.seq).futureValue shouldEqual Nil 23 | writeEventsFor(Set(tag1, tag2), pid1, 3) 24 | writeEventsFor(Set(tag2, tag3), pid2, 3) 25 | eventually { 26 | val allTags = reconciliation.allTags().runWith(Sink.seq).futureValue 27 | allTags.size shouldEqual 3 28 | allTags.toSet shouldEqual Set(tag1, tag2, tag3) 29 | } 30 | } 31 | 32 | "return tags only if that pid has used them" in { 33 | val pid1 = "p11" 34 | val pid2 = "p12" 35 | val tag1 = "tag11" 36 | val tag2 = "tag12" 37 | val tag3 = "tag13" 38 | writeEventsFor(tag1, pid1, 3) 39 | writeEventsFor(Set(tag2, tag3), pid2, 3) 40 | eventually { 41 | val tags = reconciliation.tagsForPersistenceId(pid2).futureValue 42 | tags.size shouldEqual 2 43 | tags.toSet shouldEqual Set(tag2, tag3) 44 | } 45 | } 46 | } 47 | 48 | } 49 | -------------------------------------------------------------------------------- /core/src/test/scala/akka/persistence/cassandra/EventsByTagCrashSpec.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2016-2025 Lightbend Inc. 3 | */ 4 | 5 | package akka.persistence.cassandra 6 | 7 | import akka.NotUsed 8 | import akka.persistence.cassandra.TestTaggingActor.{ Ack, Crash } 9 | import akka.persistence.query.{ EventEnvelope, NoOffset } 10 | import akka.stream.scaladsl.Source 11 | import akka.stream.testkit.scaladsl.TestSink 12 | 13 | import scala.concurrent.duration._ 14 | 15 | class EventsByTagCrashSpec extends CassandraSpec(EventsByTagRestartSpec.config) { 16 | 17 | val waitTime = 100.milliseconds 18 | 19 | "EventsByTag" must { 20 | 21 | "handle crashes of the persistent actor" in { 22 | // crash the actor many times, persist 5 events each time 23 | val crashEvery = 5 24 | val crashNr = 20 25 | val msgs = crashEvery * crashNr 26 | val p2 = system.actorOf(TestTaggingActor.props("p2", Set("blue"))) 27 | (1 to msgs).foreach { cn => 28 | if (cn % crashEvery == 0) { 29 | p2 ! Crash 30 | } 31 | val msg = s"msg $cn" 32 | p2 !
msg 33 | expectMsg(Ack) 34 | } 35 | val blueTags: Source[EventEnvelope, NotUsed] = queryJournal.eventsByTag(tag = "blue", offset = NoOffset) 36 | val tagProbe = blueTags.runWith(TestSink.probe[EventEnvelope](system)) 37 | (1L to msgs).foreach { m => 38 | val expected = s"msg $m" 39 | tagProbe.request(1) 40 | tagProbe.expectNext().event shouldEqual expected 41 | } 42 | tagProbe.expectNoMessage(250.millis) 43 | tagProbe.cancel() 44 | 45 | } 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /example/src/main/scala/akka/persistence/cassandra/example/ConfigurablePersistentActor.scala: -------------------------------------------------------------------------------- 1 | package akka.persistence.cassandra.example 2 | 3 | import akka.actor.typed.scaladsl.Behaviors 4 | import akka.actor.typed.{ ActorRef, ActorSystem, Behavior } 5 | import akka.cluster.sharding.typed.ShardingEnvelope 6 | import akka.cluster.sharding.typed.scaladsl.{ ClusterSharding, Entity, EntityTypeKey } 7 | import akka.persistence.typed.PersistenceId 8 | import akka.persistence.typed.scaladsl.{ Effect, EventSourcedBehavior } 9 | 10 | object ConfigurablePersistentActor { 11 | 12 | case class Settings(nrTags: Int) 13 | 14 | val Key: EntityTypeKey[Event] = EntityTypeKey[Event]("configurable") 15 | 16 | def init(settings: Settings, system: ActorSystem[_]): ActorRef[ShardingEnvelope[Event]] = { 17 | ClusterSharding(system).init(Entity(Key)(ctx => apply(settings, ctx.entityId)).withRole("write")) 18 | } 19 | 20 | final case class Event(timeCreated: Long = System.currentTimeMillis()) extends CborSerializable 21 | 22 | final case class State(eventsProcessed: Long) extends CborSerializable 23 | 24 | def apply(settings: Settings, persistenceId: String): Behavior[Event] = 25 | Behaviors.setup { ctx => 26 | EventSourcedBehavior[Event, Event, State]( 27 | persistenceId = PersistenceId.ofUniqueId(persistenceId), 28 | State(0), 29 | (_, event) => { 30 | ctx.log.info("persisting event {}", event) 31 | Effect.persist(event) 32 | }, 33 | (state, _) => state.copy(eventsProcessed = state.eventsProcessed + 1)).withTagger(event => 34 | Set("tag-" + math.abs(event.hashCode() % settings.nrTags))) 35 | } 36 | 37 | } 38 | -------------------------------------------------------------------------------- /core/src/main/scala/akka/persistence/cassandra/query/CassandraReadStatements.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2016-2025 Lightbend Inc. 
3 | */ 4 | 5 | package akka.persistence.cassandra.query 6 | 7 | import akka.annotation.InternalApi 8 | import akka.persistence.cassandra.PluginSettings 9 | 10 | /** 11 | * INTERNAL API 12 | */ 13 | @InternalApi private[akka] trait CassandraReadStatements { 14 | 15 | def settings: PluginSettings 16 | private def journalSettings = settings.journalSettings 17 | private def eventsByTagSettings = settings.eventsByTagSettings 18 | 19 | private def tableName = s"${journalSettings.keyspace}.${journalSettings.table}" 20 | private def tagViewTableName = s"${journalSettings.keyspace}.${eventsByTagSettings.tagTable.name}" 21 | private def allPersistenceIdsTableName = s"${journalSettings.keyspace}.${journalSettings.allPersistenceIdsTable}" 22 | 23 | def selectAllPersistenceIds = 24 | s""" 25 | SELECT persistence_id FROM $allPersistenceIdsTableName 26 | """ 27 | 28 | def selectDistinctPersistenceIds = 29 | s""" 30 | SELECT DISTINCT persistence_id, partition_nr FROM $tableName 31 | """ 32 | 33 | def selectEventsFromTagViewWithUpperBound = 34 | s""" 35 | SELECT * FROM $tagViewTableName WHERE 36 | tag_name = ? AND 37 | timebucket = ? AND 38 | timestamp > ? AND 39 | timestamp < ? 40 | ORDER BY timestamp ASC 41 | """.stripMargin 42 | 43 | def selectTagSequenceNrs = 44 | s""" 45 | SELECT persistence_id, tag_pid_sequence_nr, timestamp 46 | FROM $tagViewTableName WHERE 47 | tag_name = ? AND 48 | timebucket = ? AND 49 | timestamp > ? AND 50 | timestamp <= ?""" 51 | } 52 | -------------------------------------------------------------------------------- /core/src/test/scala/akka/persistence/cassandra/reconciler/TruncateAllSpec.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2016-2025 Lightbend Inc. 3 | */ 4 | 5 | package akka.persistence.cassandra.reconciler 6 | 7 | import akka.persistence.cassandra.CassandraSpec 8 | import org.scalatest.concurrent.Eventually 9 | import akka.persistence.cassandra.TestTaggingActor 10 | 11 | class TruncateAllSpec extends CassandraSpec with Eventually { 12 | 13 | val pid1 = "pid1" 14 | val pid2 = "pid2" 15 | val tag1 = "tag1" 16 | val tag2 = "tag2" 17 | val tag3 = "tag3" 18 | 19 | "Truncate " should { 20 | "remove all tags from the table" in { 21 | writeEventsFor(tag1, pid1, 2) 22 | writeEventsFor(Set(tag1, tag2, tag3), pid2, 2) 23 | 24 | expectEventsForTag(tag1, "pid1 event-1", "pid1 event-2", "pid2 event-1", "pid2 event-2") 25 | expectEventsForTag(tag2, "pid2 event-1", "pid2 event-2") 26 | expectEventsForTag(tag3, "pid2 event-1", "pid2 event-2") 27 | 28 | val reconciliation = new Reconciliation(system) 29 | reconciliation.truncateTagView().futureValue 30 | 31 | eventually { 32 | expectEventsForTag(tag1) 33 | expectEventsForTag(tag2) 34 | expectEventsForTag(tag3) 35 | } 36 | } 37 | 38 | "recover if actors are started again" in { 39 | system.actorOf(TestTaggingActor.props(pid1)) 40 | system.actorOf(TestTaggingActor.props(pid2)) 41 | eventually { 42 | expectEventsForTag(tag1, "pid1 event-1", "pid1 event-2", "pid2 event-1", "pid2 event-2") 43 | expectEventsForTag(tag2, "pid2 event-1", "pid2 event-2") 44 | expectEventsForTag(tag3, "pid2 event-1", "pid2 event-2") 45 | } 46 | 47 | } 48 | } 49 | 50 | } 51 | -------------------------------------------------------------------------------- /core/src/test/scala/akka/persistence/cassandra/RetriesSpec.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2016-2025 Lightbend Inc. 
3 | */ 4 | 5 | package akka.persistence.cassandra 6 | 7 | import scala.concurrent.{ ExecutionContext, Future } 8 | import scala.concurrent.duration._ 9 | import akka.actor.{ ActorSystem, Scheduler } 10 | import akka.testkit.TestKit 11 | import akka.testkit.TestProbe 12 | import org.scalatest.BeforeAndAfterAll 13 | import org.scalatest.concurrent.ScalaFutures 14 | import org.scalatest.matchers.should.Matchers 15 | import org.scalatest.wordspec.AnyWordSpecLike 16 | 17 | class RetriesSpec 18 | extends TestKit(ActorSystem("RetriesSpec")) 19 | with AnyWordSpecLike 20 | with ScalaFutures 21 | with BeforeAndAfterAll 22 | with Matchers { 23 | implicit val scheduler: Scheduler = system.scheduler 24 | implicit val ec: ExecutionContext = system.dispatcher 25 | "Retries" should { 26 | "retry N number of times" in { 27 | val failProbe = TestProbe() 28 | @volatile var called = 0 29 | val result = Retries 30 | .retry(() => { 31 | called += 1 32 | Future.failed(new RuntimeException(s"cats $called")) 33 | }, 3, (_, exc, _) => failProbe.ref ! exc, 1.milli, 2.millis, 0.1) 34 | .failed 35 | .futureValue 36 | called shouldEqual 3 37 | result.getMessage shouldEqual "cats 3" 38 | failProbe.expectMsgType[RuntimeException].getMessage shouldEqual "cats 1" 39 | failProbe.expectMsgType[RuntimeException].getMessage shouldEqual "cats 2" 40 | failProbe.expectNoMessage() 41 | } 42 | } 43 | 44 | override protected def afterAll(): Unit = { 45 | super.afterAll() 46 | TestKit.shutdownActorSystem(system) 47 | } 48 | } 49 | -------------------------------------------------------------------------------- /core/src/main/scala/akka/persistence/cassandra/snapshot/SnapshotSettings.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2016-2025 Lightbend Inc. 
3 | */ 4 | 5 | package akka.persistence.cassandra.snapshot 6 | 7 | import akka.actor.ActorSystem 8 | import akka.annotation.InternalApi 9 | import akka.persistence.cassandra.PluginSettings.getReplicationStrategy 10 | import akka.persistence.cassandra.compaction.CassandraCompactionStrategy 11 | import akka.persistence.cassandra.getListFromConfig 12 | import com.typesafe.config.Config 13 | 14 | /** INTERNAL API */ 15 | @InternalApi private[akka] class SnapshotSettings(system: ActorSystem, config: Config) { 16 | private val snapshotConfig = config.getConfig("snapshot") 17 | 18 | val writeProfile: String = snapshotConfig.getString("write-profile") 19 | val readProfile: String = snapshotConfig.getString("read-profile") 20 | 21 | val keyspaceAutoCreate: Boolean = snapshotConfig.getBoolean("keyspace-autocreate") 22 | val tablesAutoCreate: Boolean = snapshotConfig.getBoolean("tables-autocreate") 23 | 24 | val keyspace: String = snapshotConfig.getString("keyspace") 25 | 26 | val table: String = snapshotConfig.getString("table") 27 | 28 | val tableCompactionStrategy: CassandraCompactionStrategy = 29 | CassandraCompactionStrategy(snapshotConfig.getConfig("table-compaction-strategy")) 30 | 31 | val replicationStrategy: String = getReplicationStrategy( 32 | snapshotConfig.getString("replication-strategy"), 33 | snapshotConfig.getInt("replication-factor"), 34 | getListFromConfig(snapshotConfig, "data-center-replication-factors")) 35 | 36 | val gcGraceSeconds: Long = snapshotConfig.getLong("gc-grace-seconds") 37 | 38 | val maxLoadAttempts: Int = snapshotConfig.getInt("max-load-attempts") 39 | 40 | } 41 | -------------------------------------------------------------------------------- /core/src/main/scala/akka/persistence/cassandra/healthcheck/CassandraHealthCheck.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2016-2025 Lightbend Inc. 3 | */ 4 | 5 | package akka.persistence.cassandra.healthcheck 6 | 7 | import akka.actor.ActorSystem 8 | import akka.event.Logging 9 | import akka.pattern.{ ask, AskTimeoutException } 10 | import akka.persistence.Persistence 11 | import akka.persistence.cassandra.PluginSettings 12 | import akka.persistence.cassandra.journal.CassandraJournal.HealthCheckQuery 13 | import akka.util.Timeout 14 | 15 | import scala.concurrent.{ ExecutionContextExecutor, Future } 16 | import scala.util.control.NonFatal 17 | 18 | final class CassandraHealthCheck(system: ActorSystem) extends (() => Future[Boolean]) { 19 | 20 | private val log = Logging.getLogger(system, getClass) 21 | 22 | private val settings = new PluginSettings(system, system.settings.config.getConfig("akka.persistence.cassandra")) 23 | private val healthCheckSettings = settings.healthCheckSettings 24 | private val journalPluginId = s"${healthCheckSettings.pluginLocation}.journal" 25 | private val journalRef = Persistence(system).journalFor(journalPluginId) 26 | 27 | private implicit val ec: ExecutionContextExecutor = system.dispatchers.lookup(s"$journalPluginId.plugin-dispatcher") 28 | private implicit val timeout: Timeout = healthCheckSettings.timeout 29 | 30 | override def apply(): Future[Boolean] = { 31 | (journalRef ? 
HealthCheckQuery).map(_ => true).recoverWith { 32 | case _: AskTimeoutException => 33 | log.warning("Failed to execute health check due to ask timeout") 34 | Future.successful(false) 35 | case NonFatal(e) => 36 | log.warning("Failed to execute health check due to: {}", e) 37 | Future.successful(false) 38 | } 39 | } 40 | } 41 | -------------------------------------------------------------------------------- /core/src/test/scala/akka/persistence/cassandra/query/TestActor.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2016-2025 Lightbend Inc. 3 | */ 4 | 5 | package akka.persistence.cassandra.query 6 | 7 | import scala.collection.immutable 8 | import akka.actor.Props 9 | import akka.persistence.PersistentActor 10 | import akka.actor.ActorRef 11 | import akka.persistence.DeleteMessagesSuccess 12 | import akka.persistence.journal.Tagged 13 | 14 | object TestActor { 15 | def props(persistenceId: String, journalId: String = "akka.persistence.cassandra.journal"): Props = 16 | Props(new TestActor(persistenceId, journalId)) 17 | 18 | final case class PersistAll(events: immutable.Seq[String]) 19 | final case class DeleteTo(seqNr: Long) 20 | } 21 | 22 | class TestActor(override val persistenceId: String, override val journalPluginId: String) extends PersistentActor { 23 | 24 | var lastDelete: ActorRef = _ 25 | 26 | val receiveRecover: Receive = { 27 | case evt: String => 28 | } 29 | 30 | val receiveCommand: Receive = { 31 | case cmd: String => 32 | persist(cmd) { evt => 33 | sender() ! evt + "-done" 34 | } 35 | case cmd: Tagged => 36 | persist(cmd) { evt => 37 | val msg = s"${evt.payload}-done" 38 | sender() ! msg 39 | } 40 | 41 | case TestActor.PersistAll(events) => 42 | val size = events.size 43 | val handler = { 44 | var count = 0 45 | (evt: String) => { 46 | count += 1 47 | if (count == size) 48 | sender() ! "PersistAll-done" 49 | } 50 | } 51 | persistAll(events)(handler) 52 | 53 | case TestActor.DeleteTo(seqNr) => 54 | lastDelete = sender() 55 | deleteMessages(seqNr) 56 | 57 | case d: DeleteMessagesSuccess => 58 | lastDelete ! d 59 | } 60 | } 61 | -------------------------------------------------------------------------------- /core/src/main/scala/akka/persistence/cassandra/query/package.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2016-2025 Lightbend Inc. 3 | */ 4 | 5 | package akka.persistence.cassandra 6 | 7 | import java.time.format.DateTimeFormatter 8 | import java.time.{ LocalDateTime, ZoneId, ZoneOffset } 9 | import java.util.UUID 10 | 11 | import akka.annotation.InternalApi 12 | import com.datastax.oss.driver.api.core.cql.AsyncResultSet 13 | import com.datastax.oss.driver.api.core.uuid.Uuids 14 | 15 | package object query { 16 | 17 | /** INTERNAL API */ 18 | @InternalApi private[akka] val firstBucketFormat = "yyyyMMdd'T'HH:mm" 19 | 20 | /** INTERNAL API */ 21 | @InternalApi private[akka] val firstBucketFormatter: DateTimeFormatter = 22 | DateTimeFormatter.ofPattern(firstBucketFormat).withZone(ZoneOffset.UTC) 23 | 24 | /** INTERNAL API */ 25 | @InternalApi private[akka] def isExhausted(rs: AsyncResultSet): Boolean = { 26 | rs.remaining() == 0 && !rs.hasMorePages 27 | } 28 | 29 | /** INTERNAL API */ 30 | @InternalApi private[akka] def uuid(timestamp: Long): UUID = { 31 | def makeMsb(time: Long): Long = { 32 | // copied from Uuids.makeMsb 33 | // UUID v1 timestamp must be in 100-nanoseconds interval since 00:00:00.000 15 Oct 1582. 
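// (the UUID epoch, 1582-10-15, is 12219292800000 ms before the Unix epoch)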
34 | val uuidEpoch = LocalDateTime.of(1582, 10, 15, 0, 0).atZone(ZoneId.of("GMT-0")).toInstant.toEpochMilli 35 | val timestamp = (time - uuidEpoch) * 10000 36 | 37 | var msb = 0L 38 | msb |= (0X00000000FFFFFFFFL & timestamp) << 32 39 | msb |= (0X0000FFFF00000000L & timestamp) >>> 16 40 | msb |= (0X0FFF000000000000L & timestamp) >>> 48 41 | msb |= 0X0000000000001000L // sets the version to 1. 42 | msb 43 | } 44 | 45 | val now = Uuids.timeBased() 46 | new UUID(makeMsb(timestamp), now.getLeastSignificantBits) 47 | } 48 | } 49 | -------------------------------------------------------------------------------- /core/src/test/scala/akka/persistence/cassandra/reconciler/DeleteTagViewForPersistenceIdSpec.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2016-2025 Lightbend Inc. 3 | */ 4 | 5 | package akka.persistence.cassandra.reconciler 6 | 7 | import akka.persistence.cassandra.CassandraSpec 8 | import akka.persistence.cassandra.TestTaggingActor 9 | import akka.testkit.TestProbe 10 | import akka.persistence.RecoveryCompleted 11 | 12 | /** 13 | * These tests depend on each other's output and can't be run separately 14 | */ 15 | class DeleteTagViewForPersistenceIdSpec extends CassandraSpec { 16 | 17 | "Deleting" should { 18 | val tag = "tag1" 19 | val pid1 = "p1" 20 | val pid2 = "p2" 21 | 22 | "only delete for the provided persistence id" in { 23 | writeEventsFor(tag, pid1, 3) 24 | writeEventsFor(tag, pid2, 3) 25 | 26 | eventsByTag(tag) 27 | .request(10) 28 | .expectNextN(List("p1 event-1", "p1 event-2", "p1 event-3", "p2 event-1", "p2 event-2", "p2 event-3")) 29 | .expectNoMessage() 30 | .cancel() 31 | val reconciliation = new Reconciliation(system) 32 | reconciliation.deleteTagViewForPersistenceIds(Set(pid2), tag).futureValue 33 | eventsByTag(tag).request(5).expectNextN(List("p1 event-1", "p1 event-2", "p1 event-3")).expectNoMessage().cancel() 34 | 35 | } 36 | 37 | "recover the tagged events if persistence id is started again" in { 38 | val probe = TestProbe() 39 | system.actorOf(TestTaggingActor.props(pid2, Set(tag), Some(probe.ref))) 40 | probe.expectMsg(RecoveryCompleted) 41 | eventsByTag(tag) 42 | .request(10) 43 | .expectNextN(List("p1 event-1", "p1 event-2", "p1 event-3", "p2 event-1", "p2 event-2", "p2 event-3")) 44 | .expectNoMessage() 45 | .cancel() 46 | } 47 | 48 | } 49 | 50 | } 51 | -------------------------------------------------------------------------------- /example/src/main/scala/akka/persistence/cassandra/example/LoadGenerator.scala: -------------------------------------------------------------------------------- 1 | package akka.persistence.cassandra.example 2 | 3 | import akka.actor.typed.{ ActorRef, Behavior } 4 | import akka.actor.typed.scaladsl.Behaviors 5 | import akka.cluster.sharding.typed.ShardingEnvelope 6 | import com.typesafe.config.Config 7 | 8 | import scala.concurrent.duration.FiniteDuration 9 | import scala.jdk.DurationConverters._ 10 | import scala.util.Random 11 | 12 | object LoadGenerator { 13 | 14 | object Settings { 15 | def apply(config: Config): Settings = { 16 | Settings(config.getInt("persistence-ids"), config.getDuration("load-tick-duration").toScala) 17 | } 18 | } 19 | 20 | case class Settings(nrPersistenceIds: Int, tickDuration: FiniteDuration) 21 | 22 | sealed trait Command 23 | final case class Start(duration: FiniteDuration) extends Command 24 | final case class Tick() extends Command 25 | private case object Stop extends Command 26 | 27 | def apply( 28 | settings: Settings, 29 |
ref: ActorRef[ShardingEnvelope[ConfigurablePersistentActor.Event]]): Behavior[Command] = { 30 | Behaviors.withTimers { timers => 31 | Behaviors.setup { ctx => 32 | Behaviors.receiveMessage { 33 | case Start(duration) => 34 | ctx.log.info("Starting...") 35 | timers.startTimerAtFixedRate(Tick(), settings.tickDuration) 36 | timers.startSingleTimer(Stop, duration) 37 | Behaviors.same 38 | case Tick() => 39 | ctx.log.info("Sending event") 40 | ref ! ShardingEnvelope( 41 | s"p${Random.nextInt(settings.nrPersistenceIds)}", 42 | ConfigurablePersistentActor.Event()) 43 | Behaviors.same 44 | case Stop => 45 | Behaviors.same 46 | } 47 | } 48 | } 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /core/src/test/scala/akka/persistence/cassandra/journal/TagScanningSpec.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2016-2025 Lightbend Inc. 3 | */ 4 | 5 | package akka.persistence.cassandra.journal 6 | 7 | import akka.actor._ 8 | import akka.persistence.cassandra.{ CassandraLifecycle, CassandraSpec, TestTaggingActor } 9 | import com.typesafe.config.ConfigFactory 10 | 11 | object TagScanningSpec { 12 | val config = ConfigFactory.parseString(s""" 13 | akka.persistence.cassandra.events-by-tag.enabled = on 14 | akka.persistence.cassandra.events-by-tag.scanning-flush-interval = 2s 15 | akka.persistence.cassandra.journal.replay-filter.mode = off 16 | akka.persistence.cassandra.log-queries = off 17 | """).withFallback(CassandraLifecycle.config) 18 | } 19 | 20 | class TagScanningSpec extends CassandraSpec(TagScanningSpec.config) { 21 | 22 | "Tag writing" must { 23 | "complete writes to tag scanning for many persistent actors" in { 24 | val nrActors = 25 25 | (0 until nrActors).foreach { i => 26 | val ref = system.actorOf(TestTaggingActor.props(s"$i")) 27 | ref ! 
"msg" 28 | } 29 | 30 | awaitAssert { 31 | import scala.jdk.CollectionConverters._ 32 | val expected = (0 until nrActors).map(n => (s"$n".toInt, 1L)).toList 33 | val scanning = cluster 34 | .execute(s"select * from ${journalName}.tag_scanning") 35 | .all() 36 | .asScala 37 | .toList 38 | .map(row => (row.getString("persistence_id"), row.getLong("sequence_nr"))) 39 | .filterNot(_._1.startsWith("persistenceInit")) 40 | .map { case (pid, seqNr) => (pid.toInt, seqNr) } // sorting by pid makes the failure message easy to interpret 41 | .sortBy(_._1) 42 | scanning shouldEqual expected 43 | } 44 | } 45 | } 46 | } 47 | -------------------------------------------------------------------------------- /docs/src/test/java/jdoc/cleanup/CleanupDocExample.java: -------------------------------------------------------------------------------- 1 | package jdoc.cleanup; 2 | 3 | import akka.Done; 4 | import akka.actor.ActorSystem; 5 | import akka.persistence.cassandra.cleanup.Cleanup; 6 | import akka.persistence.cassandra.query.javadsl.CassandraReadJournal; 7 | import akka.persistence.query.PersistenceQuery; 8 | 9 | import scala.jdk.javaapi.FutureConverters; 10 | 11 | import java.time.ZonedDateTime; 12 | import java.time.temporal.ChronoUnit; 13 | 14 | public class CleanupDocExample { 15 | 16 | 17 | 18 | public static void example() { 19 | 20 | ActorSystem system = null; 21 | 22 | //#cleanup 23 | CassandraReadJournal queries = PersistenceQuery.get(system).getReadJournalFor(CassandraReadJournal.class, CassandraReadJournal.Identifier()); 24 | Cleanup cleanup = new Cleanup(system); 25 | 26 | int persistenceIdParallelism = 10; 27 | 28 | 29 | // forall persistence ids, keep two snapshots and delete all events before the oldest kept snapshot 30 | queries.currentPersistenceIds().mapAsync(persistenceIdParallelism, pid -> FutureConverters.asJava(cleanup.cleanupBeforeSnapshot(pid, 2))).run(system); 31 | 32 | // forall persistence ids, keep everything after the provided unix timestamp, if there aren't enough snapshots after this time 33 | // go back before the timestamp to find snapshot to delete before 34 | // this operation is more expensive that the one above 35 | ZonedDateTime keepAfter = ZonedDateTime.now().minus(1, ChronoUnit.MONTHS); 36 | queries 37 | .currentPersistenceIds() 38 | .mapAsync(persistenceIdParallelism, pid -> FutureConverters.asJava(cleanup.cleanupBeforeSnapshot(pid, 2, keepAfter.toInstant().toEpochMilli()))) 39 | .run(system); 40 | 41 | //#cleanup 42 | 43 | 44 | } 45 | } 46 | -------------------------------------------------------------------------------- /core/src/main/resources/test-embedded-cassandra.yaml: -------------------------------------------------------------------------------- 1 | # Cassandra storage config YAML 2 | # Reference: http://docs.datastax.com/en/cassandra/3.x/cassandra/configuration/configCassandra_yaml.html 3 | 4 | # NOTE: 5 | # See http://wiki.apache.org/cassandra/StorageConfiguration for 6 | # full explanations of configuration directives 7 | # /NOTE 8 | 9 | # The name of the cluster. This is mainly used to prevent machines in 10 | # one logical cluster from joining another. 11 | cluster_name: 'Test Cluster' 12 | 13 | # directories where Cassandra should store data on disk. 
14 | data_file_directories: 15 | - $DIR/data 16 | 17 | # commit log 18 | commitlog_directory: $DIR/commitlog 19 | 20 | # saved caches 21 | saved_caches_directory: $DIR/saved_caches 22 | 23 | hints_directory: $DIR/hints 24 | 25 | cdc_raw_directory: $DIR/cdc_raw 26 | 27 | commitlog_sync: periodic 28 | commitlog_sync_period_in_ms: 10000 29 | 30 | partitioner: org.apache.cassandra.dht.Murmur3Partitioner 31 | 32 | endpoint_snitch: SimpleSnitch 33 | 34 | listen_address: $HOST 35 | 36 | start_native_transport: true 37 | # port for the CQL native transport to listen for clients on 38 | native_transport_port: $PORT 39 | 40 | # TCP port, for commands and data 41 | storage_port: $STORAGE_PORT 42 | 43 | # Whether to start the thrift rpc server. 44 | start_rpc: false 45 | 46 | 47 | seed_provider: 48 | - class_name: org.apache.cassandra.locator.SimpleSeedProvider 49 | parameters: 50 | - seeds: "$HOST" 51 | 52 | 53 | # The following settings were inspired by 54 | # http://opensourceconnections.com/blog/2013/08/31/building-the-perfect-cassandra-test-environment/ 55 | rpc_server_type: hsha 56 | concurrent_reads: 2 57 | concurrent_writes: 2 58 | rpc_min_threads: 1 59 | rpc_max_threads: 1 60 | concurrent_compactors: 1 61 | compaction_throughput_mb_per_sec: 0 62 | key_cache_size_in_mb: 0 63 | hinted_handoff_enabled: false 64 | -------------------------------------------------------------------------------- /core/src/main/scala/akka/persistence/cassandra/compaction/TimeWindowCompactionStrategy.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2016-2025 Lightbend Inc. 3 | */ 4 | 5 | package akka.persistence.cassandra.compaction 6 | 7 | import java.util.concurrent.TimeUnit 8 | 9 | import com.typesafe.config.Config 10 | 11 | class TimeWindowCompactionStrategy(config: Config) 12 | extends BaseCompactionStrategy( 13 | config, 14 | TimeWindowCompactionStrategy.ClassName, 15 | TimeWindowCompactionStrategy.propertyKeys) { 16 | import TimeWindowCompactionStrategy._ 17 | 18 | val compactionWindowUnit: TimeUnit = 19 | if (config.hasPath("compaction_window_unit")) 20 | TimeUnit.valueOf(config.getString("compaction_window_unit")) 21 | else TimeUnit.DAYS 22 | val compactionWindowSize: Int = 23 | if (config.hasPath("compaction_window_size")) 24 | config.getInt("compaction_window_size") 25 | else 1 26 | 27 | require( 28 | compactionWindowSize >= 1, 29 | s"compaction_window_size must be larger than or equal to 1, but was $compactionWindowSize") 30 | 31 | override def asCQL: String = 32 | s"""{ 33 | |'class' : '$ClassName', 34 | |${super.asCQL}, 35 | |'compaction_window_size' : $compactionWindowSize, 36 | |'compaction_window_unit' : '${compactionWindowUnit.toString.toUpperCase}' 37 | |} 38 | """.stripMargin.trim 39 | } 40 | 41 | object TimeWindowCompactionStrategy extends CassandraCompactionStrategyConfig[TimeWindowCompactionStrategy] { 42 | override val ClassName: String = "TimeWindowCompactionStrategy" 43 | 44 | override def propertyKeys: List[String] = 45 | (BaseCompactionStrategy.propertyKeys ++ List("compaction_window_size", "compaction_window_unit")).sorted 46 | 47 | override def fromConfig(config: Config): TimeWindowCompactionStrategy = 48 | new TimeWindowCompactionStrategy(config) 49 | } 50 | -------------------------------------------------------------------------------- /core/src/test/scala/akka/persistence/cassandra/TestTaggingActor.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 
2016-2025 Lightbend Inc. 3 | */ 4 | 5 | package akka.persistence.cassandra 6 | 7 | import akka.actor.{ ActorLogging, ActorRef, Props } 8 | import akka.persistence.cassandra.journal.TagWriterSpec.TestEx 9 | import akka.persistence.{ PersistentActor, RecoveryCompleted, SaveSnapshotSuccess } 10 | import akka.persistence.journal.Tagged 11 | 12 | object TestTaggingActor { 13 | case object Ack 14 | case object Crash 15 | case object DoASnapshotPlease 16 | case object SnapShotAck 17 | case object Stop 18 | 19 | def props(pId: String, tags: Set[String] = Set(), probe: Option[ActorRef] = None): Props = 20 | Props(new TestTaggingActor(pId, tags, probe)) 21 | } 22 | 23 | class TestTaggingActor(val persistenceId: String, tags: Set[String], probe: Option[ActorRef]) 24 | extends PersistentActor 25 | with ActorLogging { 26 | import TestTaggingActor._ 27 | 28 | def receiveRecover: Receive = { 29 | case RecoveryCompleted => 30 | probe.foreach(_ ! RecoveryCompleted) 31 | case _ => 32 | } 33 | 34 | def receiveCommand: Receive = normal 35 | 36 | def normal: Receive = { 37 | case event: String => 38 | log.debug("Persisting {}", event) 39 | persist(Tagged(event, tags)) { e => 40 | processEvent(e) 41 | sender() ! Ack 42 | } 43 | case Crash => 44 | throw TestEx("oh dear") 45 | case DoASnapshotPlease => 46 | saveSnapshot("i don't have any state :-/") 47 | context.become(waitingForSnapshot(sender())) 48 | case Stop => 49 | context.stop(self) 50 | 51 | } 52 | 53 | def waitingForSnapshot(who: ActorRef): Receive = { 54 | case SaveSnapshotSuccess(_) => 55 | who ! SnapShotAck 56 | context.become(normal) 57 | } 58 | 59 | def processEvent: Receive = { 60 | case _ => 61 | } 62 | } 63 | -------------------------------------------------------------------------------- /core/src/test/scala/akka/persistence/cassandra/journal/PubSubThrottlerSpec.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2016-2025 Lightbend Inc. 3 | */ 4 | 5 | package akka.persistence.cassandra.journal 6 | 7 | import scala.concurrent.duration.DurationInt 8 | 9 | import org.scalatest.wordspec.AnyWordSpecLike 10 | import org.scalatest.matchers.should.Matchers 11 | 12 | import akka.actor.{ ActorSystem, Props } 13 | import akka.testkit.{ TestKit, TestProbe } 14 | 15 | class PubSubThrottlerSpec 16 | extends TestKit(ActorSystem("CassandraConfigCheckerSpec")) 17 | with AnyWordSpecLike 18 | with Matchers { 19 | "PubSubThrottler" should { 20 | "eat up duplicate messages that arrive within the same [interval] window" in { 21 | val delegate = TestProbe() 22 | val throttler = system.actorOf(Props(new PubSubThrottler(delegate.ref, 5.seconds))) 23 | 24 | throttler ! "hello" 25 | throttler ! "hello" 26 | throttler ! "hello" 27 | delegate.within(2.seconds) { 28 | delegate.expectMsg("hello") 29 | } 30 | // Only first "hello" makes it through during the first interval. 31 | delegate.expectNoMessage(2.seconds) 32 | 33 | // Eventually, the interval will roll over and forward ONE further hello. 34 | delegate.expectMsg(10.seconds, "hello") 35 | delegate.expectNoMessage(2.seconds) 36 | 37 | throttler ! "hello" 38 | delegate.within(2.seconds) { 39 | delegate.expectMsg("hello") 40 | } 41 | } 42 | 43 | "allow differing messages to pass through within the same [interval] window" in { 44 | val delegate = TestProbe() 45 | val throttler = system.actorOf(Props(new PubSubThrottler(delegate.ref, 5.seconds))) 46 | throttler ! "hello" 47 | throttler ! 
"world" 48 | delegate.within(2.seconds) { 49 | delegate.expectMsg("hello") 50 | delegate.expectMsg("world") 51 | } 52 | } 53 | } 54 | } 55 | -------------------------------------------------------------------------------- /core/src/main/scala/akka/persistence/cassandra/journal/PubSubThrottler.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2016-2025 Lightbend Inc. 3 | */ 4 | 5 | package akka.persistence.cassandra.journal 6 | 7 | import scala.concurrent.duration.FiniteDuration 8 | import akka.actor.{ Actor, ActorRef } 9 | import akka.actor.Props 10 | import akka.annotation.InternalApi 11 | 12 | /** 13 | * INTERNAL API: Proxies messages to another actor, only allowing identical messages through once every [interval]. 14 | */ 15 | @InternalApi private[akka] class PubSubThrottler(delegate: ActorRef, interval: FiniteDuration) extends Actor { 16 | import PubSubThrottler._ 17 | import context.dispatcher 18 | 19 | /** The messages we've already seen during this interval */ 20 | val seen = collection.mutable.Set.empty[Any] 21 | 22 | /** The messages we've seen more than once during this interval, and their sender(s). */ 23 | val repeated = collection.mutable.Map.empty[Any, Set[ActorRef]].withDefaultValue(Set.empty) 24 | 25 | val timer = context.system.scheduler.scheduleWithFixedDelay(interval, interval, self, Tick) 26 | 27 | def receive = { 28 | case Tick => 29 | for ((msg, clients) <- repeated; 30 | client <- clients) { 31 | delegate.tell(msg, client) 32 | } 33 | seen.clear() 34 | repeated.clear() 35 | 36 | case msg => 37 | if (seen.contains(msg)) { 38 | repeated += (msg -> (repeated(msg) + sender())) 39 | } else { 40 | delegate.forward(msg) 41 | seen += msg 42 | } 43 | 44 | } 45 | 46 | override def postStop() = { 47 | timer.cancel() 48 | super.postStop() 49 | } 50 | } 51 | 52 | /** 53 | * INTERNAL API 54 | */ 55 | @InternalApi private[akka] object PubSubThrottler { 56 | 57 | def props(delegate: ActorRef, interval: FiniteDuration): Props = 58 | Props(new PubSubThrottler(delegate, interval)) 59 | 60 | private case object Tick 61 | 62 | } 63 | -------------------------------------------------------------------------------- /core/src/test/scala/akka/persistence/cassandra/journal/TimeBucketSpec.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2016-2025 Lightbend Inc. 
3 | */ 4 | 5 | package akka.persistence.cassandra.journal 6 | 7 | import akka.persistence.cassandra.Day 8 | import akka.persistence.cassandra.Hour 9 | import akka.persistence.cassandra.Minute 10 | import org.scalatest.wordspec.AnyWordSpec 11 | import org.scalatest.matchers.should.Matchers 12 | 13 | class TimeBucketSpec extends AnyWordSpec with Matchers { 14 | "TimeBucket sizes" must { 15 | "support day" in { 16 | val epochTime = 1409545135047L 17 | val day = TimeBucket(epochTime, Day) 18 | day.key should equal(1409529600000L) 19 | day.inPast should equal(true) 20 | day.isCurrent should equal(false) 21 | day.key should equal(1409529600000L) 22 | day.next().key should equal(1409616000000L) 23 | day.next() > day should equal(true) 24 | day < day.next() should equal(true) 25 | } 26 | 27 | "support hour" in { 28 | val epochTime = 1409545135047L 29 | val hour = TimeBucket(epochTime, Hour) 30 | hour.key should equal(1409544000000L) 31 | hour.inPast should equal(true) 32 | hour.isCurrent should equal(false) 33 | hour.key should equal(1409544000000L) 34 | hour.next().key should equal(1409547600000L) 35 | hour.next() > hour should equal(true) 36 | hour < hour.next() should equal(true) 37 | } 38 | 39 | "support minute" in { 40 | val epochTime = 1409545135047L 41 | val minute = TimeBucket(epochTime, Minute) 42 | minute.key should equal(1409545080000L) 43 | minute.inPast should equal(true) 44 | minute.isCurrent should equal(false) 45 | minute.key should equal(1409545080000L) 46 | minute.next().key should equal(1409545140000L) 47 | minute.next() > minute should equal(true) 48 | minute < minute.next() should equal(true) 49 | } 50 | } 51 | } 52 | -------------------------------------------------------------------------------- /docs/src/main/paradox/cosmosdb.md: -------------------------------------------------------------------------------- 1 | # CosmosDB 2 | 3 | @@@ warning 4 | 5 | **This project is not continuously tested against CosmosDB, and CosmosDB is not supported as part of the [Lightbend Subscription](https://www.lightbend.com/lightbend-subscription).** 6 | 7 | @@@ 8 | 9 | [CosmosDB](https://docs.microsoft.com/en-us/azure/cosmos-db/) is a wire-compatible replacement for Cassandra. 10 | 11 | Initial testing shows that most things work. There is one known issue related to deletes: 12 | deleting events isn't supported, because CosmosDB doesn't support range deletes yet and has a different behavior 13 | for clustering columns than Cassandra. This will probably be fixed in CosmosDB. 14 | 15 | When using CosmosDB you need to configure: 16 | 17 | ``` 18 | akka.persistence.cassandra { 19 | compatibility.cosmosdb = on 20 | journal.gc-grace-seconds = 0 21 | events-by-tag.gc-grace-seconds = 0 22 | } 23 | ``` 24 | 25 | The connection configuration can be defined with properties like the following example: 26 | 27 | ``` 28 | datastax-java-driver { 29 | # using environment variables for the values 30 | basic.contact-points = [${COSMOSDB_CONTACT_POINTS}] 31 | basic.load-balancing-policy.local-datacenter = ${COSMOSDB_DATACENTER} 32 | advanced.auth-provider { 33 | class = PlainTextAuthProvider 34 | username = ${COSMOSDB_USER} 35 | password = ${COSMOSDB_PWD} 36 | } 37 | advanced.ssl-engine-factory { 38 | class = DefaultSslEngineFactory 39 | } 40 | advanced.reconnect-on-init = true 41 | } 42 | ``` 43 | 44 | You need to define the variables `COSMOSDB_CONTACT_POINTS`, `COSMOSDB_DATACENTER`, `COSMOSDB_USER`, 45 | `COSMOSDB_PWD` as environment variables or system properties in the shell running the system.
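For example, in a Unix shell (the values below are illustrative placeholders; substitute your own account details):

```
export COSMOSDB_CONTACT_POINTS="my-account.cassandra.cosmos.azure.com:10350"
export COSMOSDB_DATACENTER="West Europe"
export COSMOSDB_USER="my-account"
export COSMOSDB_PWD="my-primary-key"
```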
46 | The connection details can be found in the Microsoft Azure CosmosDB portal. 47 | 48 | If you don't know the datacenter you can first connect with `cqlsh` and query: 49 | ``` 50 | select * from system.local; 51 | ``` 52 | 53 | -------------------------------------------------------------------------------- /docs/src/main/paradox/keyspaces.md: -------------------------------------------------------------------------------- 1 | # Amazon Keyspaces 2 | 3 | @@@ warning 4 | 5 | **This project is not continuously tested against Keyspaces, and Keyspaces is not supported as part of the [Lightbend Subscription](https://www.lightbend.com/lightbend-subscription).** 6 | 7 | @@@ 8 | 9 | [Amazon Keyspaces](https://docs.aws.amazon.com/keyspaces) is an Amazon service 10 | that provides a Cassandra-like API, but it's not real Cassandra and has 11 | different behaviour for some things (for example atomic persist of several 12 | events is not possible). 13 | 14 | Keyspaces is known not to work on akka-persistence-cassandra version 1.0.3 and 15 | earlier. 16 | 17 | ## Configuration 18 | 19 | Configure the authentication plugin in application.conf: 20 | 21 | ``` 22 | datastax-java-driver { 23 | basic.contact-points = [ "cassandra.eu-central-1.amazonaws.com:9142"] 24 | basic.request.consistency = LOCAL_QUORUM 25 | basic.load-balancing-policy { 26 | class = DefaultLoadBalancingPolicy 27 | local-datacenter = eu-central-1 28 | } 29 | profiles { 30 | akka-persistence-cassandra-profile { 31 | basic.request.consistency = LOCAL_QUORUM 32 | } 33 | } 34 | advanced { 35 | auth-provider = { 36 | class = software.aws.mcs.auth.SigV4AuthProvider 37 | aws-region = eu-central-1 38 | } 39 | ssl-engine-factory { 40 | class = DefaultSslEngineFactory 41 | truststore-path = "cassandra_truststore.jks" 42 | truststore-password = "my_password" 43 | hostname-validation = false 44 | } 45 | } 46 | } 47 | ``` 48 | 49 | Note that the contact points must also be configured. You find the endpoints for different regions at https://docs.aws.amazon.com/keyspaces/latest/devguide/programmatic.endpoints.html 50 | 51 | ## Notes 52 | 53 | Keyspace and table creation are asynchronous and not finished when the 54 | returned Future is completed, so it is best to create the tables and keyspaces 55 | manually before deploying your application. 56 | -------------------------------------------------------------------------------- /core/src/main/scala/akka/persistence/cassandra/journal/TaggedPreparedStatements.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2016-2025 Lightbend Inc. 
3 | */ 4 | 5 | package akka.persistence.cassandra.journal 6 | 7 | import com.datastax.oss.driver.api.core.cql.PreparedStatement 8 | import scala.concurrent.{ ExecutionContext, Future } 9 | 10 | import akka.annotation.InternalApi 11 | import akka.persistence.cassandra.CachedPreparedStatement 12 | 13 | /** 14 | * INTERNAL API 15 | */ 16 | @InternalApi private[akka] class TaggedPreparedStatements( 17 | statements: CassandraJournalStatements, 18 | prepare: String => Future[PreparedStatement])(implicit val ec: ExecutionContext) { 19 | 20 | def init(): Unit = { 21 | writeTagViewWithoutMeta.get() 22 | writeTagViewWithMeta.get() 23 | writeTagProgress.get() 24 | selectTagProgress.get() 25 | selectTagProgressForPersistenceId.get() 26 | writeTagScanning.get() 27 | selectTagScanningForPersistenceId.get() 28 | } 29 | 30 | val writeTagViewWithoutMeta: CachedPreparedStatement = 31 | new CachedPreparedStatement(() => prepare(statements.writeTags(false))) 32 | val writeTagViewWithMeta: CachedPreparedStatement = 33 | new CachedPreparedStatement(() => prepare(statements.writeTags(true))) 34 | val writeTagProgress: CachedPreparedStatement = 35 | new CachedPreparedStatement(() => prepare(statements.writeTagProgress)) 36 | val selectTagProgress: CachedPreparedStatement = 37 | new CachedPreparedStatement(() => prepare(statements.selectTagProgress)) 38 | val selectTagProgressForPersistenceId: CachedPreparedStatement = 39 | new CachedPreparedStatement(() => prepare(statements.selectTagProgressForPersistenceId)) 40 | val writeTagScanning: CachedPreparedStatement = 41 | new CachedPreparedStatement(() => prepare(statements.writeTagScanning)) 42 | val selectTagScanningForPersistenceId: CachedPreparedStatement = 43 | new CachedPreparedStatement(() => prepare(statements.selectTagScanningForPersistenceId)) 44 | } 45 | -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '2.2' 2 | services: 3 | cassandra-service: 4 | image: cassandra:latest 5 | ports: 6 | - "9042:9042" 7 | volumes: 8 | # This configuration is customized to enable materialized views. 9 | - ${PWD}/docker-files/cassandra.yaml:/etc/cassandra/cassandra.yaml 10 | healthcheck: 11 | test: ["CMD", "cqlsh", "-e", "describe keyspaces"] 12 | interval: 5s 13 | timeout: 5s 14 | retries: 60 15 | 16 | cassandra3-service: 17 | image: cassandra:3 18 | ports: 19 | - "9042:9042" 20 | healthcheck: 21 | test: ["CMD", "cqlsh", "-e", "describe keyspaces"] 22 | interval: 5s 23 | timeout: 5s 24 | retries: 60 25 | 26 | cassandra2-service: 27 | image: cassandra:2 28 | ports: 29 | - "9042:9042" 30 | volumes: 31 | - ${PWD}/docker-files/cassandra2.yaml:/etc/cassandra/cassandra.yaml 32 | healthcheck: 33 | test: ["CMD", "cqlsh", "-e", "describe keyspaces"] 34 | interval: 5s 35 | timeout: 5s 36 | retries: 60 37 | 38 | # These exist to ensure that the Cassandra service is up before the tests start. 39 | # The healthcheck above is not enough on its own because it does not provide a condition to wait for the 40 | # service to be up, and this is simpler than installing cqlsh and using it to check the service status on the 41 | # CI server. This uses alpine since it is a pretty small image.
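# For example, "docker-compose up -d cassandra-latest" starts cassandra-service and waits until its healthcheck passes.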
42 | cassandra-latest: 43 | image: alpine:latest 44 | depends_on: 45 | cassandra-service: 46 | condition: service_healthy 47 | cassandra3: 48 | image: alpine:latest 49 | depends_on: 50 | cassandra3-service: 51 | condition: service_healthy 52 | cassandra2: 53 | image: alpine:latest 54 | depends_on: 55 | cassandra2-service: 56 | condition: service_healthy 57 | 58 | dse: 59 | image: datastax/dse-server 60 | environment: 61 | - DS_LICENSE=accept 62 | ports: 63 | - "9043:9042" 64 | -------------------------------------------------------------------------------- /core/src/test/scala/akka/persistence/cassandra/journal/StartupLoadSpec.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2016-2025 Lightbend Inc. 3 | */ 4 | 5 | package akka.persistence.cassandra.journal 6 | 7 | import akka.actor._ 8 | import akka.persistence._ 9 | import akka.persistence.cassandra.CassandraSpec 10 | import akka.testkit._ 11 | 12 | import scala.concurrent.duration._ 13 | 14 | object StartupLoadSpec { 15 | class ProcessorA(val persistenceId: String, receiver: ActorRef) extends PersistentActor { 16 | def receiveRecover: Receive = { 17 | case _ => 18 | } 19 | 20 | def receiveCommand: Receive = { 21 | case payload: String => 22 | persist(payload)(handle) 23 | } 24 | 25 | def handle: Receive = { 26 | case payload: String => 27 | receiver ! payload 28 | receiver ! lastSequenceNr 29 | saveSnapshot(payload) 30 | } 31 | } 32 | 33 | } 34 | 35 | class StartupLoadSpec extends CassandraSpec { 36 | 37 | import StartupLoadSpec._ 38 | 39 | // important, since we are testing the initialization 40 | override def awaitPersistenceInit(): Unit = () 41 | 42 | "Journal initialization" must { 43 | 44 | "handle many persistent actors starting at the same time" in { 45 | val N = 500 46 | for (i <- 1 to 3) { 47 | val probesAndRefs = (1 to N).map { n => 48 | val probe = TestProbe() 49 | val persistenceId = n.toString 50 | val r = system.actorOf(Props(classOf[ProcessorA], persistenceId, probe.ref)) 51 | r ! s"a-$i" 52 | (probe, r) 53 | } 54 | 55 | probesAndRefs.foreach { 56 | case (p, r) => 57 | if (i == 1 && p == probesAndRefs.head._1) 58 | p.expectMsg(30.seconds, s"a-$i") 59 | else 60 | p.expectMsg(s"a-$i") 61 | p.expectMsg(i.toLong) // seq number 62 | p.watch(r) 63 | r ! PoisonPill 64 | p.expectTerminated(r) 65 | } 66 | } 67 | } 68 | } 69 | 70 | } 71 | -------------------------------------------------------------------------------- /docs/src/main/paradox/cleanup.md: -------------------------------------------------------------------------------- 1 | # Database Cleanup 2 | 3 | If possible, it is best to keep all events in an event sourced system. That way new [projections](https://doc.akka.io/libraries/akka-projection/current/index.html) 4 | and the `tag_view` table can be re-built if they are corrupted (e.g. due to two persistence ids writing events from two nodes in a split brain). 5 | 6 | In some cases keeping all events is not possible. `EventSourcedBehavior`s can automatically snapshot state and delete events as described in the [Akka docs](https://doc.akka.io/libraries/akka-core/current/typed/persistence-snapshot.html#snapshot-deletion). 7 | Snapshotting is useful even if events aren't deleted as it speeds up recovery. 8 | 9 | The @apidoc[akka.persistence.cassandra.cleanup.Cleanup] tool can retrospectively clean up the journal.
Its operations include: 10 | 11 | * Delete all events for a persistence id 12 | * Delete all events and tagged events for the `eventsByTag` query 13 | * Delete all snapshots for a persistence id 14 | * Delete all snapshots and events for a persistence id, keeping the latest N snapshots and all the events after them. 15 | 16 | The cleanup tool can be combined with the @ref[query plugin](./read-journal.md), which has a query to get all persistence ids. 17 | 18 | @@@ warning 19 | 20 | When running an operation with `Cleanup` that deletes all events for a persistence id, 21 | the actor with that persistence id must not be running! If the actor is restarted it would in that 22 | case be recovered to the wrong state, since the stored events have been deleted. The operation that 23 | deletes events up to a snapshot can still be used while the actor is running. 24 | 25 | @@@ 26 | 27 | 28 | Scala 29 | : @@snip [snapshot-keyspace](/docs/src/test/scala/doc/cleanup/CleanupDocExample.scala) { #cleanup } 30 | 31 | Java 32 | : @@snip [snapshot-keyspace](/docs/src/test/java/jdoc/cleanup/CleanupDocExample.java) { #cleanup } 33 | 34 | By default, all operations only print what they would do. Once you're happy with what the cleanup tool is going to do, set `akka.persistence.cassandra.cleanup.dry-run = false`. 35 | -------------------------------------------------------------------------------- /core/src/test/scala/akka/persistence/cassandra/PrintCreateStatements.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2016-2025 Lightbend Inc. 3 | */ 4 | 5 | package akka.persistence.cassandra 6 | 7 | import java.io.File 8 | import java.io.PrintWriter 9 | import akka.actor.ActorSystem 10 | 11 | /** 12 | * Main application that prints the create keyspace and create table statements. 13 | * It uses the `akka.persistence.cassandra` configuration from the default application.conf. 14 | * 15 | * These statements can be copy-pasted and run in `cqlsh`.
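 * A sketch of invoking it (assuming sbt and this repository's layout, since the
 * class lives in the test sources of the `core` module):
 * {{{
 * sbt "core/Test/runMain akka.persistence.cassandra.PrintCreateStatements"
 * }}}
 * after which the statements end up in the ./target/*.txt files written by `main` below.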
16 | */ 17 | object PrintCreateStatements { 18 | 19 | def main(args: Array[String]): Unit = { 20 | val system = ActorSystem("PrintCreateStatements") 21 | val statements = new KeyspaceAndTableStatements(system, "akka.persistence.cassandra", PluginSettings(system)) 22 | 23 | def withWriter(name: String)(f: PrintWriter => Unit): Unit = { 24 | val writer: PrintWriter = new PrintWriter(new File(name)) 25 | try { 26 | f(writer) 27 | } finally { 28 | writer.flush() 29 | writer.close() 30 | } 31 | 32 | } 33 | 34 | withWriter("./target/journal-keyspace.txt") { pw => 35 | pw.println("//#journal-keyspace") 36 | pw.println(statements.createJournalKeyspaceStatement + ";") 37 | pw.println("//#journal-keyspace") 38 | } 39 | 40 | withWriter("./target/journal-tables.txt") { pw => 41 | pw.println("//#journal-tables") 42 | pw.println(statements.createJournalTablesStatements.mkString(";\n\n") + ";") 43 | pw.println("//#journal-tables") 44 | } 45 | 46 | withWriter("./target/snapshot-keyspace.txt") { pw => 47 | pw.println("//#snapshot-keyspace") 48 | pw.println(statements.createSnapshotKeyspaceStatement + ";") 49 | pw.println("//#snapshot-keyspace") 50 | } 51 | withWriter("./target/snapshot-tables.txt") { pw => 52 | pw.println("//#snapshot-tables") 53 | pw.println(statements.createSnapshotTablesStatements.mkString(";\n\n") + ";") 54 | pw.println("//#snapshot-tables") 55 | } 56 | 57 | system.terminate() 58 | } 59 | 60 | } 61 | -------------------------------------------------------------------------------- /core/src/main/scala/akka/persistence/cassandra/Retries.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2016-2025 Lightbend Inc. 3 | */ 4 | 5 | package akka.persistence.cassandra 6 | 7 | import akka.actor.Scheduler 8 | import akka.annotation.InternalApi 9 | import akka.pattern.{ after, BackoffSupervisor } 10 | 11 | import scala.concurrent.{ ExecutionContext, Future } 12 | import scala.concurrent.duration.FiniteDuration 13 | import scala.util.control.NonFatal 14 | 15 | /** 16 | * INTERNAL API 17 | */ 18 | @InternalApi 19 | private[cassandra] object Retries { 20 | def retry[T]( 21 | attempt: () => Future[T], 22 | attempts: Int, 23 | onFailure: (Int, Throwable, FiniteDuration) => Unit, 24 | minBackoff: FiniteDuration, 25 | maxBackoff: FiniteDuration, 26 | randomFactor: Double)(implicit ec: ExecutionContext, scheduler: Scheduler): Future[T] = { 27 | require(attempts == -1 || attempts > 0, s"Attempts must be -1 or greater than 0. 
Got $attempts") 28 | retry(attempt, attempts, onFailure, minBackoff, maxBackoff, randomFactor, 0) 29 | } 30 | 31 | private def retry[T]( 32 | attempt: () => Future[T], 33 | maxAttempts: Int, 34 | onFailure: (Int, Throwable, FiniteDuration) => Unit, 35 | minBackoff: FiniteDuration, 36 | maxBackoff: FiniteDuration, 37 | randomFactor: Double, 38 | attempted: Int)(implicit ec: ExecutionContext, scheduler: Scheduler): Future[T] = { 39 | 40 | def tryAttempt(): Future[T] = { 41 | try { 42 | attempt() 43 | } catch { 44 | case NonFatal(exc) => Future.failed(exc) // in case the `attempt` function throws 45 | } 46 | } 47 | 48 | if (maxAttempts == -1 || maxAttempts - attempted != 1) { 49 | tryAttempt().recoverWith { 50 | case NonFatal(exc) => 51 | val nextDelay = BackoffSupervisor.calculateDelay(attempted, minBackoff, maxBackoff, randomFactor) 52 | onFailure(attempted + 1, exc, nextDelay) 53 | after(nextDelay, scheduler) { 54 | retry(attempt, maxAttempts, onFailure, minBackoff, maxBackoff, randomFactor, attempted + 1) 55 | } 56 | } 57 | } else { 58 | tryAttempt() 59 | } 60 | } 61 | } 62 | -------------------------------------------------------------------------------- /core/src/test/scala/akka/persistence/cassandra/Persister.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2016-2025 Lightbend Inc. 3 | */ 4 | 5 | package akka.persistence.cassandra 6 | 7 | import akka.actor.ActorRef 8 | import akka.persistence._ 9 | import akka.persistence.cassandra.Persister._ 10 | 11 | object Persister { 12 | case class CrapEvent(n: Int) 13 | case class Snapshot(s: Any) 14 | case object GetSnapshot 15 | case object SnapshotAck 16 | case object SnapshotNack 17 | case class DeleteSnapshot(sequenceNr: Long) 18 | case class DeleteSnapshots(sequenceNr: Long) 19 | } 20 | 21 | class Persister(override val persistenceId: String, probe: Option[ActorRef] = None) extends PersistentActor { 22 | def this(pid: String, probe: ActorRef) = this(pid, Some(probe)) 23 | 24 | var snapshot: Option[Any] = None 25 | var snapshotAck: Option[ActorRef] = None 26 | var deleteSnapshotAck: Option[ActorRef] = None 27 | 28 | override def receiveRecover: Receive = { 29 | case SnapshotOffer(_, s) => 30 | snapshot = Some(s) 31 | case msg => probe.foreach(_ ! msg) 32 | } 33 | override def receiveCommand: Receive = { 34 | case GetSnapshot => 35 | sender() ! snapshot 36 | case Snapshot(s) => 37 | snapshotAck = Some(sender()) 38 | saveSnapshot(s) 39 | case SaveSnapshotSuccess(_) => 40 | snapshotAck.foreach(_ ! SnapshotAck) 41 | snapshotAck = None 42 | case SaveSnapshotFailure(_, _) => 43 | snapshotAck.foreach(_ ! SnapshotNack) 44 | snapshotAck = None 45 | case DeleteSnapshot(nr) => 46 | deleteSnapshot(nr) 47 | deleteSnapshotAck = Some(sender()) 48 | case DeleteSnapshots(nr) => 49 | deleteSnapshots(SnapshotSelectionCriteria(maxSequenceNr = nr)) 50 | deleteSnapshotAck = Some(sender()) 51 | case DeleteSnapshotsSuccess(cri) => 52 | deleteSnapshotAck.foreach(_ ! cri) 53 | case DeleteSnapshotSuccess(cri) => 54 | deleteSnapshotAck.foreach(_ ! cri) 55 | deleteSnapshotAck = None 56 | case DeleteSnapshotFailure(m, c) => 57 | println(s"$m -> $c") 58 | case msg => 59 | persist(msg) { _ => 60 | probe.foreach(_ ! msg) 61 | } 62 | } 63 | 64 | override protected def onRecoveryFailure(cause: Throwable, event: Option[Any]): Unit = 65 | probe.foreach(_ ! 
cause) 66 | } 67 | -------------------------------------------------------------------------------- /project/Publish.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2023 Lightbend Inc. 3 | */ 4 | 5 | import java.util.concurrent.atomic.AtomicBoolean 6 | 7 | import scala.language.postfixOps 8 | 9 | import sbt.{ Def, _ } 10 | import Keys._ 11 | import com.geirsson.CiReleasePlugin 12 | import com.jsuereth.sbtpgp.PgpKeys.publishSigned 13 | import xerial.sbt.Sonatype.autoImport.sonatypeProfileName 14 | 15 | /** 16 | * For projects that are not published. 17 | */ 18 | object NoPublish extends AutoPlugin { 19 | override def requires = plugins.JvmPlugin 20 | 21 | override def projectSettings = 22 | Seq(publish / skip := true, publishArtifact := false, publish := {}, publishLocal := {}) 23 | } 24 | 25 | object Publish extends AutoPlugin { 26 | override def requires = plugins.JvmPlugin && Common 27 | override def trigger = AllRequirements 28 | 29 | lazy val beforePublishTask = taskKey[Unit]("setup before publish") 30 | 31 | lazy val beforePublishDone = new AtomicBoolean(false) 32 | 33 | def beforePublish(snapshot: Boolean) = { 34 | if (beforePublishDone.compareAndSet(false, true)) { 35 | CiReleasePlugin.setupGpg() 36 | if (!snapshot) 37 | cloudsmithCredentials(validate = true) 38 | } 39 | } 40 | 41 | override def projectSettings: Seq[Def.Setting[_]] = 42 | Seq( 43 | sonatypeProfileName := "com.typesafe", 44 | beforePublishTask := beforePublish(isSnapshot.value), 45 | publishSigned := publishSigned.dependsOn(beforePublishTask).value, 46 | publishTo := 47 | (if (isSnapshot.value) 48 | Some("Cloudsmith API".at("https://maven.cloudsmith.io/lightbend/akka-snapshots/")) 49 | else 50 | Some("Cloudsmith API".at("https://maven.cloudsmith.io/lightbend/akka/"))), 51 | credentials ++= cloudsmithCredentials(validate = false)) 52 | 53 | def cloudsmithCredentials(validate: Boolean): Seq[Credentials] = { 54 | (sys.env.get("PUBLISH_USER"), sys.env.get("PUBLISH_PASSWORD")) match { 55 | case (Some(user), Some(password)) => 56 | Seq(Credentials("Cloudsmith API", "maven.cloudsmith.io", user, password)) 57 | case _ => 58 | if (validate) 59 | throw new Exception("Publishing credentials expected in `PUBLISH_USER` and `PUBLISH_PASSWORD`.") 60 | else 61 | Nil 62 | } 63 | } 64 | } 65 | -------------------------------------------------------------------------------- /docs/src/main/paradox/configuration.md: -------------------------------------------------------------------------------- 1 | # Configuration 2 | 3 | Make your edits/overrides in your `application.conf`. 4 | 5 | ## Default configuration 6 | 7 | The reference configuration file with the default values: 8 | 9 | @@snip [reference.conf](/core/src/main/resources/reference.conf) 10 | 11 | Journal configuration is under `akka.persistence.cassandra.journal`. 12 | 13 | Snapshot configuration is under `akka.persistence.cassandra.snapshot`. 14 | 15 | Query configuration is under `akka.persistence.cassandra.query`. 16 | 17 | Events by tag configuration is under `akka.persistence.cassandra.events-by-tag` and shared 18 | by `journal` and `query`. 19 | 20 | The settings that are shared by the `journal`, `query`, and `snapshot` parts of the plugin are under 21 | `akka.persistence.cassandra`. 22 | 23 | ## Cassandra driver configuration 24 | 25 | All Cassandra driver settings are configured via its @extref:[standard profile mechanism](java-driver:manual/core/configuration/).
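For example, a profile override in `application.conf` could look like the following (a sketch: `akka-persistence-cassandra-profile` is the profile name used by the plugin's reference configuration, and the consistency level shown is purely illustrative):

```
datastax-java-driver.profiles {
  akka-persistence-cassandra-profile {
    basic.request.consistency = QUORUM
  }
}
```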
26 | 27 | One important setting is to configure the database driver to retry the initial connection: 28 | 29 | `datastax-java-driver.advanced.reconnect-on-init = true` 30 | 31 | It is not enabled automatically, as its default is set in the driver's reference.conf and it is not overridable in a profile. 32 | 33 | If the IP addresses of your Cassandra nodes might change (e.g. if you use Kubernetes) then 34 | 35 | `datastax-java-driver.advanced.resolve-contact-points = false` 36 | 37 | should also be set (it resolves the DNS address again when new connections are created). This also implies disabling Java's DNS cache with `-Dnetworkaddress.cache.ttl=0`. 38 | 39 | 40 | ### Cassandra driver overrides 41 | 42 | @@snip [reference.conf](/core/src/main/resources/reference.conf) { #profile } 43 | 44 | ## Contact points configuration 45 | 46 | The Cassandra server contact points can be defined with the @extref:[Cassandra driver configuration](java-driver:manual/core/configuration/): 47 | 48 | ``` 49 | datastax-java-driver { 50 | basic.contact-points = ["127.0.0.1:9042"] 51 | basic.load-balancing-policy.local-datacenter = "datacenter1" 52 | } 53 | ``` 54 | 55 | Alternatively, Akka Discovery can be used for finding the Cassandra server contact points as described 56 | in the @extref:[Alpakka Cassandra documentation](alpakka:cassandra.html#using-akka-discovery). 57 | 58 | Without any configuration it will use `localhost:9042` by default. 59 | -------------------------------------------------------------------------------- /core/src/main/scala/akka/persistence/cassandra/reconciler/DeleteTagViewForPersistenceId.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2016-2025 Lightbend Inc. 3 | */ 4 | 5 | package akka.persistence.cassandra.reconciler 6 | 7 | import akka.actor.ActorSystem 8 | import akka.persistence.cassandra.PluginSettings 9 | import akka.Done 10 | import akka.event.Logging 11 | import akka.persistence.cassandra.query.scaladsl.CassandraReadJournal 12 | import akka.annotation.InternalApi 13 | import akka.persistence.query.NoOffset 14 | import akka.persistence.cassandra.journal.TimeBucket 15 | import akka.stream.scaladsl.Sink 16 | import scala.concurrent.Future 17 | 18 | /** 19 | * Deletes tagged events for a persistence id. 20 | * 21 | * Walks the events by tags table for the given tag, filters by persistence id, and 22 | * issues deletes one at a time. 23 | * 24 | * INTERNAL API 25 | */ 26 | @InternalApi 27 | private[akka] final class DeleteTagViewForPersistenceId( 28 | persistenceIds: Set[String], 29 | tag: String, 30 | system: ActorSystem, 31 | session: ReconciliationSession, 32 | settings: PluginSettings, 33 | queries: CassandraReadJournal) { 34 | private val log = Logging(system, s"DeleteTagView($tag)") 35 | private implicit val sys: ActorSystem = system 36 | import system.dispatcher 37 | 38 | def execute(): Future[Done] = { 39 | queries 40 | .currentEventsByTagInternal(tag, NoOffset) 41 | .filter(persistenceIds contains _.persistentRepr.persistenceId) 42 | // Make the parallelism configurable?
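// For now it is fixed: mapAsync(1) below issues the deletes strictly one at a
// time, trading throughput for predictable, ordered load on the cluster.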
43 | .mapAsync(1) { uuidPr => 44 | val bucket = TimeBucket(uuidPr.offset, settings.eventsByTagSettings.bucketSize) 45 | val timestamp = uuidPr.offset 46 | val persistenceId = uuidPr.persistentRepr.persistenceId 47 | val tagPidSequenceNr = uuidPr.tagPidSequenceNr 48 | log.debug("Issuing delete {} {} {} {}", persistenceId, bucket, timestamp, tagPidSequenceNr) 49 | session.deleteFromTagView(tag, bucket, timestamp, persistenceId, tagPidSequenceNr) 50 | } 51 | .runWith(Sink.ignore) 52 | .flatMap(_ => 53 | Future.traverse(persistenceIds) { pid => 54 | val progress = session.deleteTagProgress(tag, pid) 55 | val scanning = session.deleteTagScannning(pid) 56 | for { 57 | _ <- progress 58 | _ <- scanning 59 | } yield Done 60 | }) 61 | .map(_ => Done) 62 | } 63 | 64 | } 65 | -------------------------------------------------------------------------------- /core/src/test/scala/akka/persistence/cassandra/query/EventsByTagPubsubSpec.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2016-2025 Lightbend Inc. 3 | */ 4 | 5 | package akka.persistence.cassandra.query 6 | 7 | import java.time.{ LocalDate, ZoneOffset } 8 | 9 | import akka.cluster.Cluster 10 | import akka.persistence.cassandra.CassandraSpec 11 | import akka.persistence.cassandra.journal.JournalSettings 12 | import akka.persistence.query.{ EventEnvelope, NoOffset } 13 | import akka.stream.testkit.scaladsl.TestSink 14 | import com.typesafe.config.ConfigFactory 15 | 16 | import scala.concurrent.duration._ 17 | 18 | object EventsByTagPubsubSpec { 19 | val today = LocalDate.now(ZoneOffset.UTC) 20 | 21 | val config = ConfigFactory.parseString(s""" 22 | akka.actor.provider = "akka.cluster.ClusterActorRefProvider" 23 | akka.actor.serialize-messages = off 24 | akka.actor.serialize-creators = off 25 | akka.remote.netty.tcp.port = 0 26 | akka.remote.artery.canonical.port = 0 27 | akka.remote.netty.tcp.hostname = "127.0.0.1" 28 | akka.persistence.cassandra { 29 | 30 | query.refresh-interval = 10s 31 | 32 | events-by-tag { 33 | pubsub-notification = on 34 | flush-interval = 0ms 35 | eventual-consistency-delay = 0s 36 | } 37 | } 38 | """).withFallback(EventsByTagSpec.config) 39 | } 40 | 41 | class EventsByTagPubsubSpec extends CassandraSpec(EventsByTagPubsubSpec.config) { 42 | 43 | val journalSettings = new JournalSettings(system, system.settings.config.getConfig("akka.persistence.cassandra")) 44 | 45 | override protected def beforeAll(): Unit = { 46 | super.beforeAll() 47 | Cluster(system).join(Cluster(system).selfAddress) 48 | } 49 | 50 | "Cassandra query getEventsByTag when running clustered with pubsub enabled" must { 51 | "present new events to an ongoing getEventsByTag stream long before polling would kick in" in { 52 | val actor = system.actorOf(TestActor.props("EventsByTagPubsubSpec_a")) 53 | 54 | val blackSrc = queries.eventsByTag(tag = "black", offset = NoOffset) 55 | val probe = blackSrc.runWith(TestSink.probe[Any]) 56 | probe.request(2) 57 | probe.expectNoMessage(300.millis) 58 | 59 | actor ! "a black car" 60 | probe.within(5.seconds) { // long before refresh-interval, which is 10s 61 | probe.expectNextPF { case e @ EventEnvelope(_, _, _, "a black car") => e } 62 | } 63 | } 64 | } 65 | } 66 | -------------------------------------------------------------------------------- /core/src/main/scala/akka/persistence/cassandra/journal/JournalSettings.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2016-2025 Lightbend Inc. 
3 | */ 4 | 5 | package akka.persistence.cassandra.journal 6 | 7 | import akka.actor.ActorSystem 8 | import akka.actor.NoSerializationVerificationNeeded 9 | import akka.annotation.InternalApi 10 | import akka.annotation.InternalStableApi 11 | import akka.persistence.cassandra.PluginSettings.getReplicationStrategy 12 | import akka.persistence.cassandra.compaction.CassandraCompactionStrategy 13 | import akka.persistence.cassandra.getListFromConfig 14 | import com.typesafe.config.Config 15 | 16 | /** INTERNAL API */ 17 | @InternalStableApi 18 | @InternalApi private[akka] class JournalSettings(system: ActorSystem, config: Config) 19 | extends NoSerializationVerificationNeeded { 20 | 21 | private val journalConfig = config.getConfig("journal") 22 | 23 | val writeProfile: String = config.getString("write-profile") 24 | val readProfile: String = config.getString("read-profile") 25 | 26 | val keyspaceAutoCreate: Boolean = journalConfig.getBoolean("keyspace-autocreate") 27 | val tablesAutoCreate: Boolean = journalConfig.getBoolean("tables-autocreate") 28 | 29 | val keyspace: String = journalConfig.getString("keyspace") 30 | 31 | val table: String = journalConfig.getString("table") 32 | val metadataTable: String = journalConfig.getString("metadata-table") 33 | val allPersistenceIdsTable: String = journalConfig.getString("all-persistence-ids-table") 34 | 35 | val tableCompactionStrategy: CassandraCompactionStrategy = 36 | CassandraCompactionStrategy(journalConfig.getConfig("table-compaction-strategy")) 37 | 38 | val replicationStrategy: String = getReplicationStrategy( 39 | journalConfig.getString("replication-strategy"), 40 | journalConfig.getInt("replication-factor"), 41 | getListFromConfig(journalConfig, "data-center-replication-factors")) 42 | 43 | val gcGraceSeconds: Long = journalConfig.getLong("gc-grace-seconds") 44 | 45 | val targetPartitionSize: Long = journalConfig.getLong("target-partition-size") 46 | val maxMessageBatchSize: Int = journalConfig.getInt("max-message-batch-size") 47 | 48 | val maxConcurrentDeletes: Int = journalConfig.getInt("max-concurrent-deletes") 49 | 50 | val supportDeletes: Boolean = journalConfig.getBoolean("support-deletes") 51 | 52 | val supportAllPersistenceIds: Boolean = journalConfig.getBoolean("support-all-persistence-ids") 53 | 54 | val coordinatedShutdownOnError: Boolean = config.getBoolean("coordinated-shutdown-on-error") 55 | 56 | } 57 | -------------------------------------------------------------------------------- /core/src/test/scala/akka/persistence/cassandra/query/EventsByPersistenceIdFastForwardSpec.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2016-2025 Lightbend Inc. 
3 | */ 4 | 5 | package akka.persistence.cassandra.query 6 | 7 | import java.util.UUID 8 | 9 | import akka.persistence.PersistentRepr 10 | import akka.persistence.cassandra.{ CassandraLifecycle, CassandraSpec } 11 | import akka.stream.scaladsl.Keep 12 | import akka.stream.testkit.scaladsl.TestSink 13 | import com.typesafe.config.ConfigFactory 14 | import org.scalatest.time.{ Milliseconds, Seconds, Span } 15 | 16 | object EventsByPersistenceIdFastForwardSpec { 17 | 18 | // separate from EventsByPersistenceIdWithControlSpec since it needs the refreshing enabled 19 | val config = ConfigFactory.parseString(s""" 20 | akka.persistence.cassandra.journal.keyspace=EventsByPersistenceIdFastForwardSpec 21 | akka.persistence.cassandra.query.refresh-interval = 250ms 22 | akka.persistence.cassandra.journal.target-partition-size = 15 23 | datastax-java-driver.profiles.akka-persistence-cassandra-profile.basic.request.page-size = 2 24 | """).withFallback(CassandraLifecycle.config) 25 | } 26 | 27 | class EventsByPersistenceIdFastForwardSpec 28 | extends CassandraSpec(EventsByPersistenceIdFastForwardSpec.config) 29 | with DirectWriting { 30 | 31 | override implicit val patience: PatienceConfig = 32 | PatienceConfig(timeout = Span(5, Seconds), interval = Span(100, Milliseconds)) 33 | 34 | "be able to fast forward when currently looking for missing sequence number" in { 35 | val w1 = UUID.randomUUID().toString 36 | val evt1 = PersistentRepr("e-1", 1L, "f", "", writerUuid = w1) 37 | writeTestEvent(evt1) 38 | 39 | val src = queries.eventsByPersistenceIdWithControl("f", 0L, Long.MaxValue) 40 | val (futureControl, probe) = src.map(_.event).toMat(TestSink.probe[Any])(Keep.both).run() 41 | val control = futureControl.futureValue 42 | probe.request(5) 43 | 44 | val evt3 = PersistentRepr("e-3", 3L, "f", "", writerUuid = w1) 45 | writeTestEvent(evt3) 46 | 47 | probe.expectNext("e-1") 48 | 49 | system.log.debug("Sleeping for query to go into look-for-missing-seqnr-mode") 50 | Thread.sleep(2000) 51 | 52 | // then we fast forward past the gap 53 | control.fastForward(3L) 54 | probe.expectNext("e-3") 55 | 56 | val evt2 = PersistentRepr("e-2", 2L, "f", "", writerUuid = w1) 57 | val evt4 = PersistentRepr("e-4", 4L, "f", "", writerUuid = w1) 58 | writeTestEvent(evt2) 59 | writeTestEvent(evt4) 60 | probe.expectNext("e-4") 61 | 62 | probe.cancel() 63 | } 64 | } 65 | -------------------------------------------------------------------------------- /core/src/test/scala/akka/persistence/cassandra/EventsByTagStressSpec.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2016-2025 Lightbend Inc. 
3 | */ 4 | 5 | package akka.persistence.cassandra 6 | 7 | import akka.persistence.cassandra.query.TestActor 8 | import akka.persistence.journal.Tagged 9 | import akka.persistence.query.NoOffset 10 | import akka.stream.testkit.TestSubscriber 11 | import akka.stream.testkit.scaladsl.TestSink 12 | 13 | import scala.collection.immutable 14 | import scala.concurrent.{ ExecutionContext, Future } 15 | 16 | class EventsByTagStressSpec extends CassandraSpec(s""" 17 | akka.persistence.cassandra { 18 | events-by-tag { 19 | max-message-batch-size = 25 20 | } 21 | } 22 | """) { 23 | 24 | implicit val ec: ExecutionContext = system.dispatcher 25 | 26 | val writers = 10 27 | val readers = 20 28 | val messages = 5000 29 | 30 | "EventsByTag" must { 31 | 32 | "work under load" in { 33 | val pas = (0 until writers).map { i => 34 | system.actorOf(TestActor.props(s"pid$i")) 35 | } 36 | 37 | val eventsByTagQueries: immutable.Seq[(Int, TestSubscriber.Probe[(String, Int)])] = (0 until readers).map { i => 38 | val probe = queryJournal 39 | .eventsByTag("all", NoOffset) 40 | .map(i => { 41 | (i.persistenceId, i.event.asInstanceOf[Int]) 42 | }) 43 | .runWith(TestSink.probe) 44 | (i, probe) 45 | } 46 | 47 | system.log.info("Started events by tag queries") 48 | 49 | val writes: Future[Unit] = Future { 50 | system.log.info("Sending messages") 51 | (0 until messages).foreach { i => 52 | pas.foreach(ref => { 53 | ref ! Tagged(i, Set("all")) 54 | expectMsg(s"$i-done") 55 | }) 56 | } 57 | system.log.info("Sent messages") 58 | } 59 | writes.onComplete(result => system.log.info("{}", result)) 60 | 61 | system.log.info("Reading messages") 62 | var latestValues: Map[(Int, String), Int] = Map.empty.withDefault(_ => -1) 63 | (0 until messages).foreach { _ => 64 | (0 until writers).foreach { _ => 65 | eventsByTagQueries.foreach { 66 | case (probeNr, probe) => 67 | // should be in order per persistence id per probe 68 | val (pid, msg) = probe.requestNext() 69 | latestValues((probeNr, pid)) shouldEqual (msg - 1) 70 | latestValues += (probeNr, pid) -> msg 71 | } 72 | } 73 | } 74 | system.log.info("Received all messages {}", latestValues) 75 | } 76 | 77 | } 78 | } 79 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | Akka 2 | ==== 3 | *Akka is a powerful platform that simplifies building and operating highly responsive, resilient, and scalable services.* 4 | 5 | 6 | The platform consists of 7 | * the [**Akka SDK**](https://doc.akka.io/) for straightforward, rapid development with AI assist and automatic clustering. Services built with the Akka SDK are automatically clustered and can be deployed on any infrastructure. 8 | * and [**Akka Automated Operations**](https://doc.akka.io/operations/akka-platform.html), a managed solution that handles everything for Akka SDK services from auto-elasticity to multi-region high availability running safely within your VPC. 9 | 10 | The **Akka SDK** and **Akka Automated Operations** are built upon the foundational [**Akka libraries**](https://doc.akka.io/libraries/akka-dependencies/current/), providing the building blocks for distributed systems. 11 | 12 | 13 | Cassandra Plugins for Akka Persistence 14 | ====================================== 15 | 16 | Replicated [Akka Persistence](https://doc.akka.io/libraries/akka-core/current/scala/persistence.html) journal and snapshot store backed by [Apache Cassandra](https://cassandra.apache.org/). 
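To depend on the plugin, add it to your sbt build (a sketch; replace the `<version>` placeholder with a release from the releases page linked below):

```scala
libraryDependencies += "com.typesafe.akka" %% "akka-persistence-cassandra" % "<version>"
```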
17 | 18 | For questions please use the [discuss.akka.io](https://discuss.akka.io/c/akka/). 19 | 20 | Reference Documentation 21 | ----------------------- 22 | 23 | The reference documentation for all Akka libraries is available via [doc.akka.io/libraries/](https://doc.akka.io/libraries/), details for the Akka Cassandra Plugin 24 | for [Scala](https://doc.akka.io/libraries/akka-persistence-cassandra/current/?language=scala) and [Java](https://doc.akka.io/libraries/akka-persistence-cassandra/current/?language=java). 25 | 26 | The current versions of all Akka libraries are listed on the [Akka Dependencies](https://doc.akka.io/libraries/akka-dependencies/current/) page. Releases of the Akka Cassandra Plugin in this repository are listed on the [GitHub releases](https://github.com/akka/akka-persistence-cassandra/releases) page. 27 | 28 | 29 | 30 | ## History 31 | 32 | This [Apache Cassandra](https://cassandra.apache.org/) plugin to Akka Persistence was initiated [originally](https://github.com/krasserm/akka-persistence-cassandra) by Martin Krasser, [@krasserm](https://github.com/krasserm) in 2014. 33 | 34 | It moved to the [Akka](https://github.com/akka/) organisation in 2016 and the first release after that move was 0.7 in January 2016. 35 | 36 | ## License 37 | 38 | Akka is licensed under the Business Source License 1.1, please see the [Akka License FAQ](https://www.lightbend.com/akka/license-faq). 39 | 40 | Tests and documentation are under a separate license, see the LICENSE file in each documentation and test root directory for details. 41 | -------------------------------------------------------------------------------- /core/src/main/scala/akka/persistence/cassandra/compaction/SizeTieredCompactionStrategy.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2016-2025 Lightbend Inc. 
3 | */ 4 | 5 | package akka.persistence.cassandra.compaction 6 | 7 | import com.typesafe.config.Config 8 | 9 | /* 10 | * Based upon https://github.com/apache/cassandra/blob/cassandra-2.2/src/java/org/apache/cassandra/db/compaction/SizeTieredCompactionStrategy.java 11 | */ 12 | class SizeTieredCompactionStrategy(config: Config) 13 | extends BaseCompactionStrategy( 14 | config, 15 | SizeTieredCompactionStrategy.ClassName, 16 | SizeTieredCompactionStrategy.propertyKeys) { 17 | 18 | val bucketHigh: Double = 19 | if (config.hasPath("bucket_high")) config.getDouble("bucket_high") else 1.5 20 | val bucketLow: Double = 21 | if (config.hasPath("bucket_low")) config.getDouble("bucket_low") else 0.5 22 | val maxThreshold: Int = 23 | if (config.hasPath("max_threshold")) config.getInt("max_threshold") else 32 24 | val minThreshold: Int = 25 | if (config.hasPath("min_threshold")) config.getInt("min_threshold") else 4 26 | val minSSTableSize: Long = 27 | if (config.hasPath("min_sstable_size")) config.getLong("min_sstable_size") 28 | else 50 29 | 30 | require(bucketHigh > bucketLow, s"bucket_high must be larger than bucket_low, but was $bucketHigh") 31 | require(maxThreshold > 0, s"max_threshold must be larger than 0, but was $maxThreshold") 32 | require(minThreshold > 1, s"min_threshold must be larger than 1, but was $minThreshold") 33 | require(maxThreshold > minThreshold, s"max_threshold must be larger than min_threshold, but was $maxThreshold") 34 | require(minSSTableSize > 0, s"min_sstable_size must be larger than 0, but was $minSSTableSize") 35 | 36 | override def asCQL: String = 37 | s"""{ 38 | |'class' : '${SizeTieredCompactionStrategy.ClassName}', 39 | |${super.asCQL}, 40 | |'bucket_high' : $bucketHigh, 41 | |'bucket_low' : $bucketLow, 42 | |'max_threshold' : $maxThreshold, 43 | |'min_threshold' : $minThreshold, 44 | |'min_sstable_size' : $minSSTableSize 45 | |} 46 | """.stripMargin.trim 47 | } 48 | 49 | object SizeTieredCompactionStrategy extends CassandraCompactionStrategyConfig[SizeTieredCompactionStrategy] { 50 | override val ClassName: String = "SizeTieredCompactionStrategy" 51 | 52 | override def propertyKeys: List[String] = 53 | (BaseCompactionStrategy.propertyKeys ++ 54 | List("bucket_high", "bucket_low", "max_threshold", "min_threshold", "min_sstable_size")).sorted 55 | 56 | override def fromConfig(config: Config): SizeTieredCompactionStrategy = 57 | new SizeTieredCompactionStrategy(config) 58 | } 59 | -------------------------------------------------------------------------------- /core/src/main/scala/akka/persistence/cassandra/journal/TimeBucket.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2016-2025 Lightbend Inc. 
3 | */ 4 | 5 | package akka.persistence.cassandra.journal 6 | 7 | import java.util.UUID 8 | 9 | import akka.annotation.InternalApi 10 | import akka.persistence.cassandra.BucketSize 11 | import akka.util.HashCode 12 | import com.datastax.oss.driver.api.core.uuid.Uuids 13 | 14 | /** INTERNAL API */ 15 | @InternalApi private[akka] object TimeBucket { 16 | 17 | def apply(timeuuid: UUID, bucketSize: BucketSize): TimeBucket = 18 | apply(Uuids.unixTimestamp(timeuuid), bucketSize) 19 | 20 | def apply(epochTimestamp: Long, bucketSize: BucketSize): TimeBucket = 21 | // round down to bucket size so the times are deterministic 22 | new TimeBucket(roundDownBucketSize(epochTimestamp, bucketSize), bucketSize) 23 | 24 | private def roundDownBucketSize(time: Long, bucketSize: BucketSize): Long = { 25 | val key: Long = time / bucketSize.durationMillis 26 | key * bucketSize.durationMillis 27 | } 28 | } 29 | 30 | /** INTERNAL API */ 31 | @InternalApi private[akka] final class TimeBucket private (val key: Long, val bucketSize: BucketSize) { 32 | def inPast: Boolean = 33 | key < TimeBucket.roundDownBucketSize(System.currentTimeMillis(), bucketSize) 34 | 35 | def isCurrent: Boolean = { 36 | val now = System.currentTimeMillis() 37 | now >= key && now < (key + bucketSize.durationMillis) 38 | } 39 | 40 | def within(uuid: UUID): Boolean = { 41 | val when = Uuids.unixTimestamp(uuid) 42 | when >= key && when < (key + bucketSize.durationMillis) 43 | } 44 | 45 | def next(): TimeBucket = 46 | new TimeBucket(key + bucketSize.durationMillis, bucketSize) 47 | 48 | def previous(steps: Int): TimeBucket = 49 | if (steps == 0) this 50 | else new TimeBucket(key - steps * bucketSize.durationMillis, bucketSize) 51 | 52 | def >(other: TimeBucket): Boolean = 53 | key > other.key 54 | 55 | def <(other: TimeBucket): Boolean = 56 | key < other.key 57 | 58 | def <=(other: TimeBucket): Boolean = 59 | key <= other.key 60 | 61 | override def equals(other: Any): Boolean = other match { 62 | case that: TimeBucket => 63 | key == that.key && 64 | bucketSize == that.bucketSize 65 | case _ => false 66 | } 67 | 68 | override def hashCode(): Int = { 69 | var result = HashCode.SEED 70 | result = HashCode.hash(result, key) 71 | result = HashCode.hash(result, bucketSize) 72 | result 73 | } 74 | 75 | import akka.persistence.cassandra._ 76 | 77 | override def toString = 78 | s"TimeBucket($key, $bucketSize, inPast: $inPast, currentBucket: $isCurrent. time: ${formatUnixTime(key)} )" 79 | } 80 | -------------------------------------------------------------------------------- /core/src/test/scala/akka/persistence/cassandra/journal/PersistAllSpec.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2016-2025 Lightbend Inc. 
3 | */ 4 | 5 | package akka.persistence.cassandra.journal 6 | 7 | import java.util.UUID 8 | 9 | import akka.actor._ 10 | import akka.persistence._ 11 | import akka.persistence.cassandra.CassandraLifecycle 12 | import akka.persistence.cassandra.CassandraSpec 13 | import akka.testkit._ 14 | import com.typesafe.config.ConfigFactory 15 | import org.scalatest.matchers.should.Matchers 16 | import org.scalatest.wordspec.AnyWordSpecLike 17 | 18 | object PersistAllSpec { 19 | val config = ConfigFactory.parseString(s""" 20 | akka.persistence.cassandra.journal.max-message-batch-size = 100 21 | akka.persistence.cassandra.journal.keyspace=PersistAllSpec 22 | akka.persistence.cassandra.snapshot.keyspace=PersistAllSpecSnapshot 23 | """).withFallback(CassandraLifecycle.config) 24 | 25 | case class DeleteTo(snr: Long) 26 | 27 | class ProcessorAtomic(val persistenceId: String, receiver: ActorRef) extends PersistentActor { 28 | def receiveRecover: Receive = handle 29 | 30 | def receiveCommand: Receive = { 31 | case DeleteTo(sequenceNr) => 32 | deleteMessages(sequenceNr) 33 | case payload: List[_] => 34 | persistAll(payload)(handle) 35 | } 36 | 37 | def handle: Receive = { 38 | case payload: String => 39 | receiver ! payload 40 | receiver ! lastSequenceNr 41 | receiver ! recoveryRunning 42 | } 43 | } 44 | } 45 | 46 | import akka.persistence.cassandra.journal.PersistAllSpec._ 47 | 48 | class PersistAllSpec extends CassandraSpec(config) with ImplicitSender with AnyWordSpecLike with Matchers { 49 | 50 | private def stopAndWaitUntilTerminated(ref: ActorRef) = { 51 | watch(ref) 52 | ref ! PoisonPill 53 | expectTerminated(ref) 54 | } 55 | 56 | "A Cassandra journal" must { 57 | 58 | // reproducer of issue #869 59 | "write and replay with persistAll greater than max-message-batch-size" in { 60 | val persistenceId = UUID.randomUUID().toString 61 | val processorAtomic = system.actorOf(Props(classOf[ProcessorAtomic], persistenceId, self)) 62 | 63 | val N = 200 64 | 65 | processorAtomic !
(1 to N).map(n => s"a-$n").toList 66 | (1L to N).foreach { i => 67 | expectMsgAllOf(s"a-$i", i, false) 68 | } 69 | 70 | stopAndWaitUntilTerminated(processorAtomic) 71 | 72 | val testProbe = TestProbe() 73 | val processor2 = system.actorOf(Props(classOf[ProcessorAtomic], persistenceId, testProbe.ref)) 74 | (1L to N).foreach { i => 75 | testProbe.expectMsgAllOf(s"a-$i", i, true) 76 | } 77 | processor2 78 | } 79 | } 80 | } 81 | -------------------------------------------------------------------------------- /.github/workflows/publish.yml: -------------------------------------------------------------------------------- 1 | name: Publish 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | - release-* 8 | tags: ["v*"] 9 | 10 | permissions: 11 | contents: read 12 | 13 | jobs: 14 | release: 15 | # runs on main repo only 16 | if: github.event.repository.fork == false 17 | name: Release 18 | environment: release 19 | runs-on: Akka-Default 20 | env: 21 | JAVA_OPTS: -Xms2G -Xmx2G -Xss2M -XX:ReservedCodeCacheSize=256M -Dfile.encoding=UTF-8 22 | steps: 23 | - name: Checkout 24 | # https://github.com/actions/checkout/releases 25 | # v4.1.1 26 | uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 27 | with: 28 | # we don't know what commit the last tag was it's safer to get entire repo so previousStableVersion resolves 29 | fetch-depth: 0 30 | 31 | - name: Set up JDK 11 32 | # https://github.com/coursier/setup-action/releases 33 | # v1.3.5 34 | uses: coursier/setup-action@7bde40eee928896f074dbb76d22dd772eed5c65f 35 | with: 36 | jvm: temurin:1.11.0.17 37 | 38 | - name: Cache Coursier cache 39 | # https://github.com/coursier/cache-action/releases 40 | # v6.4.5 41 | uses: coursier/cache-action@1ff273bff02a8787bc9f1877d347948af647956d 42 | 43 | - name: Publish artifacts for all Scala versions 44 | env: 45 | PGP_PASSPHRASE: ${{ secrets.PGP_PASSPHRASE }} 46 | PGP_SECRET: ${{ secrets.PGP_SECRET }} 47 | PUBLISH_USER: ${{ secrets.PUBLISH_USER }} 48 | PUBLISH_PASSWORD: ${{ secrets.PUBLISH_PASSWORD }} 49 | run: sbt +publishSigned 50 | 51 | documentation: 52 | name: Documentation 53 | runs-on: Akka-Default 54 | if: github.event.repository.fork == false 55 | steps: 56 | - name: Checkout 57 | # https://github.com/actions/checkout/releases 58 | # v4.1.1 59 | uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 60 | with: 61 | # we don't know what commit the last tag was it's safer to get entire repo so previousStableVersion resolves 62 | fetch-depth: 0 63 | 64 | - name: Set up JDK 25 65 | # https://github.com/coursier/setup-action/releases 66 | # v1.3.5 67 | uses: coursier/setup-action@7bde40eee928896f074dbb76d22dd772eed5c65f 68 | with: 69 | jvm: temurin:1.25 70 | 71 | - name: Publish 72 | run: |- 73 | eval "$(ssh-agent -s)" 74 | echo $AKKA_RSYNC_GUSTAV | base64 -d > .github/id_rsa 75 | chmod 600 .github/id_rsa 76 | ssh-add .github/id_rsa 77 | sbt docs/publishRsync 78 | env: 79 | AKKA_RSYNC_GUSTAV: ${{ secrets.AKKA_RSYNC_GUSTAV }} 80 | 81 | -------------------------------------------------------------------------------- /example/src/main/scala/akka/persistence/cassandra/example/Main.scala: -------------------------------------------------------------------------------- 1 | package akka.persistence.cassandra.example 2 | 3 | import akka.actor.typed.{ ActorRef, ActorSystem } 4 | import akka.actor.typed.scaladsl.Behaviors 5 | import akka.cluster.sharding.typed.ShardingEnvelope 6 | import akka.cluster.typed.{ Cluster, SelfUp, Subscribe } 7 | import akka.management.cluster.bootstrap.ClusterBootstrap 8 | 
import akka.management.scaladsl.AkkaManagement 9 | import akka.persistence.cassandra.example.LoadGenerator.Start 10 | import akka.stream.alpakka.cassandra.scaladsl.CassandraSessionRegistry 11 | 12 | import scala.concurrent.Await 13 | import scala.concurrent.duration._ 14 | 15 | object Main { 16 | 17 | def main(args: Array[String]): Unit = { 18 | 19 | ActorSystem(Behaviors.setup[SelfUp] { 20 | ctx => 21 | val readSettings = ReadSide.Settings(ctx.system.settings.config.getConfig("cassandra.example")) 22 | val writeSettings = ConfigurablePersistentActor.Settings(readSettings.nrTags) 23 | val loadSettings = LoadGenerator.Settings(ctx.system.settings.config.getConfig("cassandra.example")) 24 | 25 | AkkaManagement(ctx.system).start() 26 | ClusterBootstrap(ctx.system).start() 27 | val cluster = Cluster(ctx.system) 28 | cluster.subscriptions ! Subscribe(ctx.self, classOf[SelfUp]) 29 | 30 | val topic = ReadSideTopic.init(ctx) 31 | 32 | if (cluster.selfMember.hasRole("read")) { 33 | val session = CassandraSessionRegistry(ctx.system).sessionFor("akka.persistence.cassandra") 34 | val offsetTableStmt = 35 | """ 36 | CREATE TABLE IF NOT EXISTS akka.offsetStore ( 37 | eventProcessorId text, 38 | tag text, 39 | timeUuidOffset timeuuid, 40 | PRIMARY KEY (eventProcessorId, tag) 41 | ) 42 | """ 43 | 44 | Await.ready(session.executeDDL(offsetTableStmt), 30.seconds) 45 | } 46 | 47 | Behaviors.receiveMessage { 48 | case SelfUp(state) => 49 | ctx.log.info( 50 | "Cluster member joined. Initializing persistent actors. Roles {}. Members {}", 51 | cluster.selfMember.roles, 52 | state.members) 53 | val ref = ConfigurablePersistentActor.init(writeSettings, ctx.system) 54 | if (cluster.selfMember.hasRole("read")) { 55 | ctx.spawnAnonymous(Reporter(topic)) 56 | } 57 | ReadSide(ctx.system, topic, readSettings) 58 | if (cluster.selfMember.hasRole("load")) { 59 | ctx.log.info("Starting load generation") 60 | val load = ctx.spawn(LoadGenerator(loadSettings, ref), "load-generator") 61 | load ! Start(10.seconds) 62 | } 63 | Behaviors.empty 64 | } 65 | }, "apc-example") 66 | } 67 | } 68 | -------------------------------------------------------------------------------- /core/src/main/scala/akka/persistence/cassandra/reconciler/BuildTagViewForPersistenceId.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2016-2025 Lightbend Inc. 
3 | */ 4 | 5 | package akka.persistence.cassandra.reconciler 6 | 7 | import akka.actor.ActorSystem 8 | import akka.persistence.cassandra.PluginSettings 9 | import akka.Done 10 | import akka.persistence.cassandra.journal.TagWriter._ 11 | import scala.concurrent.duration._ 12 | import scala.concurrent.Future 13 | import akka.stream.scaladsl.Source 14 | import akka.actor.ExtendedActorSystem 15 | import akka.persistence.query.PersistenceQuery 16 | import akka.persistence.cassandra.query.scaladsl.CassandraReadJournal 17 | import akka.event.Logging 18 | import akka.persistence.cassandra.journal.CassandraTagRecovery 19 | import akka.persistence.cassandra.Extractors 20 | import akka.util.Timeout 21 | import akka.stream.{ OverflowStrategy } 22 | import akka.stream.scaladsl.Sink 23 | import akka.annotation.InternalApi 24 | import akka.serialization.SerializationExtension 25 | 26 | /** 27 | * INTERNAL API 28 | */ 29 | @InternalApi 30 | private[akka] final class BuildTagViewForPersisetceId( 31 | persistenceId: String, 32 | system: ActorSystem, 33 | recovery: CassandraTagRecovery, 34 | settings: PluginSettings) { 35 | 36 | import system.dispatcher 37 | 38 | private implicit val sys: ActorSystem = system 39 | private val log = Logging(system, classOf[BuildTagViewForPersisetceId]) 40 | private val serialization = SerializationExtension(system) 41 | 42 | private val queries: CassandraReadJournal = 43 | PersistenceQuery(system.asInstanceOf[ExtendedActorSystem]) 44 | .readJournalFor[CassandraReadJournal]("akka.persistence.cassandra.query") 45 | 46 | private implicit val flushTimeout: Timeout = Timeout(30.seconds) 47 | 48 | def reconcile(flushEvery: Int = 1000): Future[Done] = { 49 | 50 | val recoveryPrep = for { 51 | tp <- recovery.lookupTagProgress(persistenceId) 52 | _ <- recovery.setTagProgress(persistenceId, tp) 53 | } yield tp 54 | 55 | Source 56 | .futureSource(recoveryPrep.map((tp: Map[String, TagProgress]) => { 57 | log.debug("[{}] Rebuilding tag view table from: [{}]", persistenceId, tp) 58 | queries 59 | .eventsByPersistenceId( 60 | persistenceId, 61 | 0, 62 | Long.MaxValue, 63 | Long.MaxValue, 64 | None, 65 | settings.journalSettings.readProfile, 66 | "BuildTagViewForPersistenceId", 67 | extractor = Extractors.rawEvent(settings.eventsByTagSettings.bucketSize, serialization, system)) 68 | .map(recovery.sendMissingTagWriteRaw(tp, actorRunning = false)) 69 | .buffer(flushEvery, OverflowStrategy.backpressure) 70 | .mapAsync(1)(_ => recovery.flush(flushTimeout)) 71 | })) 72 | .runWith(Sink.ignore) 73 | 74 | } 75 | 76 | } 77 | -------------------------------------------------------------------------------- /core/src/test/scala/akka/persistence/cassandra/healthcheck/CassandraHealthCheckSpec.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2016-2025 Lightbend Inc. 
3 | */ 4 | 5 | package akka.persistence.cassandra.healthcheck 6 | 7 | import akka.persistence.cassandra.{ CassandraLifecycle, CassandraSpec } 8 | 9 | class CassandraHealthCheckDefaultQuerySpec extends CassandraSpec with CassandraLifecycle { 10 | 11 | "CassandraHealthCheck" must { 12 | "reply with successful health check result when plugin uses default query" in { 13 | val healthCheckResult = new CassandraHealthCheck(system)() 14 | healthCheckResult.futureValue shouldBe true 15 | } 16 | } 17 | } 18 | 19 | class CassandraHealthCheckCustomQueryNonEmptyResultSpec extends CassandraSpec(s""" 20 | akka.persistence.cassandra.healthcheck.health-check-cql="SELECT * FROM system.peers" 21 | """) with CassandraLifecycle { 22 | 23 | override def beforeAll(): Unit = { 24 | super.beforeAll() 25 | cluster.execute("INSERT INTO system.peers(peer, data_center) VALUES ('10.0.0.1', 'cassandra-dc')") 26 | cluster.execute("INSERT INTO system.peers(peer, data_center) VALUES ('10.0.0.2', 'cassandra-dc')") 27 | } 28 | 29 | override def afterAll(): Unit = { 30 | cluster.execute("DELETE FROM system.peers WHERE peer = '10.0.0.1'") 31 | cluster.execute("DELETE FROM system.peers WHERE peer = '10.0.0.2'") 32 | super.afterAll() 33 | } 34 | 35 | "CassandraHealthCheck" must { 36 | "reply with successful health check result when plugin executes custom query and result is non-empty" in { 37 | val healthCheckResult = new CassandraHealthCheck(system)() 38 | healthCheckResult.futureValue shouldBe true 39 | } 40 | } 41 | } 42 | 43 | class CassandraHealthCheckCustomQueryEmptyResultSpec extends CassandraSpec(s""" 44 | akka.persistence.cassandra.healthcheck.health-check-cql="SELECT * FROM system.peers" 45 | """) with CassandraLifecycle { 46 | 47 | override def beforeAll(): Unit = { 48 | super.beforeAll() 49 | cluster.execute("TRUNCATE system.peers") 50 | } 51 | 52 | "CassandraHealthCheck" must { 53 | "reply with successful health check result when plugin executes custom query and result is empty" in { 54 | val healthCheckResult = new CassandraHealthCheck(system)() 55 | healthCheckResult.futureValue shouldBe true 56 | } 57 | } 58 | } 59 | 60 | class CassandraHealthCheckCustomFailingQuerySpec extends CassandraSpec(s""" 61 | akka.persistence.cassandra.healthcheck.health-check-cql="SELECT * FROM non_existing_keyspace.non_existing_table" 62 | """) with CassandraLifecycle { 63 | 64 | "CassandraHealthCheck" must { 65 | "reply with failed health check result when plugin executes custom query and it fails" in { 66 | val healthCheckResult = new CassandraHealthCheck(system)() 67 | healthCheckResult.futureValue shouldBe false 68 | } 69 | } 70 | } 71 | -------------------------------------------------------------------------------- /core/src/test/scala/akka/persistence/cassandra/journal/CassandraSerializationSpec.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2016-2025 Lightbend Inc. 
3 | */ 4 | 5 | package akka.persistence.cassandra.journal 6 | 7 | import akka.actor.{ ExtendedActorSystem, Props } 8 | import akka.persistence.RecoveryCompleted 9 | import akka.persistence.cassandra.{ CassandraLifecycle, CassandraSpec, Persister } 10 | import akka.serialization.BaseSerializer 11 | import akka.testkit.TestProbe 12 | import com.typesafe.config.ConfigFactory 13 | 14 | object CassandraSerializationSpec { 15 | val config = ConfigFactory.parseString(s""" 16 | |akka.actor.serialize-messages=false 17 | |akka.actor.serializers.crap="akka.persistence.cassandra.journal.BrokenDeSerialization" 18 | |akka.actor.serialization-identifiers."akka.persistence.cassandra.journal.BrokenDeSerialization" = 666 19 | |akka.actor.serialization-bindings { 20 | | "akka.persistence.cassandra.Persister$$CrapEvent" = crap 21 | |} 22 | |akka.persistence.journal.max-deletion-batch-size = 3 23 | |akka.persistence.publish-confirmations = on 24 | |akka.persistence.publish-plugin-commands = on 25 | |akka.persistence.cassandra.journal.target-partition-size = 5 26 | |akka.persistence.cassandra.max-result-size = 3 27 | |akka.persistence.cassandra.journal.keyspace=CassandraIntegrationSpec 28 | |akka.persistence.cassandra.snapshot.keyspace=CassandraIntegrationSpecSnapshot 29 | | 30 | """.stripMargin).withFallback(CassandraLifecycle.config) 31 | 32 | } 33 | 34 | class BrokenDeSerialization(override val system: ExtendedActorSystem) extends BaseSerializer { 35 | override def includeManifest: Boolean = false 36 | override def toBinary(o: AnyRef): Array[Byte] = 37 | // I was serious with the class name 38 | Array.emptyByteArray 39 | override def fromBinary(bytes: Array[Byte], manifest: Option[Class[_]]): AnyRef = 40 | throw new RuntimeException("I can't deserialize a single thing") 41 | } 42 | 43 | class CassandraSerializationSpec extends CassandraSpec(CassandraSerializationSpec.config) { 44 | 45 | import akka.persistence.cassandra.Persister._ 46 | 47 | "A Cassandra journal" must { 48 | 49 | "Fail recovery when deserialization fails" in { 50 | val probe = TestProbe() 51 | val incarnation1 = system.actorOf(Props(new Persister("id1", probe.ref))) 52 | probe.expectMsgType[RecoveryCompleted] 53 | 54 | incarnation1 ! CrapEvent(1) 55 | probe.expectMsg(CrapEvent(1)) 56 | 57 | probe.watch(incarnation1) 58 | system.stop(incarnation1) 59 | probe.expectTerminated(incarnation1) 60 | 61 | val incarnation2 = system.actorOf(Props(new Persister("id1", probe.ref))) 62 | probe.expectMsgType[RuntimeException].getMessage shouldBe "I can't deserialize a single thing" 63 | incarnation2 64 | } 65 | 66 | } 67 | 68 | } 69 | -------------------------------------------------------------------------------- /core/src/main/scala/akka/persistence/cassandra/KeyspaceAndTableStatements.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2016-2025 Lightbend Inc. 3 | */ 4 | 5 | package akka.persistence.cassandra 6 | 7 | import scala.collection.immutable 8 | import scala.jdk.CollectionConverters._ 9 | 10 | import akka.actor.ClassicActorSystemProvider 11 | 12 | /** 13 | * Definitions of keyspace and table creation statements. 14 | */ 15 | class KeyspaceAndTableStatements( 16 | systemProvider: ClassicActorSystemProvider, 17 | configPath: String, 18 | settings: PluginSettings) 19 | extends CassandraStatements(settings) { 20 | 21 | /** 22 | * The Cassandra Statement that can be used to create the configured keyspace. 
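 * A sketch of obtaining it (mirroring what `PrintCreateStatements` in the test sources does,
 * given a classic `system` and the default config path):
 * {{{
 * val statements = new KeyspaceAndTableStatements(system, "akka.persistence.cassandra", PluginSettings(system))
 * println(statements.createJournalKeyspaceStatement + ";")
 * }}}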
23 | * 24 | * This can be queried in for example a startup script without accessing the actual 25 | * Cassandra plugin actor. 26 | */ 27 | def createJournalKeyspaceStatement: String = 28 | journalStatements.createKeyspace 29 | 30 | /** 31 | * Scala API: The Cassandra statements that can be used to create the configured tables. 32 | * 33 | * This can be queried in for example a startup script without accessing the actual 34 | * Cassandra plugin actor. 35 | * 36 | */ 37 | def createJournalTablesStatements: immutable.Seq[String] = 38 | journalStatements.createTable :: 39 | journalStatements.createTagsTable :: 40 | journalStatements.createTagsProgressTable :: 41 | journalStatements.createTagScanningTable :: 42 | journalStatements.createMetadataTable :: 43 | journalStatements.createAllPersistenceIdsTable :: 44 | Nil 45 | 46 | /** 47 | * Java API: The Cassandra statements that can be used to create the configured tables. 48 | * 49 | * This can be queried in for example a startup script without accessing the actual 50 | * Cassandra plugin actor. 51 | */ 52 | def getCreateJournalTablesStatements: java.util.List[String] = createJournalTablesStatements.asJava 53 | 54 | /** 55 | * The Cassandra Statement that can be used to create the configured keyspace. 56 | * 57 | * This can be queried in for example a startup script without accessing the actual 58 | * Cassandra plugin actor. 59 | */ 60 | def createSnapshotKeyspaceStatement: String = 61 | snapshotStatements.createKeyspace 62 | 63 | /** 64 | * Scala API: The Cassandra statements that can be used to create the configured tables. 65 | * 66 | * This can be queried in for example a startup script without accessing the actual 67 | * Cassandra plugin actor. 68 | */ 69 | def createSnapshotTablesStatements: immutable.Seq[String] = 70 | snapshotStatements.createTable :: Nil 71 | 72 | /** 73 | * Java API: The Cassandra statements that can be used to create the configured tables. 74 | * 75 | * This can be queried in for example a startup script without accessing the actual 76 | * Cassandra plugin actor. 77 | */ 78 | def getCreateSnapshotTablesStatements: java.util.List[String] = createSnapshotTablesStatements.asJava 79 | 80 | } 81 | -------------------------------------------------------------------------------- /core/src/test/scala/akka/persistence/cassandra/query/CassandraQueryJournalOverrideSpec.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2016-2025 Lightbend Inc. 
3 | */ 4 | 5 | package akka.persistence.cassandra.query 6 | 7 | import akka.actor.ExtendedActorSystem 8 | import akka.persistence.PersistentRepr 9 | import akka.persistence.cassandra.TestTaggingActor.Ack 10 | import akka.persistence.cassandra.query.scaladsl.CassandraReadJournal 11 | import akka.persistence.cassandra.{ CassandraLifecycle, CassandraSpec, TestTaggingActor } 12 | import akka.persistence.query.{ PersistenceQuery, ReadJournalProvider } 13 | import akka.stream.testkit.scaladsl.TestSink 14 | import com.typesafe.config.{ Config, ConfigFactory } 15 | 16 | import scala.concurrent.duration._ 17 | 18 | class JournalOverride(as: ExtendedActorSystem, config: Config, configPath: String) 19 | extends CassandraReadJournal(as, config, configPath) { 20 | override private[akka] def mapEvent(pr: PersistentRepr) = 21 | PersistentRepr("cat", pr.sequenceNr, pr.persistenceId, pr.manifest, pr.deleted, pr.sender, pr.writerUuid) 22 | } 23 | 24 | class JournalOverrideProvider(as: ExtendedActorSystem, config: Config, configPath: String) extends ReadJournalProvider { 25 | override def scaladslReadJournal() = new JournalOverride(as, config, configPath) 26 | override def javadslReadJournal() = null 27 | } 28 | 29 | object CassandraQueryJournalOverrideSpec { 30 | 31 | val config = ConfigFactory.parseString(""" 32 | akka.persistence.cassandra.query { 33 | class = "akka.persistence.cassandra.query.JournalOverrideProvider" 34 | } 35 | """.stripMargin).withFallback(CassandraLifecycle.config) 36 | 37 | } 38 | 39 | class CassandraQueryJournalOverrideSpec extends CassandraSpec(CassandraQueryJournalOverrideSpec.config) { 40 | 41 | lazy val journal = 42 | PersistenceQuery(system).readJournalFor[JournalOverride](CassandraReadJournal.Identifier) 43 | 44 | "Cassandra query journal override" must { 45 | "map events" in { 46 | val pid = "p1" 47 | val p1 = system.actorOf(TestTaggingActor.props(pid)) 48 | p1 ! "not a cat" 49 | expectMsg(Ack) 50 | 51 | val currentEvents = journal.currentEventsByPersistenceId(pid, 0, Long.MaxValue) 52 | val currentProbe = currentEvents.map(_.event.toString).runWith(TestSink.probe[String]) 53 | currentProbe.request(2) 54 | currentProbe.expectNext("cat") 55 | currentProbe.expectComplete() 56 | 57 | val liveEvents = journal.eventsByPersistenceId(pid, 0, Long.MaxValue) 58 | val liveProbe = liveEvents.map(_.event.toString).runWith(TestSink.probe[String]) 59 | liveProbe.request(2) 60 | liveProbe.expectNext("cat") 61 | liveProbe.expectNoMessage(100.millis) 62 | liveProbe.cancel() 63 | 64 | val internalEvents = journal.eventsByPersistenceIdWithControl(pid, 0, Long.MaxValue, None) 65 | val internalProbe = internalEvents.map(_.event.toString).runWith(TestSink.probe[String]) 66 | internalProbe.request(2) 67 | internalProbe.expectNext("cat") 68 | internalProbe.expectNoMessage(100.millis) 69 | internalProbe.cancel() 70 | } 71 | } 72 | } 73 | -------------------------------------------------------------------------------- /core/src/test/scala/akka/persistence/cassandra/query/javadsl/CassandraReadJournalSpec.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2016-2025 Lightbend Inc.
3 | */ 4 | 5 | package akka.persistence.cassandra.query.javadsl 6 | 7 | import akka.persistence.cassandra.query.{ javadsl, scaladsl, TestActor } 8 | import akka.persistence.cassandra.{ CassandraLifecycle, CassandraSpec } 9 | import akka.persistence.journal.{ Tagged, WriteEventAdapter } 10 | import akka.persistence.query.{ Offset, PersistenceQuery } 11 | import akka.stream.testkit.scaladsl.TestSink 12 | import com.typesafe.config.ConfigFactory 13 | 14 | import scala.concurrent.duration._ 15 | 16 | object CassandraReadJournalSpec { 17 | val config = ConfigFactory.parseString(s""" 18 | akka.actor.serialize-messages=off 19 | akka.persistence.cassandra.query.refresh-interval = 0.5s 20 | akka.persistence.cassandra.journal.event-adapters { 21 | test-tagger = akka.persistence.cassandra.query.javadsl.TestTagger 22 | } 23 | akka.persistence.cassandra.journal.event-adapter-bindings = { 24 | "java.lang.String" = test-tagger 25 | } 26 | datastax-java-driver.profiles.akka-persistence-cassandra-profile.basic.request.page-size = 10 27 | """).withFallback(CassandraLifecycle.config) 28 | } 29 | 30 | class TestTagger extends WriteEventAdapter { 31 | override def manifest(event: Any): String = "" 32 | override def toJournal(event: Any): Any = event match { 33 | case s: String if s.startsWith("a") => Tagged(event, Set("a")) 34 | case _ => event 35 | } 36 | } 37 | 38 | class CassandraReadJournalSpec extends CassandraSpec(CassandraReadJournalSpec.config) { 39 | 40 | lazy val javaQueries = PersistenceQuery(system) 41 | .getReadJournalFor(classOf[javadsl.CassandraReadJournal], scaladsl.CassandraReadJournal.Identifier) 42 | 43 | "Cassandra Read Journal Java API" must { 44 | "start eventsByPersistenceId query" in { 45 | val a = system.actorOf(TestActor.props("a")) 46 | a ! "a-1" 47 | expectMsg("a-1-done") 48 | 49 | val src = javaQueries.eventsByPersistenceId("a", 0L, Long.MaxValue) 50 | src.asScala.map(_.persistenceId).runWith(TestSink.probe[Any]).request(10).expectNext("a").cancel() 51 | } 52 | 53 | "start current eventsByPersistenceId query" in { 54 | val a = system.actorOf(TestActor.props("b")) 55 | a ! "b-1" 56 | expectMsg("b-1-done") 57 | 58 | val src = javaQueries.currentEventsByPersistenceId("b", 0L, Long.MaxValue) 59 | src.asScala.map(_.persistenceId).runWith(TestSink.probe[Any]).request(10).expectNext("b").expectComplete() 60 | } 61 | 62 | "start eventsByTag query" in { 63 | val src = javaQueries.eventsByTag("a", Offset.noOffset) 64 | src.asScala 65 | .map(_.persistenceId) 66 | .runWith(TestSink.probe[Any]) 67 | .request(10) 68 | .expectNext("a") 69 | .expectNoMessage(100.millis) 70 | .cancel() 71 | } 72 | 73 | "start current eventsByTag query" in { 74 | val src = javaQueries.currentEventsByTag("a", Offset.noOffset) 75 | src.asScala.map(_.persistenceId).runWith(TestSink.probe[Any]).request(10).expectNext("a").expectComplete() 76 | } 77 | } 78 | } 79 | -------------------------------------------------------------------------------- /core/src/main/scala/akka/persistence/cassandra/CassandraStatements.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2016-2025 Lightbend Inc. 
3 | */ 4 | 5 | package akka.persistence.cassandra 6 | 7 | import scala.concurrent.ExecutionContext 8 | import scala.concurrent.Future 9 | 10 | import akka.Done 11 | import akka.annotation.InternalApi 12 | import akka.event.LoggingAdapter 13 | import akka.persistence.cassandra.journal.CassandraJournalStatements 14 | import akka.persistence.cassandra.snapshot.CassandraSnapshotStatements 15 | import com.datastax.oss.driver.api.core.CqlSession 16 | import com.datastax.oss.driver.api.core.cql.Row 17 | 18 | /** 19 | * INTERNAL API 20 | */ 21 | @InternalApi 22 | private[akka] class CassandraStatements(val settings: PluginSettings) { 23 | 24 | val journalStatements: CassandraJournalStatements = new CassandraJournalStatements(settings) 25 | 26 | val snapshotStatements: CassandraSnapshotStatements = new CassandraSnapshotStatements(settings.snapshotSettings) 27 | 28 | /** 29 | * Execute creation of keyspace and tables if that is enabled in config. 30 | * Avoid calling this from several threads at the same time to 31 | * reduce the risk of (annoying) "Column family ID mismatch" exception. 32 | * 33 | * Exceptions will be logged but will not fail the returned Future. 34 | */ 35 | def executeAllCreateKeyspaceAndTables(session: CqlSession, log: LoggingAdapter)( 36 | implicit ec: ExecutionContext): Future[Done] = { 37 | for { 38 | _ <- journalStatements.executeCreateKeyspaceAndTables(session, log) 39 | _ <- snapshotStatements.executeCreateKeyspaceAndTables(session, log) 40 | } yield Done 41 | } 42 | 43 | } 44 | 45 | /** 46 | * INTERNAL API: caching to avoid repeated check via ColumnDefinitions 47 | */ 48 | @InternalApi private[akka] class ColumnDefinitionCache { 49 | private def hasColumn(column: String, row: Row, cached: Option[Boolean], updateCache: Boolean => Unit): Boolean = { 50 | cached match { 51 | case Some(b) => b 52 | case None => 53 | val b = row.getColumnDefinitions.contains(column) 54 | updateCache(b) 55 | b 56 | } 57 | } 58 | 59 | @volatile private var _hasMetaColumns: Option[Boolean] = None 60 | private val updateMetaColumnsCache: Boolean => Unit = b => _hasMetaColumns = Some(b) 61 | def hasMetaColumns(row: Row): Boolean = 62 | hasColumn("meta", row, _hasMetaColumns, updateMetaColumnsCache) 63 | 64 | @volatile private var _hasOldTagsColumns: Option[Boolean] = None 65 | private val updateOldTagsColumnsCache: Boolean => Unit = b => _hasOldTagsColumns = Some(b) 66 | def hasOldTagsColumns(row: Row): Boolean = 67 | hasColumn("tag1", row, _hasOldTagsColumns, updateOldTagsColumnsCache) 68 | 69 | @volatile private var _hasTagsColumn: Option[Boolean] = None 70 | private val updateTagsColumnCache: Boolean => Unit = b => _hasTagsColumn = Some(b) 71 | def hasTagsColumn(row: Row): Boolean = 72 | hasColumn("tags", row, _hasTagsColumn, updateTagsColumnCache) 73 | 74 | @volatile private var _hasMessageColumn: Option[Boolean] = None 75 | private val updateMessageColumnCache: Boolean => Unit = b => _hasMessageColumn = Some(b) 76 | def hasMessageColumn(row: Row): Boolean = 77 | hasColumn("message", row, _hasMessageColumn, updateMessageColumnCache) 78 | } 79 | -------------------------------------------------------------------------------- /.github/workflows/check-build-test.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | 3 | on: 4 | pull_request: 5 | push: 6 | branches: 7 | - main 8 | tags-ignore: 9 | - v* 10 | 11 | permissions: 12 | contents: read 13 | 14 | jobs: 15 | style-compile: 16 | name: Compile, Code Style 17 | if: github.event.repository.fork == 
false 18 | runs-on: Akka-Default 19 | env: 20 | JAVA_OPTS: -Xms2G -Xmx2G -Xss2M -XX:ReservedCodeCacheSize=256M -Dfile.encoding=UTF-8 21 | 22 | steps: 23 | - name: Checkout 24 | # https://github.com/actions/checkout/releases 25 | # v4.1.1 26 | uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 27 | with: # https://github.com/olafurpg/setup-scala#faster-checkout-of-big-repos 28 | fetch-depth: 0 29 | 30 | - name: Fetch tags 31 | run: git fetch --depth=100 origin +refs/tags/*:refs/tags/* 32 | 33 | - name: Set up JDK 11 34 | # https://github.com/coursier/setup-action/releases 35 | # v1.3.5 36 | uses: coursier/setup-action@7bde40eee928896f074dbb76d22dd772eed5c65f 37 | with: 38 | jvm: temurin:1.11 39 | 40 | - name: Cache Coursier cache 41 | # https://github.com/coursier/cache-action/releases 42 | # v6.4.5 43 | uses: coursier/cache-action@1ff273bff02a8787bc9f1877d347948af647956d 44 | 45 | - name: "Code style, compile tests" 46 | run: sbt "verifyCodeStyle; +Test/compile" 47 | 48 | test: 49 | name: Test 50 | if: github.event.repository.fork == false 51 | runs-on: Akka-Default 52 | 53 | strategy: 54 | fail-fast: false 55 | matrix: 56 | include: 57 | - { jdk: 'temurin:1.11.0.17', container: "cassandra-latest", test: "test" } 58 | - { jdk: 'temurin:1.11.0.17', container: "cassandra2", test: "'testOnly -- -l RequiresCassandraThree'"} 59 | - { jdk: 'temurin:1.11.0.17', container: "cassandra3", test: "test" } 60 | - { jdk: 'temurin:1.25', container: "cassandra3", test: "test" } 61 | 62 | env: 63 | JAVA_OPTS: -Xms2G -Xmx2G -Xss2M -XX:ReservedCodeCacheSize=256M -Dfile.encoding=UTF-8 64 | 65 | steps: 66 | - name: Checkout 67 | # https://github.com/actions/checkout/releases 68 | # v4.1.1 69 | uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 70 | with: 71 | fetch-depth: 0 72 | 73 | - name: Checkout GitHub merge 74 | if: github.event.pull_request 75 | run: |- 76 | git fetch origin pull/${{ github.event.pull_request.number }}/merge:scratch 77 | git checkout scratch 78 | 79 | - name: Set up JDK ${{ matrix.jdk }} 80 | # https://github.com/coursier/setup-action/releases 81 | # v1.3.5 82 | uses: coursier/setup-action@7bde40eee928896f074dbb76d22dd772eed5c65f 83 | with: 84 | jvm: ${{ matrix.jdk }} 85 | 86 | - name: Cache Coursier cache 87 | # https://github.com/coursier/cache-action/releases 88 | # v6.4.5 89 | uses: coursier/cache-action@1ff273bff02a8787bc9f1877d347948af647956d 90 | 91 | - name: Test against ${{ matrix.container }} / ${{ matrix.jdk }} 92 | run: |- 93 | docker compose up -d ${{ matrix.container }} && sbt ${{matrix.test}} 94 | -------------------------------------------------------------------------------- /core/src/main/scala/akka/persistence/cassandra/compaction/BaseCompactionStrategy.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2016-2025 Lightbend Inc. 
3 | */ 4 | 5 | package akka.persistence.cassandra.compaction 6 | 7 | import com.typesafe.config.{ Config, ConfigFactory } 8 | 9 | import scala.jdk.CollectionConverters._ 10 | 11 | /* 12 | * Based upon https://github.com/apache/cassandra/blob/cassandra-2.2/src/java/org/apache/cassandra/db/compaction/AbstractCompactionStrategy.java 13 | */ 14 | abstract class BaseCompactionStrategy(config: Config, className: String, propertyKeys: List[String]) 15 | extends CassandraCompactionStrategy { 16 | require(config.hasPath("class") && config.getString("class") == className, s"Config does not specify a $className") 17 | require( 18 | config.entrySet().asScala.map(_.getKey).forall(propertyKeys.contains(_)), 19 | s"Config contains properties not supported by a $className. Supported: $propertyKeys. Supplied: ${config.entrySet().asScala.map(_.getKey)}") 20 | 21 | val enabled: Boolean = 22 | if (config.hasPath("enabled")) config.getBoolean("enabled") else true 23 | val tombstoneCompactionInterval: Long = 24 | if (config.hasPath("tombstone_compaction_interval")) 25 | config.getLong("tombstone_compaction_interval") 26 | else 86400 27 | val tombstoneThreshold: Double = 28 | if (config.hasPath("tombstone_threshold")) 29 | config.getDouble("tombstone_threshold") 30 | else 0.2 31 | val uncheckedTombstoneCompaction: Boolean = 32 | if (config.hasPath("unchecked_tombstone_compaction")) 33 | config.getBoolean("unchecked_tombstone_compaction") 34 | else false 35 | 36 | require( 37 | tombstoneCompactionInterval > 0, 38 | s"tombstone_compaction_interval must be greater than 0, but was $tombstoneCompactionInterval") 39 | require(tombstoneThreshold > 0, s"tombstone_threshold must be greater than 0, but was $tombstoneThreshold") 40 | 41 | override def asCQL: String = 42 | s"""'enabled' : $enabled, 43 | |'tombstone_compaction_interval' : $tombstoneCompactionInterval, 44 | |'tombstone_threshold' : $tombstoneThreshold, 45 | |'unchecked_tombstone_compaction' : $uncheckedTombstoneCompaction 46 | """.stripMargin.trim 47 | } 48 | 49 | object BaseCompactionStrategy extends CassandraCompactionStrategyConfig[BaseCompactionStrategy] { 50 | override val ClassName: String = "BaseCompactionStrategy" 51 | 52 | override def propertyKeys: List[String] = 53 | List("class", "enabled", "tombstone_compaction_interval", "tombstone_threshold", "unchecked_tombstone_compaction") 54 | 55 | override def fromConfig(config: Config): BaseCompactionStrategy = { 56 | val className = 57 | if (config.hasPath("class")) config.getString("class") else "" 58 | 59 | className match { 60 | case TimeWindowCompactionStrategy.ClassName => 61 | TimeWindowCompactionStrategy.fromConfig(config) 62 | case LeveledCompactionStrategy.ClassName => 63 | LeveledCompactionStrategy.fromConfig(config) 64 | case SizeTieredCompactionStrategy.ClassName => 65 | SizeTieredCompactionStrategy.fromConfig(config) 66 | case _ => 67 | SizeTieredCompactionStrategy.fromConfig(ConfigFactory.parseString(s""" 68 | |class = "${SizeTieredCompactionStrategy.ClassName}" 69 | """.stripMargin.trim)) 70 | } 71 | } 72 | } 73 | -------------------------------------------------------------------------------- /project/Dependencies.scala: -------------------------------------------------------------------------------- 1 | import sbt._ 2 | import Keys._ 3 | 4 | object Dependencies { 5 | // Java Platform version for JavaDoc creation 6 | lazy val JavaDocLinkVersion = scala.util.Properties.javaSpecVersion 7 | 8 | val Scala213 = "2.13.17" 9 | val Scala3 = "3.3.7" 10 | 11 | val Scala2Versions = Seq(Scala213) 12 
| val ScalaVersions = Dependencies.Scala2Versions :+ Dependencies.Scala3 13 | 14 | val AkkaVersion = System.getProperty("override.akka.version", "2.10.11") 15 | val AkkaVersionInDocs = VersionNumber(AkkaVersion).numbers match { case Seq(major, minor, _*) => s"$major.$minor" } 16 | val CassandraVersionInDocs = "4.0" 17 | // Should be in sync with the version of the driver in Alpakka Cassandra 18 | val CassandraDriverVersion = "4.19.1" 19 | val DriverVersionInDocs = "4.14" 20 | 21 | val AlpakkaVersion = "10.0.0" 22 | val AlpakkaVersionInDocs = "10.0" 23 | // for the example project 24 | val AkkaManagementVersion = "1.6.4" 25 | 26 | val Logback = "ch.qos.logback" % "logback-classic" % "1.5.18" 27 | 28 | val reconcilerDependencies = Seq( 29 | "com.typesafe.akka" %% "akka-actor-testkit-typed" % AkkaVersion % Test, 30 | "com.typesafe.akka" %% "akka-stream-testkit" % AkkaVersion % Test) 31 | 32 | val akkaTestDeps = Seq( 33 | "com.typesafe.akka" %% "akka-persistence", 34 | "com.typesafe.akka" %% "akka-persistence-typed", 35 | "com.typesafe.akka" %% "akka-persistence-query", 36 | "com.typesafe.akka" %% "akka-cluster-typed", 37 | "com.typesafe.akka" %% "akka-actor-testkit-typed", 38 | "com.typesafe.akka" %% "akka-persistence-tck", 39 | "com.typesafe.akka" %% "akka-stream-testkit", 40 | "com.typesafe.akka" %% "akka-multi-node-testkit", 41 | "com.typesafe.akka" %% "akka-cluster-sharding") 42 | 43 | val akkaPersistenceCassandraDependencies = Seq( 44 | "com.lightbend.akka" %% "akka-stream-alpakka-cassandra" % AlpakkaVersion, 45 | "com.typesafe.akka" %% "akka-persistence" % AkkaVersion, 46 | "com.typesafe.akka" %% "akka-persistence-query" % AkkaVersion, 47 | "com.typesafe.akka" %% "akka-cluster-tools" % AkkaVersion, 48 | "org.apache.cassandra" % "java-driver-core" % CassandraDriverVersion, 49 | Logback % Test, 50 | "org.scalatest" %% "scalatest" % "3.2.19" % Test) ++ akkaTestDeps.map(_ % AkkaVersion % Test) 51 | 52 | val exampleDependencies = Seq( 53 | Logback, 54 | "com.typesafe.akka" %% "akka-persistence-typed" % AkkaVersion, 55 | "com.typesafe.akka" %% "akka-discovery" % AkkaVersion, 56 | "com.typesafe.akka" %% "akka-serialization-jackson" % AkkaVersion, 57 | "com.typesafe.akka" %% "akka-cluster-sharding-typed" % AkkaVersion, 58 | "com.lightbend.akka.management" %% "akka-management" % AkkaManagementVersion, 59 | "com.lightbend.akka.management" %% "akka-management-cluster-bootstrap" % AkkaManagementVersion, 60 | "com.lightbend.akka.management" %% "akka-management-cluster-http" % AkkaManagementVersion, 61 | "com.lightbend.akka.discovery" %% "akka-discovery-kubernetes-api" % AkkaManagementVersion, 62 | "org.hdrhistogram" % "HdrHistogram" % "2.1.12") 63 | 64 | val dseTestDependencies = Seq( 65 | "com.datastax.dse" % "dse-java-driver-core" % "2.3.0" % Test, 66 | "com.typesafe.akka" %% "akka-persistence-tck" % AkkaVersion % Test, 67 | "com.typesafe.akka" %% "akka-actor-testkit-typed" % AkkaVersion % Test, 68 | "com.typesafe.akka" %% "akka-stream-testkit" % AkkaVersion % Test, 69 | Logback % Test) 70 | } 71 | -------------------------------------------------------------------------------- /core/src/test/scala/akka/persistence/cassandra/query/scaladsl/CassandraReadJournalSpec.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2016-2025 Lightbend Inc.
3 | */ 4 | 5 | package akka.persistence.cassandra.query.scaladsl 6 | 7 | import akka.persistence.cassandra.query.TestActor 8 | import akka.persistence.cassandra.{ CassandraLifecycle, CassandraSpec } 9 | import akka.persistence.journal.{ Tagged, WriteEventAdapter } 10 | import akka.persistence.query.NoOffset 11 | import akka.stream.alpakka.cassandra.CassandraMetricsRegistry 12 | import akka.stream.testkit.scaladsl.TestSink 13 | import com.typesafe.config.ConfigFactory 14 | 15 | import scala.concurrent.duration._ 16 | 17 | object CassandraReadJournalSpec { 18 | val config = ConfigFactory.parseString(s""" 19 | akka.actor.serialize-messages=off 20 | akka.persistence.cassandra.query.refresh-interval = 0.5s 21 | akka.persistence.cassandra.journal.event-adapters { 22 | test-tagger = akka.persistence.cassandra.query.scaladsl.TestTagger 23 | } 24 | akka.persistence.cassandra.journal.event-adapter-bindings = { 25 | "java.lang.String" = test-tagger 26 | } 27 | akka.persistence.cassandra.log-queries = off 28 | datastax-java-driver.profiles.akka-persistence-cassandra-profile.basic.request.page-size = 10 29 | """).withFallback(CassandraLifecycle.config) 30 | } 31 | 32 | class TestTagger extends WriteEventAdapter { 33 | override def manifest(event: Any): String = "" 34 | override def toJournal(event: Any): Any = event match { 35 | case s: String if s.startsWith("a") => 36 | Tagged(event, Set("a")) 37 | case _ => 38 | event 39 | } 40 | } 41 | 42 | class CassandraReadJournalSpec extends CassandraSpec(CassandraReadJournalSpec.config) { 43 | 44 | "Cassandra Read Journal Scala API" must { 45 | "start eventsByPersistenceId query" in { 46 | val a = system.actorOf(TestActor.props("a")) 47 | a ! "a-1" 48 | expectMsg("a-1-done") 49 | 50 | val src = queries.eventsByPersistenceId("a", 0L, Long.MaxValue) 51 | src.map(_.persistenceId).runWith(TestSink.probe[Any]).request(10).expectNext("a").cancel() 52 | } 53 | 54 | "start current eventsByPersistenceId query" in { 55 | val a = system.actorOf(TestActor.props("b")) 56 | a ! "b-1" 57 | expectMsg("b-1-done") 58 | 59 | val src = queries.currentEventsByPersistenceId("b", 0L, Long.MaxValue) 60 | src.map(_.persistenceId).runWith(TestSink.probe[Any]).request(10).expectNext("b").expectComplete() 61 | } 62 | 63 | // these tests rely on events written in previous tests 64 | "start eventsByTag query" in { 65 | val src = queries.eventsByTag("a", NoOffset) 66 | src 67 | .map(_.persistenceId) 68 | .runWith(TestSink.probe[Any]) 69 | .request(10) 70 | .expectNext("a") 71 | .expectNoMessage(100.millis) 72 | .cancel() 73 | } 74 | 75 | "start current eventsByTag query" in { 76 | val src = queries.currentEventsByTag("a", NoOffset) 77 | src.map(_.persistenceId).runWith(TestSink.probe[Any]).request(10).expectNext("a").expectComplete() 78 | } 79 | 80 | "insert Cassandra metrics to Cassandra Metrics Registry" in { 81 | val registry = CassandraMetricsRegistry(system).getRegistry 82 | val snapshots = 83 | registry.getNames.toArray().filter(value => value.toString.startsWith("akka.persistence.cassandra")) 84 | snapshots.length should be > 0 85 | } 86 | } 87 | } 88 | -------------------------------------------------------------------------------- /docs/src/main/paradox/snapshots.md: -------------------------------------------------------------------------------- 1 | # Snapshots 2 | 3 | ## Features 4 | 5 | - Implements the Akka Persistence @extref:[snapshot store plugin API](akka:persistence-journals.html#snapshot-store-plugin-api). 
6 | 7 | ## Schema 8 | 9 | The keyspace and tables need to be created before using the plugin. 10 | 11 | @@@ warning 12 | 13 | Auto creation of the keyspace and tables 14 | is included as a development convenience and should never be used in production. Cassandra does not handle 15 | concurrent schema migrations well and if every Akka node tries to create the schema at the same time you'll 16 | get column id mismatch errors in Cassandra. 17 | 18 | @@@ 19 | 20 | The default keyspace used by the plugin is `akka_snapshot`; it should be created with the 21 | `NetworkTopologyStrategy` and a replication factor of at least 3: 22 | 23 | ``` 24 | CREATE KEYSPACE IF NOT EXISTS akka_snapshot WITH replication = {'class': 'NetworkTopologyStrategy', '<datacenter-name>' : 3 }; 25 | ``` 26 | 27 | For local testing, and as the default if you enable `akka.persistence.cassandra.snapshot.keyspace-autocreate`, you can use the following: 28 | 29 | @@snip [snapshot-keyspace](/target/snapshot-keyspace.txt) { #snapshot-keyspace } 30 | 31 | A single table is required. It needs to be created before starting your application. 32 | For local testing you can enable `akka.persistence.cassandra.snapshot.tables-autocreate`. 33 | The default table definitions look like this: 34 | 35 | @@snip [snapshot-tables](/target/snapshot-tables.txt) { #snapshot-tables} 36 | 37 | ### Consistency 38 | 39 | By default, the snapshot store uses `ONE` for all reads and writes, since snapshots 40 | should only be used as an optimization to reduce the number of replayed events. 41 | If a recovery doesn't see the latest snapshot it will just start from an older snapshot 42 | and replay events from there. Be careful not to delete events too eagerly after storing 43 | snapshots since the deletes may be visible before the snapshot is visible. Keep a few 44 | snapshots and corresponding events before deleting older events and snapshots. 45 | 46 | The consistency level for snapshots can be changed with: 47 | 48 | ``` 49 | datastax-java-driver.profiles { 50 | akka-persistence-cassandra-snapshot-profile { 51 | basic.request.consistency = QUORUM 52 | } 53 | } 54 | ``` 55 | 56 | ## Configuration 57 | 58 | To activate the snapshot-store plugin, add the following line to your Akka `application.conf`: 59 | 60 | akka.persistence.snapshot-store.plugin = "akka.persistence.cassandra.snapshot" 61 | 62 | This will run the snapshot store with its default settings. The default settings can be changed with the configuration 63 | properties defined in @ref:[reference.conf](configuration.md#default-configuration). Snapshot store configuration is under 64 | `akka.persistence.cassandra.snapshot`. 65 | 66 | ## Limitations 67 | 68 | A snapshot is stored in a single row, so the maximum size of a serialized snapshot is Cassandra's configured 69 | [`max_mutation_size_in_kb`](https://cassandra.apache.org/doc/latest/faq/index.html#can-large-blob), which is 16MB by default. 70 | 71 | ## Delete all snapshots 72 | 73 | The @apidoc[akka.persistence.cassandra.cleanup.Cleanup] tool can be used for deleting all events and/or snapshots 74 | given a list of `persistenceIds` without using persistent actors. It's important that the actors with corresponding 75 | `persistenceId` are not running at the same time as the tool is used. See @ref[Database Cleanup](./cleanup.md) for more details.
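For illustration, a minimal sketch of invoking the tool from code, assuming an actor system that is configured with this plugin; see @ref[Database Cleanup](./cleanup.md) for the authoritative examples and exact signatures:

```scala
import akka.actor.ActorSystem
import akka.persistence.cassandra.cleanup.Cleanup

val system = ActorSystem("cleanup-example")
val cleanup = new Cleanup(system)

// Deletes events, tagged events and snapshots for the given persistence ids.
// With neverUsePersistenceIdAgain = true the highest sequence number record is
// also removed, so these ids must never be written to again afterwards.
cleanup.deleteAll(List("pid-1", "pid-2"), neverUsePersistenceIdAgain = true)
```

The call returns a `Future[Done]` that completes when the deletes have been executed.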
76 | -------------------------------------------------------------------------------- /docs/release-train-issue-template.md: -------------------------------------------------------------------------------- 1 | Release Akka Persistence Cassandra $VERSION$ 2 | 3 | 13 | 14 | ### Cutting the release 15 | 16 | - [ ] Check that open PRs and issues assigned to the milestone are reasonable 17 | - [ ] Update the Change date and version in the LICENSE file 18 | - [ ] Create a new milestone for the [next version](https://github.com/akka/akka-persistence-cassandra/milestones) 19 | - [ ] Close the [$VERSION$ milestone](https://github.com/akka/akka-persistence-cassandra/milestones?direction=asc&sort=due_date) 20 | - [ ] Make sure all important PRs have been merged 21 | - [ ] Wait until [main build finished](https://github.com/akka/akka-persistence-cassandra/actions) after merging the latest PR 22 | - [ ] Update the [draft release](https://github.com/akka/akka-persistence-cassandra/releases) with the next tag version `v$VERSION$`, title and release description. Use the `Publish release` button, which will create the tag. 23 | - [ ] Check that GitHub Actions release build has executed successfully (GitHub Actions will start a [CI build](https://github.com/akka/akka-persistence-cassandra/actions) for the new tag and publish artifacts to https://repo.akka.io/maven) 24 | 25 | ### Check availability 26 | 27 | - [ ] Check [API](https://doc.akka.io/api/akka-persistence-cassandra/$VERSION$/) documentation 28 | - [ ] Check [reference](https://doc.akka.io/libraries/akka-persistence-cassandra/$VERSION$/) documentation. Check that the reference docs were deployed and show a version warning (see section below on how to fix the version warning). 29 | - [ ] Check the release on https://repo.akka.io/maven/com/typesafe/akka/akka-persistence-cassandra_2.13/$VERSION$/akka-persistence-cassandra_2.13-$VERSION$.pom 30 | 31 | ### When everything is on https://repo.akka.io/maven 32 | - [ ] Log into `gustav.akka.io` as `akkarepo` 33 | - [ ] If this updates the `current` version, run `./update-akka-persistence-cassandra-current-version.sh $VERSION$` 34 | - [ ] otherwise check changes and commit the new version to the local git repository 35 | ``` 36 | cd ~/www 37 | git status 38 | git add libraries/akka-persistence-cassandra/current libraries/akka-persistence-cassandra/$VERSION$ 39 | git add api/akka-persistence-cassandra/current api/akka-persistence-cassandra/$VERSION$ 40 | git commit -m "Akka Persistence Cassandra $VERSION$" 41 | ``` 42 | 43 | ### Announcements 44 | 45 | For important patch releases, and only if critical issues have been fixed: 46 | 47 | - [ ] Send a release notification to [Lightbend discuss](https://discuss.akka.io) 48 | - [ ] Tweet using the [@akkateam](https://twitter.com/akkateam/) account (or ask someone to) about the new release 49 | - [ ] Announce internally (with links to Tweet, discuss) 50 | 51 | For minor or major releases: 52 | 53 | - [ ] Include noteworthy features and improvements in Akka umbrella release announcement at akka.io. Coordinate with PM and marketing. 
54 | 55 | ### Afterwards 56 | 57 | - [ ] Update [akka-dependencies bom](https://github.com/lightbend/akka-dependencies) and version for [Akka module versions](https://doc.akka.io/libraries/akka-dependencies/current/) in [akka-dependencies repo](https://github.com/akka/akka-dependencies) 58 | - [ ] Update [Akka Guide samples](https://github.com/lightbend/akka-guide) 59 | - Close this issue 60 | -------------------------------------------------------------------------------- /core/src/test/scala/akka/persistence/cassandra/journal/CassandraEventUpdateSpec.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2016-2025 Lightbend Inc. 3 | */ 4 | 5 | package akka.persistence.cassandra.journal 6 | 7 | import java.util.UUID 8 | 9 | import scala.concurrent.Await 10 | import akka.Done 11 | import akka.event.Logging 12 | import akka.persistence.PersistentRepr 13 | import akka.persistence.cassandra.journal.CassandraJournal.Serialized 14 | import akka.persistence.cassandra.{ CassandraLifecycle, CassandraSpec, TestTaggingActor, _ } 15 | import akka.serialization.SerializationExtension 16 | import com.typesafe.config.ConfigFactory 17 | 18 | import scala.concurrent.ExecutionContext 19 | import scala.concurrent.Future 20 | import akka.actor.ExtendedActorSystem 21 | import akka.stream.alpakka.cassandra.CqlSessionProvider 22 | import akka.stream.alpakka.cassandra.scaladsl.CassandraSession 23 | 24 | object CassandraEventUpdateSpec { 25 | val config = ConfigFactory.parseString(""" 26 | """).withFallback(CassandraLifecycle.config) 27 | } 28 | 29 | class CassandraEventUpdateSpec extends CassandraSpec(CassandraEventUpdateSpec.config) { s => 30 | 31 | private[akka] val log = Logging(system, classOf[CassandraEventUpdateSpec]) 32 | private val serialization = SerializationExtension(system) 33 | 34 | val updater = new CassandraEventUpdate { 35 | 36 | override private[akka] val log = s.log 37 | override private[akka] def settings: PluginSettings = 38 | PluginSettings(system) 39 | override private[akka] implicit val ec: ExecutionContext = system.dispatcher 40 | // use separate session, not shared via CassandraSessionRegistry because init is different 41 | private val sessionProvider = 42 | CqlSessionProvider( 43 | system.asInstanceOf[ExtendedActorSystem], 44 | system.settings.config.getConfig(PluginSettings.DefaultConfigPath)) 45 | override private[akka] val session: CassandraSession = 46 | new CassandraSession( 47 | system, 48 | sessionProvider, 49 | ec, 50 | log, 51 | systemName, 52 | init = _ => Future.successful(Done), 53 | onClose = () => ()) 54 | } 55 | 56 | "CassandraEventUpdate" must { 57 | "update the event in messages" in { 58 | val pid = nextPid 59 | val a = system.actorOf(TestTaggingActor.props(pid)) 60 | a ! "e-1" 61 | expectMsgType[TestTaggingActor.Ack.type] 62 | val eventsBefore = events(pid) 63 | eventsBefore.map(_.pr.payload) shouldEqual Seq("e-1") 64 | val originalEvent = eventsBefore.head 65 | val modifiedEvent = serialize(originalEvent.pr.withPayload("secrets"), originalEvent.offset, Set("ignored")) 66 | 67 | updater.updateEvent(modifiedEvent).futureValue shouldEqual Done 68 | 69 | eventPayloadsWithTags(pid) shouldEqual Seq(("secrets", Set())) 70 | } 71 | 72 | "update the event in tag_views" in { 73 | val pid = nextPid 74 | val b = system.actorOf(TestTaggingActor.props(pid, Set("red", "blue"))) 75 | b ! 
"e-1" 76 | expectMsgType[TestTaggingActor.Ack.type] 77 | val eventsBefore = events(pid).head 78 | val modifiedEvent = serialize(eventsBefore.pr.withPayload("hidden"), eventsBefore.offset, Set("ignored")) 79 | 80 | expectEventsForTag(tag = "red", "e-1") 81 | expectEventsForTag(tag = "blue", "e-1") 82 | 83 | updater.updateEvent(modifiedEvent).futureValue shouldEqual Done 84 | 85 | expectEventsForTag(tag = "red", "hidden") 86 | expectEventsForTag(tag = "blue", "hidden") 87 | } 88 | 89 | def serialize(pr: PersistentRepr, offset: UUID, tags: Set[String]): Serialized = { 90 | import system.dispatcher 91 | Await.result(serializeEvent(pr, tags, offset, Hour, serialization, system), remainingOrDefault) 92 | } 93 | } 94 | } 95 | -------------------------------------------------------------------------------- /core/src/test/scala/akka/persistence/cassandra/sharding/ClusterShardingQuickTerminationSpec.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2016-2025 Lightbend Inc. 3 | */ 4 | 5 | package akka.persistence.cassandra.sharding 6 | 7 | import akka.actor.{ ActorLogging, ActorRef, Props, ReceiveTimeout } 8 | import akka.cluster.{ Cluster, MemberStatus } 9 | import akka.cluster.sharding.{ ClusterSharding, ClusterShardingSettings, ShardRegion } 10 | import akka.persistence.PersistentActor 11 | import akka.persistence.cassandra.CassandraSpec 12 | import akka.testkit.TestProbe 13 | 14 | import scala.concurrent.duration._ 15 | 16 | object ClusterShardingQuickTerminationSpec { 17 | 18 | case object Increment 19 | case object Decrement 20 | final case class Get(counterId: Long) 21 | final case class EntityEnvelope(id: Long, payload: Any) 22 | case object Ack 23 | 24 | case object Stop 25 | final case class CounterChanged(delta: Int) 26 | 27 | class Counter extends PersistentActor with ActorLogging { 28 | import ShardRegion.Passivate 29 | 30 | context.setReceiveTimeout(5.seconds) 31 | 32 | // self.path.name is the entity identifier (utf-8 URL-encoded) 33 | override def persistenceId: String = "Counter-" + self.path.name 34 | 35 | var count = 0 36 | 37 | def updateState(event: CounterChanged): Unit = 38 | count += event.delta 39 | 40 | override def receiveRecover: Receive = { 41 | case evt: CounterChanged => updateState(evt) 42 | case other => log.debug("Other: {}", other) 43 | } 44 | 45 | override def receiveCommand: Receive = { 46 | case Increment => persist(CounterChanged(+1))(updateState) 47 | case Decrement => persist(CounterChanged(-1))(updateState) 48 | case Get(_) => sender() ! count 49 | case ReceiveTimeout => context.parent ! Passivate(stopMessage = Stop) 50 | case Stop => 51 | sender() ! 
Ack 52 | context.stop(self) 53 | } 54 | } 55 | val extractEntityId: ShardRegion.ExtractEntityId = { 56 | case EntityEnvelope(id, payload) => (id.toString, payload) 57 | case msg @ Get(id) => (id.toString, msg) 58 | } 59 | 60 | val numberOfShards = 100 61 | 62 | val extractShardId: ShardRegion.ExtractShardId = { 63 | case EntityEnvelope(id, _) => (id % numberOfShards).toString 64 | case Get(id) => (id % numberOfShards).toString 65 | } 66 | 67 | } 68 | 69 | class ClusterShardingQuickTerminationSpec extends CassandraSpec(""" 70 | akka.actor.provider = cluster 71 | """.stripMargin) { 72 | 73 | import ClusterShardingQuickTerminationSpec._ 74 | 75 | "Cassandra Plugin with Cluster Sharding" must { 76 | "clear state if persistent actor shuts down" in { 77 | Cluster(system).join(Cluster(system).selfMember.address) 78 | awaitAssert { 79 | Cluster(system).selfMember.status shouldEqual MemberStatus.Up 80 | } 81 | ClusterSharding(system).start( 82 | typeName = "tagging", 83 | entityProps = Props[Counter](), 84 | settings = ClusterShardingSettings(system), 85 | extractEntityId = extractEntityId, 86 | extractShardId = extractShardId) 87 | 88 | (0 to 100).foreach { i => 89 | val counterRegion: ActorRef = ClusterSharding(system).shardRegion("tagging") 90 | awaitAssert { 91 | val sender = TestProbe() 92 | counterRegion.tell(Get(123), sender.ref) 93 | sender.expectMsg(500.millis, i) 94 | } 95 | 96 | counterRegion ! EntityEnvelope(123, Increment) 97 | counterRegion ! Get(123) 98 | expectMsg(i + 1) 99 | 100 | counterRegion ! EntityEnvelope(123, Stop) 101 | expectMsg(Ack) 102 | } 103 | } 104 | } 105 | } 106 | -------------------------------------------------------------------------------- /example/src/main/scala/akka/persistence/cassandra/example/ReadSide.scala: -------------------------------------------------------------------------------- 1 | package akka.persistence.cassandra.example 2 | 3 | import akka.actor.typed.pubsub.Topic 4 | import akka.actor.typed.scaladsl.Behaviors 5 | import akka.actor.typed.{ ActorRef, ActorSystem, Behavior, PostStop } 6 | import akka.cluster.sharding.typed.{ ClusterShardingSettings, ShardedDaemonProcessSettings } 7 | import akka.cluster.sharding.typed.scaladsl.ShardedDaemonProcess 8 | import akka.stream.{ KillSwitches, SharedKillSwitch } 9 | import com.typesafe.config.Config 10 | import org.HdrHistogram.Histogram 11 | import scala.concurrent.duration._ 12 | 13 | object ReadSide { 14 | 15 | sealed trait Command 16 | private case object ReportMetrics extends Command 17 | 18 | object Settings { 19 | def apply(config: Config): Settings = 20 | Settings(config.getInt("processors"), config.getInt("tags-per-processor")) 21 | } 22 | 23 | case class Settings(nrProcessors: Int, tagsPerProcessor: Int) { 24 | val nrTags: Int = nrProcessors * tagsPerProcessor 25 | } 26 | 27 | def apply( 28 | system: ActorSystem[_], 29 | topic: ActorRef[Topic.Command[ReadSideTopic.ReadSideMetrics]], 30 | settings: Settings): Unit = { 31 | system.log.info("Running {} processors", settings.nrProcessors) 32 | val killSwitch: SharedKillSwitch = KillSwitches.shared("eventProcessorSwitch") 33 | ShardedDaemonProcess(system).init( 34 | "tag-processor", 35 | settings.nrProcessors - 1, // bug that creates +1 processor FIXME remove in 2.6.5 36 | i => behavior(topic, i, settings, killSwitch), 37 | ShardedDaemonProcessSettings(system).withShardingSettings(ClusterShardingSettings(system).withRole("read")), 38 | None) 39 | } 40 | 41 | private def behavior( 42 | topic: ActorRef[Topic.Command[ReadSideTopic.ReadSideMetrics]], 
43 | nr: Int, 44 | settings: Settings, 45 | killSwitch: SharedKillSwitch): Behavior[Command] = 46 | Behaviors.withTimers { timers => 47 | timers.startTimerAtFixedRate(ReportMetrics, 10.second) 48 | Behaviors.setup { ctx => 49 | val start = (settings.tagsPerProcessor * nr) 50 | val end = start + (settings.tagsPerProcessor) - 1 51 | val tags = (start to end).map(i => s"tag-$i") 52 | ctx.log.info("Processor {} processing tags {}", nr, tags) 53 | // milliseconds, highest value = 1 minute 54 | val histogram = new Histogram(10 * 1000 * 60, 2) 55 | // maybe easier to just have these as different actors 56 | // my thinking is we can start with a large number of tags and scale out 57 | // read side processors later 58 | // having more tags will also increase write throughput/latency as it'll write to 59 | // many partitions 60 | // downside is running many streams/queries against c* 61 | tags.foreach( 62 | tag => 63 | new EventProcessorStream[ConfigurablePersistentActor.Event]( 64 | ctx.system, 65 | ctx.executionContext, 66 | s"processor-$nr", 67 | tag).runQueryStream(killSwitch, histogram)) 68 | 69 | Behaviors 70 | .receiveMessage[Command] { 71 | case ReportMetrics => 72 | if (histogram.getTotalCount > 0) { 73 | topic ! Topic.Publish( 74 | ReadSideTopic.ReadSideMetrics( 75 | histogram.getTotalCount, 76 | histogram.getMaxValue, 77 | histogram.getValueAtPercentile(99), 78 | histogram.getValueAtPercentile(50))) 79 | histogram.reset() 80 | } 81 | Behaviors.same 82 | } 83 | .receiveSignal { 84 | case (_, PostStop) => 85 | killSwitch.shutdown() 86 | Behaviors.same 87 | } 88 | } 89 | } 90 | 91 | } 92 | -------------------------------------------------------------------------------- /docs/src/main/paradox/overview.md: -------------------------------------------------------------------------------- 1 | # Overview 2 | 3 | The Akka Persistence Cassandra plugin allows for using [Apache Cassandra](https://cassandra.apache.org) as a backend for @extref:[Akka Persistence](akka:typed/persistence.html) and @extref:[Akka Persistence Query](akka:persistence-query.html). It uses @extref:[Alpakka Cassandra](alpakka:cassandra.html) for Cassandra access which is based on the @extref:[Datastax Java Driver](java-driver:). 4 | 5 | ## Project Info 6 | 7 | @@project-info{ projectId="core" } 8 | 9 | ## Dependencies 10 | 11 | This plugin requires **Akka $akka.version$** or later. See [Akka's Binary Compatibility Rules](https://doc.akka.io/libraries/akka-core/current/common/binary-compatibility-rules.html) for details. 12 | 13 | The Akka dependencies are available from Akka's library repository. To access them there, you need to configure the URL for this repository. 14 | 15 | @@repository [sbt,Maven,Gradle] { 16 | id="akka-repository" 17 | name="Akka library repository" 18 | url="https://repo.akka.io/maven" 19 | } 20 | 21 | Additionally, add the dependencies as below. 
22 | 23 | @@dependency [sbt,Maven,Gradle] { 24 | group=com.typesafe.akka 25 | artifact=akka-persistence-cassandra_$scala.binary.version$ 26 | version=$project.version$ 27 | symbol=AkkaVersion 28 | value=$akka.version$ 29 | group1=com.typesafe.akka 30 | artifact1=akka-persistence_$scala.binary.version$ 31 | version1=AkkaVersion 32 | group2=com.typesafe.akka 33 | artifact2=akka-persistence-query_$scala.binary.version$ 34 | version2=AkkaVersion 35 | group3=com.typesafe.akka 36 | artifact3=akka-cluster-tools_$scala.binary.version$ 37 | version3=AkkaVersion 38 | } 39 | 40 | Note that it is important that all `akka-*` dependencies are of the same version, so it is recommended to depend on them explicitly to avoid problems with transitive dependencies causing an unlucky mix of versions. 41 | 42 | The table below shows Akka Persistence Cassandra’s direct dependencies and the second tab shows all libraries it depends on transitively. 43 | 44 | @@dependencies{ projectId="core" } 45 | 46 | To use the plugin with **Akka 2.5.x** you must use version 0.103 or later in the 0.x series. 47 | 48 | ## Supported features 49 | 50 | Be aware that many of the new features of [Akka Distributed Cluster](https://doc.akka.io/libraries/akka-distributed-cluster/current/) 51 | and [Akka Edge](https://doc.akka.io/libraries/akka-edge/current/) are only implemented by the 52 | [Akka Persistence R2DBC plugin](https://doc.akka.io/libraries/akka-persistence-r2dbc/current/) and are not supported by the 53 | Cassandra plugin. 54 | 55 | Those features were developed for Distributed Cluster and Edge, but they are also useful in other contexts. 56 | Examples of concrete features not supported by the Cassandra plugin: 57 | 58 | * `eventsBySlices` query 59 | * Projections over gRPC 60 | * Replicated Event Sourcing over gRPC 61 | * Dynamic scaling of number of Projection instances 62 | * Low latency Projections 63 | * Projections starting from snapshots 64 | * Scalability of many Projections 65 | * Durable State entities 66 | 67 | ## History 68 | 69 | This [Apache Cassandra](https://cassandra.apache.org/) plugin to Akka Persistence was initiated [originally](https://github.com/krasserm/akka-persistence-cassandra) by Martin Krasser, [@krasserm](https://github.com/krasserm) in 2014. 70 | 71 | It moved to the [Akka](https://github.com/akka/) organisation in 2016 and the first release after that move was 0.7 in January 2016. 72 | 73 | ## Contributing 74 | 75 | Please feel free to contribute to Akka and Akka Persistence Cassandra by reporting issues you identify, or by suggesting changes to the code. Please refer to our [contributing instructions](https://github.com/akka/akka/blob/main/CONTRIBUTING.md) to learn how it can be done. 76 | 77 | We want Akka to thrive in a welcoming and open atmosphere and expect all contributors to respect our [code of conduct](https://www.lightbend.com/conduct). 78 | -------------------------------------------------------------------------------- /example/README.md: -------------------------------------------------------------------------------- 1 | # End to end example 2 | 3 | This is for testing events by tag end to end. 4 | 5 | All events are tagged with a configurable number of tags `tag-1`, `tag-2`, etc. 6 | 7 | Then there are N processors that each process a configured number of tags.
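As a quick sketch of how those two settings combine (the `Settings` class is shown in `ReadSide.scala`; the values below are made up for illustration):

```scala
import akka.persistence.cassandra.example.ReadSide

// 4 processors, each handling 8 tags => 32 tags in total
val settings = ReadSide.Settings(nrProcessors = 4, tagsPerProcessor = 8)
assert(settings.nrTags == 32)
```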
8 | 9 | The write side will use processors * tags-per-processor as the total number of tags. 10 | 11 | There are three roles: 12 | 13 | * write - run the persistent actors in sharding 14 | * load - generate load for the persistent actors 15 | * read - run the sharded daemon process to read the tagged events 16 | 17 | The read side periodically publishes latency statistics to distributed pub sub. These are currently just logged out. 18 | 19 | ## Validation 20 | 21 | When everything is running you should see logs on the read side with the estimated latency. 22 | The latency is calculated from the event timestamp, so if you restart without an offset the latency will look very high! 23 | 24 | 25 | ## Running locally 26 | 27 | A Cassandra cluster must be available on `localhost:9042`. 28 | 29 | The first node can be run with the default ports, but you must provide a role and use the local configuration, e.g.: 30 | 31 | `sbt -Dconfig.resource=local.conf -Dakka.cluster.roles.0=write run` 32 | 33 | Each subsequent node needs its Akka Management and remoting ports overridden. The second node should use port 2552 for remoting 34 | and port 8552 for management, as these are configured in bootstrap. 35 | 36 | `sbt -Dakka.remote.artery.canonical.port=2552 -Dakka.management.http.port=8552 -Dconfig.resource=local.conf -Dakka.cluster.roles.0=write run` 37 | 38 | Then add at least one load node: 39 | 40 | `sbt -Dakka.remote.artery.canonical.port=2553 -Dakka.management.http.port=8553 -Dconfig.resource=local.conf -Dakka.cluster.roles.0=load run` 41 | 42 | And finally at least one read node: 43 | 44 | `sbt -Dakka.remote.artery.canonical.port=2554 -Dakka.management.http.port=8554 -Dconfig.resource=local.conf -Dakka.cluster.roles.0=read -Dakka.cluster.roles.1=report run` 45 | 46 | IntelliJ run configurations are included. 47 | 48 | ## Running inside a Kubernetes Cluster 49 | 50 | Configuration and sbt-native-packager are included for running in K8s for larger scale tests. 51 | 52 | There are three deployments in the `kubernetes` folder, one for each role. They all join the same cluster 53 | and use the same image. They have `imagePullPolicy` set to `Never` for minikube; remove this for a real cluster. 54 | 55 | 56 | ### Running in minikube 57 | 58 | * Create the Cassandra cluster; this also includes a service. 59 | 60 | `kubectl apply -f kubernetes/cassandra.yaml` 61 | 62 | * Create the write side: 63 | 64 | `kubectl apply -f kubernetes/write.yaml` 65 | 66 | * Generate some load: 67 | 68 | `kubectl apply -f kubernetes/load.yaml` 69 | 70 | * Start the event processors; these also log out the aggregated latency every 10s: 71 | 72 | `kubectl apply -f kubernetes/read.yaml` 73 | 74 | If everything is working the read side should get logs like this: 75 | 76 | ``` 77 | Read side Count: 1 Max: 100 p99: 100 p50: 100 78 | Read side Count: 1 Max: 97 p99: 97 p50: 97 79 | Read side Count: 3 Max: 101 p99: 101 p50: 100 80 | ``` 81 | 82 | To increase the load, edit values in `common.conf`, e.g. increase the load-tick duration. 83 | 84 | 85 | ### Running in a real Kubernetes cluster 86 | 87 | #### Publish to a registry the cluster can access, e.g. DockerHub with the kubakka user 88 | 89 | The app image must be in a registry the cluster can see. 90 | The build.sbt uses DockerHub and the `kubakka` user. Update this if your cluster can't 91 | access DockerHub.
92 | 93 | To push an image to Docker Hub, run: 94 | 95 | `sbt docker:publish` 96 | 97 | Then remove `imagePullPolicy: Never` from the deployments. 98 | 99 | #### Configure Cassandra 100 | 101 | Update `kubernetes.conf` to point to a Cassandra cluster, e.g. via a Kubernetes service. 102 | -------------------------------------------------------------------------------- /core/src/test/scala/akka/persistence/cassandra/journal/CassandraJournalSpec.scala: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2016-2025 Lightbend Inc. 3 | */ 4 | 5 | package akka.persistence.cassandra.journal 6 | 7 | import akka.actor.Actor 8 | import akka.persistence.CapabilityFlag 9 | import akka.persistence.{ AtomicWrite, PersistentRepr } 10 | import akka.persistence.JournalProtocol.{ ReplayMessages, WriteMessageFailure, WriteMessages, WriteMessagesFailed } 11 | 12 | import scala.concurrent.duration._ 13 | import akka.persistence.journal._ 14 | import akka.persistence.cassandra.CassandraLifecycle 15 | import akka.stream.alpakka.cassandra.CassandraMetricsRegistry 16 | import akka.testkit.TestProbe 17 | import com.typesafe.config.ConfigFactory 18 | 19 | object CassandraJournalConfiguration { 20 | val config = ConfigFactory.parseString(s""" 21 | akka.persistence.cassandra.journal.keyspace=CassandraJournalSpec 22 | akka.persistence.cassandra.snapshot.keyspace=CassandraJournalSpecSnapshot 23 | datastax-java-driver { 24 | basic.session-name = CassandraJournalSpec 25 | advanced.metrics { 26 | session.enabled = [ "bytes-sent", "cql-requests"] 27 | } 28 | } 29 | """).withFallback(CassandraLifecycle.config) 30 | 31 | lazy val perfConfig = 32 | ConfigFactory.parseString(""" 33 | akka.actor.serialize-messages=off 34 | akka.persistence.cassandra.journal.keyspace=CassandraJournalPerfSpec 35 | akka.persistence.cassandra.snapshot.keyspace=CassandraJournalPerfSpecSnapshot 36 | """).withFallback(config) 37 | 38 | } 39 | 40 | // Can't use CassandraSpec, so this spec needs to do its own cleanup 41 | class CassandraJournalSpec extends JournalSpec(CassandraJournalConfiguration.config) with CassandraLifecycle { 42 | override def systemName: String = "CassandraJournalSpec" 43 | 44 | override def supportsRejectingNonSerializableObjects = false 45 | 46 | "A Cassandra Journal" must { 47 | "insert Cassandra metrics to Cassandra Metrics Registry" in { 48 | val registry = CassandraMetricsRegistry(system).getRegistry 49 | val metricsNames = registry.getNames.toArray.toSet 50 | // the metrics category is the configPath of the plugin + the session-name 51 | metricsNames should contain("akka.persistence.cassandra.CassandraJournalSpec.bytes-sent") 52 | metricsNames should contain("akka.persistence.cassandra.CassandraJournalSpec.cql-requests") 53 | } 54 | "be able to replay messages after serialization failure" in { 55 | // there is no chance that a journal could create a data representation for this type of event 56 | val notSerializableEvent = new Object { 57 | override def toString = "not serializable" 58 | } 59 | val msg = PersistentRepr( 60 | payload = notSerializableEvent, 61 | sequenceNr = 6, 62 | persistenceId = pid, 63 | sender = Actor.noSender, 64 | writerUuid = writerUuid) 65 | 66 | val probe = TestProbe() 67 | 68 | journal ! WriteMessages(List(AtomicWrite(msg)), probe.ref, actorInstanceId) 69 | val err = probe.expectMsgPF() { 70 | case fail: WriteMessagesFailed => fail.cause 71 | } 72 | probe.expectMsg(WriteMessageFailure(msg, err, actorInstanceId)) 73 | 74 | journal !
ReplayMessages(5, 5, 1, pid, probe.ref) 75 | probe.expectMsg(replayedMessage(5)) 76 | } 77 | } 78 | } 79 | 80 | class CassandraJournalMetaSpec extends JournalSpec(CassandraJournalConfiguration.config) with CassandraLifecycle { 81 | override def systemName: String = "CassandraJournalSpec" 82 | 83 | override def supportsRejectingNonSerializableObjects = false 84 | protected override def supportsMetadata: CapabilityFlag = true 85 | } 86 | 87 | class CassandraJournalPerfSpec 88 | extends JournalPerfSpec(CassandraJournalConfiguration.perfConfig) 89 | with CassandraLifecycle { 90 | override def systemName: String = "CassandraJournalPerfSpec" 91 | 92 | override def awaitDurationMillis: Long = 20.seconds.toMillis 93 | 94 | override def supportsRejectingNonSerializableObjects = false 95 | 96 | } 97 | --------------------------------------------------------------------------------